Created
November 19, 2025 09:33
-
-
Save byeongkyu/7d9b999c310c3ef3852c6844b11e3191 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig | |
| index 6d5afe2e6ba3..5d92aef787e3 100644 | |
| --- a/arch/arm/Kconfig | |
| +++ b/arch/arm/Kconfig | |
| @@ -33,6 +33,7 @@ config ARM | |
| select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 | |
| select ARCH_SUPPORTS_ATOMIC_RMW | |
| select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE | |
| + select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK | |
| select ARCH_USE_BUILTIN_BSWAP | |
| select ARCH_USE_CMPXCHG_LOCKREF | |
| select ARCH_USE_MEMTEST | |
| @@ -71,7 +72,7 @@ config ARM | |
| select HARDIRQS_SW_RESEND | |
| select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT | |
| select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 | |
| - select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU | |
| + select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT | |
| select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL | |
| select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU | |
| select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL | |
| @@ -94,7 +95,7 @@ config ARM | |
| select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE | |
| select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU | |
| select HAVE_EXIT_THREAD | |
| - select HAVE_FAST_GUP if ARM_LPAE | |
| + select HAVE_FAST_GUP if ARM_LPAE && !(PREEMPT_RT && HIGHPTE) | |
| select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL | |
| select HAVE_FUNCTION_GRAPH_TRACER | |
| select HAVE_FUNCTION_TRACER if !XIP_KERNEL | |
| @@ -115,6 +116,8 @@ config ARM | |
| select HAVE_PERF_EVENTS | |
| select HAVE_PERF_REGS | |
| select HAVE_PERF_USER_STACK_DUMP | |
| + select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM | |
| + select HAVE_PREEMPT_LAZY | |
| select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE | |
| select HAVE_REGS_AND_STACK_ACCESS_API | |
| select HAVE_RSEQ | |
| diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h | |
| index 7f092cb55a41..ffcbf8ebed4b 100644 | |
| --- a/arch/arm/include/asm/thread_info.h | |
| +++ b/arch/arm/include/asm/thread_info.h | |
| @@ -62,6 +62,7 @@ struct cpu_context_save { | |
| struct thread_info { | |
| unsigned long flags; /* low level flags */ | |
| int preempt_count; /* 0 => preemptable, <0 => bug */ | |
| + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ | |
| __u32 cpu; /* cpu */ | |
| __u32 cpu_domain; /* cpu domain */ | |
| struct cpu_context_save cpu_context; /* cpu context */ | |
| @@ -129,6 +130,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, | |
| #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ | |
| #define TIF_UPROBE 3 /* breakpointed or singlestepping */ | |
| #define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */ | |
| +#define TIF_NEED_RESCHED_LAZY 5 | |
| #define TIF_USING_IWMMXT 17 | |
| #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ | |
| @@ -148,6 +150,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, | |
| #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) | |
| #define _TIF_SECCOMP (1 << TIF_SECCOMP) | |
| #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) | |
| +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) | |
| #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) | |
| /* Checks for any syscall work in entry-common.S */ | |
| @@ -157,7 +160,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, | |
| /* | |
| * Change these and you break ASM code in entry-common.S | |
| */ | |
| -#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ | |
| +#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \ | |
| + _TIF_SIGPENDING | \ | |
| _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ | |
| _TIF_NOTIFY_SIGNAL) | |
| diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c | |
| index 2c8d76fd7c66..c3bdec7d2df9 100644 | |
| --- a/arch/arm/kernel/asm-offsets.c | |
| +++ b/arch/arm/kernel/asm-offsets.c | |
| @@ -43,6 +43,7 @@ int main(void) | |
| BLANK(); | |
| DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | |
| DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); | |
| + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); | |
| DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); | |
| DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain)); | |
| DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context)); | |
| diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S | |
| index c39303e5c234..cfb4660e9fea 100644 | |
| --- a/arch/arm/kernel/entry-armv.S | |
| +++ b/arch/arm/kernel/entry-armv.S | |
| @@ -222,11 +222,18 @@ ENDPROC(__dabt_svc) | |
| #ifdef CONFIG_PREEMPTION | |
| ldr r8, [tsk, #TI_PREEMPT] @ get preempt count | |
| - ldr r0, [tsk, #TI_FLAGS] @ get flags | |
| teq r8, #0 @ if preempt count != 0 | |
| + bne 1f @ return from exception | |
| + ldr r0, [tsk, #TI_FLAGS] @ get flags | |
| + tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set | |
| + blne svc_preempt @ preempt! | |
| + | |
| + ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count | |
| + teq r8, #0 @ if preempt lazy count != 0 | |
| movne r0, #0 @ force flags to 0 | |
| - tst r0, #_TIF_NEED_RESCHED | |
| + tst r0, #_TIF_NEED_RESCHED_LAZY | |
| blne svc_preempt | |
| +1: | |
| #endif | |
| svc_exit r5, irq = 1 @ return from exception | |
| @@ -241,8 +248,14 @@ ENDPROC(__irq_svc) | |
| 1: bl preempt_schedule_irq @ irq en/disable is done inside | |
| ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS | |
| tst r0, #_TIF_NEED_RESCHED | |
| + bne 1b | |
| + tst r0, #_TIF_NEED_RESCHED_LAZY | |
| reteq r8 @ go again | |
| - b 1b | |
| + ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count | |
| + teq r0, #0 @ if preempt lazy count != 0 | |
| + beq 1b | |
| + ret r8 @ go again | |
| + | |
| #endif | |
| __und_fault: | |
| diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c | |
| index e07f359254c3..b50a3248e79f 100644 | |
| --- a/arch/arm/kernel/signal.c | |
| +++ b/arch/arm/kernel/signal.c | |
| @@ -607,7 +607,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) | |
| */ | |
| trace_hardirqs_off(); | |
| do { | |
| - if (likely(thread_flags & _TIF_NEED_RESCHED)) { | |
| + if (likely(thread_flags & (_TIF_NEED_RESCHED | | |
| + _TIF_NEED_RESCHED_LAZY))) { | |
| schedule(); | |
| } else { | |
| if (unlikely(!user_mode(regs))) | |
| diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c | |
| index b0db85310331..77877dcb54ed 100644 | |
| --- a/arch/arm/mm/fault.c | |
| +++ b/arch/arm/mm/fault.c | |
| @@ -400,6 +400,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, | |
| if (addr < TASK_SIZE) | |
| return do_page_fault(addr, fsr, regs); | |
| + if (interrupts_enabled(regs)) | |
| + local_irq_enable(); | |
| + | |
| if (user_mode(regs)) | |
| goto bad_area; | |
| @@ -470,6 +473,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, | |
| static int | |
| do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
| { | |
| + if (interrupts_enabled(regs)) | |
| + local_irq_enable(); | |
| + | |
| do_bad_area(addr, fsr, regs); | |
| return 0; | |
| } | |
| diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig | |
| index 2ef939075039..4a184c28fc94 100644 | |
| --- a/arch/arm64/Kconfig | |
| +++ b/arch/arm64/Kconfig | |
| @@ -93,6 +93,7 @@ config ARM64 | |
| select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 | |
| select ARCH_SUPPORTS_NUMA_BALANCING | |
| select ARCH_SUPPORTS_PAGE_TABLE_CHECK | |
| + select ARCH_SUPPORTS_RT | |
| select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT | |
| select ARCH_WANT_DEFAULT_BPF_JIT | |
| select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT | |
| @@ -199,6 +200,7 @@ config ARM64 | |
| select HAVE_PERF_USER_STACK_DUMP | |
| select HAVE_PREEMPT_DYNAMIC_KEY | |
| select HAVE_REGS_AND_STACK_ACCESS_API | |
| + select HAVE_PREEMPT_LAZY | |
| select HAVE_POSIX_CPU_TIMERS_TASK_WORK | |
| select HAVE_FUNCTION_ARG_ACCESS_API | |
| select MMU_GATHER_RCU_TABLE_FREE | |
| diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h | |
| index 0159b625cc7f..a5486918e5ee 100644 | |
| --- a/arch/arm64/include/asm/preempt.h | |
| +++ b/arch/arm64/include/asm/preempt.h | |
| @@ -71,13 +71,36 @@ static inline bool __preempt_count_dec_and_test(void) | |
| * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE | |
| * pair. | |
| */ | |
| - return !pc || !READ_ONCE(ti->preempt_count); | |
| + if (!pc || !READ_ONCE(ti->preempt_count)) | |
| + return true; | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| + if ((pc & ~PREEMPT_NEED_RESCHED)) | |
| + return false; | |
| + if (current_thread_info()->preempt_lazy_count) | |
| + return false; | |
| + return test_thread_flag(TIF_NEED_RESCHED_LAZY); | |
| +#else | |
| + return false; | |
| +#endif | |
| } | |
| static inline bool should_resched(int preempt_offset) | |
| { | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| + u64 pc = READ_ONCE(current_thread_info()->preempt_count); | |
| + if (pc == preempt_offset) | |
| + return true; | |
| + | |
| + if ((pc & ~PREEMPT_NEED_RESCHED) != preempt_offset) | |
| + return false; | |
| + | |
| + if (current_thread_info()->preempt_lazy_count) | |
| + return false; | |
| + return test_thread_flag(TIF_NEED_RESCHED_LAZY); | |
| +#else | |
| u64 pc = READ_ONCE(current_thread_info()->preempt_count); | |
| return pc == preempt_offset; | |
| +#endif | |
| } | |
| #ifdef CONFIG_PREEMPTION | |
| diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h | |
| index 848739c15de8..4b7148fd5551 100644 | |
| --- a/arch/arm64/include/asm/thread_info.h | |
| +++ b/arch/arm64/include/asm/thread_info.h | |
| @@ -26,6 +26,7 @@ struct thread_info { | |
| #ifdef CONFIG_ARM64_SW_TTBR0_PAN | |
| u64 ttbr0; /* saved TTBR0_EL1 */ | |
| #endif | |
| + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ | |
| union { | |
| u64 preempt_count; /* 0 => preemptible, <0 => bug */ | |
| struct { | |
| @@ -68,6 +69,7 @@ int arch_dup_task_struct(struct task_struct *dst, | |
| #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ | |
| #define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */ | |
| #define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */ | |
| +#define TIF_NEED_RESCHED_LAZY 7 | |
| #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ | |
| #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ | |
| #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ | |
| @@ -100,8 +102,10 @@ int arch_dup_task_struct(struct task_struct *dst, | |
| #define _TIF_SVE (1 << TIF_SVE) | |
| #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) | |
| #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) | |
| +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) | |
| -#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ | |
| +#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \ | |
| + _TIF_SIGPENDING | \ | |
| _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ | |
| _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \ | |
| _TIF_NOTIFY_SIGNAL) | |
| @@ -110,6 +114,8 @@ int arch_dup_task_struct(struct task_struct *dst, | |
| _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ | |
| _TIF_SYSCALL_EMU) | |
| +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) | |
| + | |
| #ifdef CONFIG_SHADOW_CALL_STACK | |
| #define INIT_SCS \ | |
| .scs_base = init_shadow_call_stack, \ | |
| diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c | |
| index 1197e7679882..e74c0415f67e 100644 | |
| --- a/arch/arm64/kernel/asm-offsets.c | |
| +++ b/arch/arm64/kernel/asm-offsets.c | |
| @@ -32,6 +32,7 @@ int main(void) | |
| DEFINE(TSK_TI_CPU, offsetof(struct task_struct, thread_info.cpu)); | |
| DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); | |
| DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); | |
| + DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count)); | |
| #ifdef CONFIG_ARM64_SW_TTBR0_PAN | |
| DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); | |
| #endif | |
| diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c | |
| index 82f4572c8ddf..2a606c7bf025 100644 | |
| --- a/arch/arm64/kernel/signal.c | |
| +++ b/arch/arm64/kernel/signal.c | |
| @@ -1108,7 +1108,7 @@ static void do_signal(struct pt_regs *regs) | |
| void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) | |
| { | |
| do { | |
| - if (thread_flags & _TIF_NEED_RESCHED) { | |
| + if (thread_flags & _TIF_NEED_RESCHED_MASK) { | |
| /* Unmask Debug and SError for the next task */ | |
| local_daif_restore(DAIF_PROCCTX_NOIRQ); | |
| diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig | |
| index 6050e6e10d32..df697d3f68cd 100644 | |
| --- a/arch/powerpc/Kconfig | |
| +++ b/arch/powerpc/Kconfig | |
| @@ -151,6 +151,7 @@ config PPC | |
| select ARCH_STACKWALK | |
| select ARCH_SUPPORTS_ATOMIC_RMW | |
| select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x | |
| + select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK | |
| select ARCH_USE_BUILTIN_BSWAP | |
| select ARCH_USE_CMPXCHG_LOCKREF if PPC64 | |
| select ARCH_USE_MEMTEST | |
| @@ -242,8 +243,10 @@ config PPC | |
| select HAVE_PERF_EVENTS_NMI if PPC64 | |
| select HAVE_PERF_REGS | |
| select HAVE_PERF_USER_STACK_DUMP | |
| + select HAVE_PREEMPT_LAZY | |
| select HAVE_REGS_AND_STACK_ACCESS_API | |
| select HAVE_RELIABLE_STACKTRACE | |
| + select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM | |
| select HAVE_RSEQ | |
| select HAVE_SETUP_PER_CPU_AREA if PPC64 | |
| select HAVE_SOFTIRQ_ON_OWN_STACK | |
| diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h | |
| index 1c8460e23583..b1653c160bab 100644 | |
| --- a/arch/powerpc/include/asm/stackprotector.h | |
| +++ b/arch/powerpc/include/asm/stackprotector.h | |
| @@ -24,7 +24,11 @@ static __always_inline void boot_init_stack_canary(void) | |
| unsigned long canary; | |
| /* Try to get a semi random initial value. */ | |
| +#ifdef CONFIG_PREEMPT_RT | |
| + canary = (unsigned long)&canary; | |
| +#else | |
| canary = get_random_canary(); | |
| +#endif | |
| canary ^= mftb(); | |
| canary ^= LINUX_VERSION_CODE; | |
| canary &= CANARY_MASK; | |
| diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h | |
| index c4b798aa6ce8..923621486811 100644 | |
| --- a/arch/powerpc/include/asm/thread_info.h | |
| +++ b/arch/powerpc/include/asm/thread_info.h | |
| @@ -53,6 +53,8 @@ | |
| struct thread_info { | |
| int preempt_count; /* 0 => preemptable, | |
| <0 => BUG */ | |
| + int preempt_lazy_count; /* 0 => preemptable, | |
| + <0 => BUG */ | |
| #ifdef CONFIG_SMP | |
| unsigned int cpu; | |
| #endif | |
| @@ -77,6 +79,7 @@ struct thread_info { | |
| #define INIT_THREAD_INFO(tsk) \ | |
| { \ | |
| .preempt_count = INIT_PREEMPT_COUNT, \ | |
| + .preempt_lazy_count = 0, \ | |
| .flags = 0, \ | |
| } | |
| @@ -102,6 +105,7 @@ void arch_setup_new_exec(void); | |
| #define TIF_PATCH_PENDING 6 /* pending live patching update */ | |
| #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | |
| #define TIF_SINGLESTEP 8 /* singlestepping active */ | |
| +#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ | |
| #define TIF_SECCOMP 10 /* secure computing */ | |
| #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ | |
| #define TIF_NOERROR 12 /* Force successful syscall return */ | |
| @@ -117,6 +121,7 @@ void arch_setup_new_exec(void); | |
| #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | |
| #define TIF_32BIT 20 /* 32 bit binary */ | |
| + | |
| /* as above, but as bit values */ | |
| #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | |
| #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | |
| @@ -128,6 +133,7 @@ void arch_setup_new_exec(void); | |
| #define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING) | |
| #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | |
| #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | |
| +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY) | |
| #define _TIF_SECCOMP (1<<TIF_SECCOMP) | |
| #define _TIF_RESTOREALL (1<<TIF_RESTOREALL) | |
| #define _TIF_NOERROR (1<<TIF_NOERROR) | |
| @@ -141,10 +147,12 @@ void arch_setup_new_exec(void); | |
| _TIF_SYSCALL_EMU) | |
| #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ | |
| + _TIF_NEED_RESCHED_LAZY | \ | |
| _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ | |
| _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \ | |
| _TIF_NOTIFY_SIGNAL) | |
| #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) | |
| +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) | |
| /* Bits in local_flags */ | |
| /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ | |
| diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c | |
| index cf770d86c03c..2c454731c250 100644 | |
| --- a/arch/powerpc/kernel/interrupt.c | |
| +++ b/arch/powerpc/kernel/interrupt.c | |
| @@ -186,7 +186,7 @@ interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs) | |
| ti_flags = read_thread_flags(); | |
| while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) { | |
| local_irq_enable(); | |
| - if (ti_flags & _TIF_NEED_RESCHED) { | |
| + if (ti_flags & _TIF_NEED_RESCHED_MASK) { | |
| schedule(); | |
| } else { | |
| /* | |
| @@ -397,11 +397,15 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs) | |
| /* Returning to a kernel context with local irqs enabled. */ | |
| WARN_ON_ONCE(!(regs->msr & MSR_EE)); | |
| again: | |
| - if (IS_ENABLED(CONFIG_PREEMPT)) { | |
| + if (IS_ENABLED(CONFIG_PREEMPTION)) { | |
| /* Return to preemptible kernel context */ | |
| if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) { | |
| if (preempt_count() == 0) | |
| preempt_schedule_irq(); | |
| + } else if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED_LAZY)) { | |
| + if ((preempt_count() == 0) && | |
| + (current_thread_info()->preempt_lazy_count == 0)) | |
| + preempt_schedule_irq(); | |
| } | |
| } | |
| diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c | |
| index 362b712386f6..ddf5b716394f 100644 | |
| --- a/arch/powerpc/kernel/traps.c | |
| +++ b/arch/powerpc/kernel/traps.c | |
| @@ -261,12 +261,17 @@ static char *get_mmu_str(void) | |
| static int __die(const char *str, struct pt_regs *regs, long err) | |
| { | |
| + const char *pr = ""; | |
| + | |
| printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); | |
| + if (IS_ENABLED(CONFIG_PREEMPTION)) | |
| + pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT"; | |
| + | |
| printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n", | |
| IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE", | |
| PAGE_SIZE / 1024, get_mmu_str(), | |
| - IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", | |
| + pr, | |
| IS_ENABLED(CONFIG_SMP) ? " SMP" : "", | |
| IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", | |
| debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", | |
| diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig | |
| index a9f57dad6d91..a0b528d4bb7c 100644 | |
| --- a/arch/powerpc/kvm/Kconfig | |
| +++ b/arch/powerpc/kvm/Kconfig | |
| @@ -225,6 +225,7 @@ config KVM_E500MC | |
| config KVM_MPIC | |
| bool "KVM in-kernel MPIC emulation" | |
| depends on KVM && PPC_E500 | |
| + depends on !PREEMPT_RT | |
| select HAVE_KVM_IRQCHIP | |
| select HAVE_KVM_IRQFD | |
| select HAVE_KVM_IRQ_ROUTING | |
| diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c | |
| index 5e00a3cde93b..6070c9a7c979 100644 | |
| --- a/arch/powerpc/platforms/pseries/iommu.c | |
| +++ b/arch/powerpc/platforms/pseries/iommu.c | |
| @@ -24,6 +24,7 @@ | |
| #include <linux/of.h> | |
| #include <linux/iommu.h> | |
| #include <linux/rculist.h> | |
| +#include <linux/local_lock.h> | |
| #include <asm/io.h> | |
| #include <asm/prom.h> | |
| #include <asm/rtas.h> | |
| @@ -200,7 +201,13 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift, | |
| return ret; | |
| } | |
| -static DEFINE_PER_CPU(__be64 *, tce_page); | |
| +struct tce_page { | |
| + __be64 * page; | |
| + local_lock_t lock; | |
| +}; | |
| +static DEFINE_PER_CPU(struct tce_page, tce_page) = { | |
| + .lock = INIT_LOCAL_LOCK(lock), | |
| +}; | |
| static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | |
| long npages, unsigned long uaddr, | |
| @@ -223,9 +230,10 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | |
| direction, attrs); | |
| } | |
| - local_irq_save(flags); /* to protect tcep and the page behind it */ | |
| + /* to protect tcep and the page behind it */ | |
| + local_lock_irqsave(&tce_page.lock, flags); | |
| - tcep = __this_cpu_read(tce_page); | |
| + tcep = __this_cpu_read(tce_page.page); | |
| /* This is safe to do since interrupts are off when we're called | |
| * from iommu_alloc{,_sg}() | |
| @@ -234,12 +242,12 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | |
| tcep = (__be64 *)__get_free_page(GFP_ATOMIC); | |
| /* If allocation fails, fall back to the loop implementation */ | |
| if (!tcep) { | |
| - local_irq_restore(flags); | |
| + local_unlock_irqrestore(&tce_page.lock, flags); | |
| return tce_build_pSeriesLP(tbl->it_index, tcenum, | |
| tceshift, | |
| npages, uaddr, direction, attrs); | |
| } | |
| - __this_cpu_write(tce_page, tcep); | |
| + __this_cpu_write(tce_page.page, tcep); | |
| } | |
| rpn = __pa(uaddr) >> tceshift; | |
| @@ -269,7 +277,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | |
| tcenum += limit; | |
| } while (npages > 0 && !rc); | |
| - local_irq_restore(flags); | |
| + local_unlock_irqrestore(&tce_page.lock, flags); | |
| if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) { | |
| ret = (int)rc; | |
| @@ -454,16 +462,17 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, | |
| DMA_BIDIRECTIONAL, 0); | |
| } | |
| - local_irq_disable(); /* to protect tcep and the page behind it */ | |
| - tcep = __this_cpu_read(tce_page); | |
| + /* to protect tcep and the page behind it */ | |
| + local_lock_irq(&tce_page.lock); | |
| + tcep = __this_cpu_read(tce_page.page); | |
| if (!tcep) { | |
| tcep = (__be64 *)__get_free_page(GFP_ATOMIC); | |
| if (!tcep) { | |
| - local_irq_enable(); | |
| + local_unlock_irq(&tce_page.lock); | |
| return -ENOMEM; | |
| } | |
| - __this_cpu_write(tce_page, tcep); | |
| + __this_cpu_write(tce_page.page, tcep); | |
| } | |
| proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; | |
| @@ -506,7 +515,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, | |
| /* error cleanup: caller will clear whole range */ | |
| - local_irq_enable(); | |
| + local_unlock_irq(&tce_page.lock); | |
| return rc; | |
| } | |
| diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig | |
| index 49cea5b81649..3a6dc90c8ebc 100644 | |
| --- a/arch/x86/Kconfig | |
| +++ b/arch/x86/Kconfig | |
| @@ -114,6 +114,7 @@ config X86 | |
| select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG | |
| select ARCH_SUPPORTS_LTO_CLANG | |
| select ARCH_SUPPORTS_LTO_CLANG_THIN | |
| + select ARCH_SUPPORTS_RT | |
| select ARCH_USE_BUILTIN_BSWAP | |
| select ARCH_USE_MEMTEST | |
| select ARCH_USE_QUEUED_RWLOCKS | |
| @@ -251,6 +252,7 @@ config X86 | |
| select HAVE_PCI | |
| select HAVE_PERF_REGS | |
| select HAVE_PERF_USER_STACK_DUMP | |
| + select HAVE_PREEMPT_LAZY | |
| select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT | |
| select MMU_GATHER_MERGE_VMAS | |
| select HAVE_POSIX_CPU_TIMERS_TASK_WORK | |
| diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h | |
| index 5f6daea1ee24..cd20b4a5719a 100644 | |
| --- a/arch/x86/include/asm/preempt.h | |
| +++ b/arch/x86/include/asm/preempt.h | |
| @@ -90,17 +90,48 @@ static __always_inline void __preempt_count_sub(int val) | |
| * a decrement which hits zero means we have no preempt_count and should | |
| * reschedule. | |
| */ | |
| -static __always_inline bool __preempt_count_dec_and_test(void) | |
| +static __always_inline bool ____preempt_count_dec_and_test(void) | |
| { | |
| return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var])); | |
| } | |
| +static __always_inline bool __preempt_count_dec_and_test(void) | |
| +{ | |
| + if (____preempt_count_dec_and_test()) | |
| + return true; | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| + if (preempt_count()) | |
| + return false; | |
| + if (current_thread_info()->preempt_lazy_count) | |
| + return false; | |
| + return test_thread_flag(TIF_NEED_RESCHED_LAZY); | |
| +#else | |
| + return false; | |
| +#endif | |
| +} | |
| + | |
| /* | |
| * Returns true when we need to resched and can (barring IRQ state). | |
| */ | |
| static __always_inline bool should_resched(int preempt_offset) | |
| { | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| + u32 tmp; | |
| + tmp = raw_cpu_read_4(__preempt_count); | |
| + if (tmp == preempt_offset) | |
| + return true; | |
| + | |
| + /* preempt count == 0 ? */ | |
| + tmp &= ~PREEMPT_NEED_RESCHED; | |
| + if (tmp != preempt_offset) | |
| + return false; | |
| + /* XXX PREEMPT_LOCK_OFFSET */ | |
| + if (current_thread_info()->preempt_lazy_count) | |
| + return false; | |
| + return test_thread_flag(TIF_NEED_RESCHED_LAZY); | |
| +#else | |
| return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); | |
| +#endif | |
| } | |
| #ifdef CONFIG_PREEMPTION | |
| diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h | |
| index f0cb881c1d69..fd8fb76f324f 100644 | |
| --- a/arch/x86/include/asm/thread_info.h | |
| +++ b/arch/x86/include/asm/thread_info.h | |
| @@ -57,6 +57,8 @@ struct thread_info { | |
| unsigned long flags; /* low level flags */ | |
| unsigned long syscall_work; /* SYSCALL_WORK_ flags */ | |
| u32 status; /* thread synchronous flags */ | |
| + int preempt_lazy_count; /* 0 => lazy preemptable | |
| + <0 => BUG */ | |
| #ifdef CONFIG_SMP | |
| u32 cpu; /* current CPU */ | |
| #endif | |
| @@ -65,6 +67,7 @@ struct thread_info { | |
| #define INIT_THREAD_INFO(tsk) \ | |
| { \ | |
| .flags = 0, \ | |
| + .preempt_lazy_count = 0, \ | |
| } | |
| #else /* !__ASSEMBLY__ */ | |
| @@ -92,6 +95,7 @@ struct thread_info { | |
| #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */ | |
| #define TIF_NOTSC 16 /* TSC is not accessible in userland */ | |
| #define TIF_NOTIFY_SIGNAL 17 /* signal notifications exist */ | |
| +#define TIF_NEED_RESCHED_LAZY 19 /* lazy rescheduling necessary */ | |
| #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ | |
| #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ | |
| #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ | |
| @@ -115,6 +119,7 @@ struct thread_info { | |
| #define _TIF_NOCPUID (1 << TIF_NOCPUID) | |
| #define _TIF_NOTSC (1 << TIF_NOTSC) | |
| #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) | |
| +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) | |
| #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) | |
| #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) | |
| #define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE) | |
| diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c | |
| index 966aab902d19..ee69e4443691 100644 | |
| --- a/drivers/block/zram/zram_drv.c | |
| +++ b/drivers/block/zram/zram_drv.c | |
| @@ -57,6 +57,40 @@ static void zram_free_page(struct zram *zram, size_t index); | |
| static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, | |
| u32 index, int offset, struct bio *bio); | |
| +#ifdef CONFIG_PREEMPT_RT | |
| +static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) | |
| +{ | |
| + size_t index; | |
| + | |
| + for (index = 0; index < num_pages; index++) | |
| + spin_lock_init(&zram->table[index].lock); | |
| +} | |
| + | |
| +static int zram_slot_trylock(struct zram *zram, u32 index) | |
| +{ | |
| + int ret; | |
| + | |
| + ret = spin_trylock(&zram->table[index].lock); | |
| + if (ret) | |
| + __set_bit(ZRAM_LOCK, &zram->table[index].flags); | |
| + return ret; | |
| +} | |
| + | |
| +static void zram_slot_lock(struct zram *zram, u32 index) | |
| +{ | |
| + spin_lock(&zram->table[index].lock); | |
| + __set_bit(ZRAM_LOCK, &zram->table[index].flags); | |
| +} | |
| + | |
| +static void zram_slot_unlock(struct zram *zram, u32 index) | |
| +{ | |
| + __clear_bit(ZRAM_LOCK, &zram->table[index].flags); | |
| + spin_unlock(&zram->table[index].lock); | |
| +} | |
| + | |
| +#else | |
| + | |
| +static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { } | |
| static int zram_slot_trylock(struct zram *zram, u32 index) | |
| { | |
| @@ -72,6 +106,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index) | |
| { | |
| bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags); | |
| } | |
| +#endif | |
| static inline bool init_done(struct zram *zram) | |
| { | |
| @@ -1187,6 +1222,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) | |
| if (!huge_class_size) | |
| huge_class_size = zs_huge_class_size(zram->mem_pool); | |
| + zram_meta_init_table_locks(zram, num_pages); | |
| return true; | |
| } | |
| diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h | |
| index a2bda53020fd..ae7950b26db5 100644 | |
| --- a/drivers/block/zram/zram_drv.h | |
| +++ b/drivers/block/zram/zram_drv.h | |
| @@ -62,6 +62,9 @@ struct zram_table_entry { | |
| unsigned long element; | |
| }; | |
| unsigned long flags; | |
| +#ifdef CONFIG_PREEMPT_RT | |
| + spinlock_t lock; | |
| +#endif | |
| #ifdef CONFIG_ZRAM_MEMORY_TRACKING | |
| ktime_t ac_time; | |
| #endif | |
| diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c | |
| index 0d084d6652c4..5d620322bdc2 100644 | |
| --- a/drivers/char/tpm/tpm_tis.c | |
| +++ b/drivers/char/tpm/tpm_tis.c | |
| @@ -50,6 +50,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da | |
| return container_of(data, struct tpm_tis_tcg_phy, priv); | |
| } | |
| +#ifdef CONFIG_PREEMPT_RT | |
| +/* | |
| + * Flushes previous write operations to chip so that a subsequent | |
| + * ioread*()s won't stall a cpu. | |
| + */ | |
| +static inline void tpm_tis_flush(void __iomem *iobase) | |
| +{ | |
| + ioread8(iobase + TPM_ACCESS(0)); | |
| +} | |
| +#else | |
| +#define tpm_tis_flush(iobase) do { } while (0) | |
| +#endif | |
| + | |
| +static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr) | |
| +{ | |
| + iowrite8(b, iobase + addr); | |
| + tpm_tis_flush(iobase); | |
| +} | |
| + | |
| +static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr) | |
| +{ | |
| + iowrite32(b, iobase + addr); | |
| + tpm_tis_flush(iobase); | |
| +} | |
| + | |
| static int interrupts = -1; | |
| module_param(interrupts, int, 0444); | |
| MODULE_PARM_DESC(interrupts, "Enable interrupts"); | |
| @@ -202,12 +227,12 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, | |
| switch (io_mode) { | |
| case TPM_TIS_PHYS_8: | |
| while (len--) | |
| - iowrite8(*value++, phy->iobase + addr); | |
| + tpm_tis_iowrite8(*value++, phy->iobase, addr); | |
| break; | |
| case TPM_TIS_PHYS_16: | |
| return -EINVAL; | |
| case TPM_TIS_PHYS_32: | |
| - iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase + addr); | |
| + tpm_tis_iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase, addr); | |
| break; | |
| } | |
| diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig | |
| index 6b10868ec72f..1fbdb7b4e6e1 100644 | |
| --- a/drivers/gpu/drm/i915/Kconfig | |
| +++ b/drivers/gpu/drm/i915/Kconfig | |
| @@ -3,7 +3,6 @@ config DRM_I915 | |
| tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics" | |
| depends on DRM | |
| depends on X86 && PCI | |
| - depends on !PREEMPT_RT | |
| select INTEL_GTT if X86 | |
| select INTERVAL_TREE | |
| # we need shmfs for the swappable backing store, and in particular | |
| diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c | |
| index 6792a9056f46..43cedfef104f 100644 | |
| --- a/drivers/gpu/drm/i915/display/intel_crtc.c | |
| +++ b/drivers/gpu/drm/i915/display/intel_crtc.c | |
| @@ -521,7 +521,8 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) | |
| */ | |
| intel_psr_wait_for_idle_locked(new_crtc_state); | |
| - local_irq_disable(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + local_irq_disable(); | |
| crtc->debug.min_vbl = min; | |
| crtc->debug.max_vbl = max; | |
| @@ -546,11 +547,13 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) | |
| break; | |
| } | |
| - local_irq_enable(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + local_irq_enable(); | |
| timeout = schedule_timeout(timeout); | |
| - local_irq_disable(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + local_irq_disable(); | |
| } | |
| finish_wait(wq, &wait); | |
| @@ -583,7 +586,8 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) | |
| return; | |
| irq_disable: | |
| - local_irq_disable(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + local_irq_disable(); | |
| } | |
| #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) | |
| @@ -684,7 +688,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) | |
| */ | |
| intel_vrr_send_push(new_crtc_state); | |
| - local_irq_enable(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + local_irq_enable(); | |
| if (intel_vgpu_active(dev_priv)) | |
| return; | |
| diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | |
| index f2973cd1a8aa..aa77f8601b8a 100644 | |
| --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | |
| +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | |
| @@ -315,7 +315,12 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b) | |
| return; | |
| /* Kick the work once more to drain the signalers, and disarm the irq */ | |
| - irq_work_queue(&b->irq_work); | |
| + irq_work_sync(&b->irq_work); | |
| + while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) { | |
| + irq_work_queue(&b->irq_work); | |
| + cond_resched(); | |
| + irq_work_sync(&b->irq_work); | |
| + } | |
| } | |
| void intel_breadcrumbs_free(struct kref *kref) | |
| diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c | |
| index 321dbecba0f3..ea8ffea8d311 100644 | |
| --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c | |
| +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c | |
| @@ -1302,7 +1302,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |
| * and context switches) submission. | |
| */ | |
| - spin_lock(&sched_engine->lock); | |
| + spin_lock_irq(&sched_engine->lock); | |
| /* | |
| * If the queue is higher priority than the last | |
| @@ -1402,7 +1402,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |
| * Even if ELSP[1] is occupied and not worthy | |
| * of timeslices, our queue might be. | |
| */ | |
| - spin_unlock(&sched_engine->lock); | |
| + spin_unlock_irq(&sched_engine->lock); | |
| return; | |
| } | |
| } | |
| @@ -1428,7 +1428,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |
| if (last && !can_merge_rq(last, rq)) { | |
| spin_unlock(&ve->base.sched_engine->lock); | |
| - spin_unlock(&engine->sched_engine->lock); | |
| + spin_unlock_irq(&engine->sched_engine->lock); | |
| return; /* leave this for another sibling */ | |
| } | |
| @@ -1590,7 +1590,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |
| */ | |
| sched_engine->queue_priority_hint = queue_prio(sched_engine); | |
| i915_sched_engine_reset_on_empty(sched_engine); | |
| - spin_unlock(&sched_engine->lock); | |
| + spin_unlock_irq(&sched_engine->lock); | |
| /* | |
| * We can skip poking the HW if we ended up with exactly the same set | |
| @@ -1616,13 +1616,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |
| } | |
| } | |
| -static void execlists_dequeue_irq(struct intel_engine_cs *engine) | |
| -{ | |
| - local_irq_disable(); /* Suspend interrupts across request submission */ | |
| - execlists_dequeue(engine); | |
| - local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */ | |
| -} | |
| - | |
| static void clear_ports(struct i915_request **ports, int count) | |
| { | |
| memset_p((void **)ports, NULL, count); | |
| @@ -2476,7 +2469,7 @@ static void execlists_submission_tasklet(struct tasklet_struct *t) | |
| } | |
| if (!engine->execlists.pending[0]) { | |
| - execlists_dequeue_irq(engine); | |
| + execlists_dequeue(engine); | |
| start_timeslice(engine); | |
| } | |
| diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c | |
| index 10b930eaa8cb..6108a449cd19 100644 | |
| --- a/drivers/gpu/drm/i915/gt/intel_reset.c | |
| +++ b/drivers/gpu/drm/i915/gt/intel_reset.c | |
| @@ -174,13 +174,13 @@ static int i915_do_reset(struct intel_gt *gt, | |
| /* Assert reset for at least 20 usec, and wait for acknowledgement. */ | |
| pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); | |
| udelay(50); | |
| - err = wait_for_atomic(i915_in_reset(pdev), 50); | |
| + err = _wait_for_atomic(i915_in_reset(pdev), 50, 0); | |
| /* Clear the reset request. */ | |
| pci_write_config_byte(pdev, I915_GDRST, 0); | |
| udelay(50); | |
| if (!err) | |
| - err = wait_for_atomic(!i915_in_reset(pdev), 50); | |
| + err = _wait_for_atomic(!i915_in_reset(pdev), 50, 0); | |
| return err; | |
| } | |
| @@ -200,7 +200,7 @@ static int g33_do_reset(struct intel_gt *gt, | |
| struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); | |
| pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); | |
| - return wait_for_atomic(g4x_reset_complete(pdev), 50); | |
| + return _wait_for_atomic(g4x_reset_complete(pdev), 50, 0); | |
| } | |
| static int g4x_do_reset(struct intel_gt *gt, | |
| @@ -217,7 +217,7 @@ static int g4x_do_reset(struct intel_gt *gt, | |
| pci_write_config_byte(pdev, I915_GDRST, | |
| GRDOM_MEDIA | GRDOM_RESET_ENABLE); | |
| - ret = wait_for_atomic(g4x_reset_complete(pdev), 50); | |
| + ret = _wait_for_atomic(g4x_reset_complete(pdev), 50, 0); | |
| if (ret) { | |
| GT_TRACE(gt, "Wait for media reset failed\n"); | |
| goto out; | |
| @@ -225,7 +225,7 @@ static int g4x_do_reset(struct intel_gt *gt, | |
| pci_write_config_byte(pdev, I915_GDRST, | |
| GRDOM_RENDER | GRDOM_RESET_ENABLE); | |
| - ret = wait_for_atomic(g4x_reset_complete(pdev), 50); | |
| + ret = _wait_for_atomic(g4x_reset_complete(pdev), 50, 0); | |
| if (ret) { | |
| GT_TRACE(gt, "Wait for render reset failed\n"); | |
| goto out; | |
| @@ -718,9 +718,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) | |
| intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); | |
| for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { | |
| GT_TRACE(gt, "engine_mask=%x\n", engine_mask); | |
| - preempt_disable(); | |
| ret = reset(gt, engine_mask, retry); | |
| - preempt_enable(); | |
| } | |
| intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); | |
| diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c | |
| index f93ffa6626a5..6e9d033cf808 100644 | |
| --- a/drivers/gpu/drm/i915/i915_irq.c | |
| +++ b/drivers/gpu/drm/i915/i915_irq.c | |
| @@ -917,7 +917,8 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, | |
| */ | |
| spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | |
| - /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ | |
| + if (IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + preempt_disable(); | |
| /* Get optional system timestamp before query. */ | |
| if (stime) | |
| @@ -981,7 +982,8 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, | |
| if (etime) | |
| *etime = ktime_get(); | |
| - /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ | |
| + if (IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + preempt_enable(); | |
| spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | |
| diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c | |
| index 7ce126a01cbf..64a032dfaa90 100644 | |
| --- a/drivers/gpu/drm/i915/i915_request.c | |
| +++ b/drivers/gpu/drm/i915/i915_request.c | |
| @@ -609,7 +609,6 @@ bool __i915_request_submit(struct i915_request *request) | |
| RQ_TRACE(request, "\n"); | |
| - GEM_BUG_ON(!irqs_disabled()); | |
| lockdep_assert_held(&engine->sched_engine->lock); | |
| /* | |
| @@ -718,7 +717,6 @@ void __i915_request_unsubmit(struct i915_request *request) | |
| */ | |
| RQ_TRACE(request, "\n"); | |
| - GEM_BUG_ON(!irqs_disabled()); | |
| lockdep_assert_held(&engine->sched_engine->lock); | |
| /* | |
| diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h | |
| index 37b5c9e9d260..73f29d8008f0 100644 | |
| --- a/drivers/gpu/drm/i915/i915_trace.h | |
| +++ b/drivers/gpu/drm/i915/i915_trace.h | |
| @@ -6,6 +6,10 @@ | |
| #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) | |
| #define _I915_TRACE_H_ | |
| +#ifdef CONFIG_PREEMPT_RT | |
| +#define NOTRACE | |
| +#endif | |
| + | |
| #include <linux/stringify.h> | |
| #include <linux/types.h> | |
| #include <linux/tracepoint.h> | |
| @@ -323,7 +327,7 @@ DEFINE_EVENT(i915_request, i915_request_add, | |
| TP_ARGS(rq) | |
| ); | |
| -#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) | |
| +#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE) | |
| DEFINE_EVENT(i915_request, i915_request_guc_submit, | |
| TP_PROTO(struct i915_request *rq), | |
| TP_ARGS(rq) | |
| diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h | |
| index 6c14d13364bf..de58855e6926 100644 | |
| --- a/drivers/gpu/drm/i915/i915_utils.h | |
| +++ b/drivers/gpu/drm/i915/i915_utils.h | |
| @@ -294,7 +294,7 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) | |
| #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) | |
| /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */ | |
| -#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) | |
| +#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT) | |
| # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic()) | |
| #else | |
| # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0) | |
| diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h | |
| index 4eecbdfff3ff..82071d0e5f7f 100644 | |
| --- a/drivers/net/ethernet/alacritech/slic.h | |
| +++ b/drivers/net/ethernet/alacritech/slic.h | |
| @@ -288,13 +288,13 @@ do { \ | |
| u64_stats_update_end(&(st)->syncp); \ | |
| } while (0) | |
| -#define SLIC_GET_STATS_COUNTER(newst, st, counter) \ | |
| -{ \ | |
| - unsigned int start; \ | |
| +#define SLIC_GET_STATS_COUNTER(newst, st, counter) \ | |
| +{ \ | |
| + unsigned int start; \ | |
| do { \ | |
| - start = u64_stats_fetch_begin_irq(&(st)->syncp); \ | |
| - newst = (st)->counter; \ | |
| - } while (u64_stats_fetch_retry_irq(&(st)->syncp, start)); \ | |
| + start = u64_stats_fetch_begin(&(st)->syncp); \ | |
| + newst = (st)->counter; \ | |
| + } while (u64_stats_fetch_retry(&(st)->syncp, start)); \ | |
| } | |
| struct slic_upr { | |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c | |
| index 444ccef76da2..8da79eedc057 100644 | |
| --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c | |
| +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c | |
| @@ -118,9 +118,9 @@ static void ena_safe_update_stat(u64 *src, u64 *dst, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(syncp); | |
| + start = u64_stats_fetch_begin(syncp); | |
| *(dst) = *src; | |
| - } while (u64_stats_fetch_retry_irq(syncp, start)); | |
| + } while (u64_stats_fetch_retry(syncp, start)); | |
| } | |
| static void ena_queue_stats(struct ena_adapter *adapter, u64 **data) | |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |
| index 77fa4c35f233..d9b6f57abccb 100644 | |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c | |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |
| @@ -3345,10 +3345,10 @@ static void ena_get_stats64(struct net_device *netdev, | |
| tx_ring = &adapter->tx_ring[i]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&tx_ring->syncp); | |
| + start = u64_stats_fetch_begin(&tx_ring->syncp); | |
| packets = tx_ring->tx_stats.cnt; | |
| bytes = tx_ring->tx_stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); | |
| stats->tx_packets += packets; | |
| stats->tx_bytes += bytes; | |
| @@ -3356,20 +3356,20 @@ static void ena_get_stats64(struct net_device *netdev, | |
| rx_ring = &adapter->rx_ring[i]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rx_ring->syncp); | |
| + start = u64_stats_fetch_begin(&rx_ring->syncp); | |
| packets = rx_ring->rx_stats.cnt; | |
| bytes = rx_ring->rx_stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); | |
| stats->rx_packets += packets; | |
| stats->rx_bytes += bytes; | |
| } | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&adapter->syncp); | |
| + start = u64_stats_fetch_begin(&adapter->syncp); | |
| rx_drops = adapter->dev_stats.rx_drops; | |
| tx_drops = adapter->dev_stats.tx_drops; | |
| - } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&adapter->syncp, start)); | |
| stats->rx_dropped = rx_drops; | |
| stats->tx_dropped = tx_drops; | |
| diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |
| index 0eaaba3a18ee..4f6315e5b714 100644 | |
| --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |
| +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |
| @@ -941,7 +941,7 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) | |
| /* This data should mimic aq_ethtool_queue_rx_stat_names structure */ | |
| do { | |
| count = 0; | |
| - start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp); | |
| + start = u64_stats_fetch_begin(&self->stats.rx.syncp); | |
| data[count] = self->stats.rx.packets; | |
| data[++count] = self->stats.rx.jumbo_packets; | |
| data[++count] = self->stats.rx.lro_packets; | |
| @@ -958,15 +958,15 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) | |
| data[++count] = self->stats.rx.xdp_tx; | |
| data[++count] = self->stats.rx.xdp_invalid; | |
| data[++count] = self->stats.rx.xdp_redirect; | |
| - } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&self->stats.rx.syncp, start)); | |
| } else { | |
| /* This data should mimic aq_ethtool_queue_tx_stat_names structure */ | |
| do { | |
| count = 0; | |
| - start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp); | |
| + start = u64_stats_fetch_begin(&self->stats.tx.syncp); | |
| data[count] = self->stats.tx.packets; | |
| data[++count] = self->stats.tx.queue_restarts; | |
| - } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&self->stats.tx.syncp, start)); | |
| } | |
| return ++count; | |
| diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c | |
| index 8b7cdf015a16..21376c79f671 100644 | |
| --- a/drivers/net/ethernet/asix/ax88796c_main.c | |
| +++ b/drivers/net/ethernet/asix/ax88796c_main.c | |
| @@ -662,12 +662,12 @@ static void ax88796c_get_stats64(struct net_device *ndev, | |
| s = per_cpu_ptr(ax_local->stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&s->syncp); | |
| + start = u64_stats_fetch_begin(&s->syncp); | |
| rx_packets = u64_stats_read(&s->rx_packets); | |
| rx_bytes = u64_stats_read(&s->rx_bytes); | |
| tx_packets = u64_stats_read(&s->tx_packets); | |
| tx_bytes = u64_stats_read(&s->tx_bytes); | |
| - } while (u64_stats_fetch_retry_irq(&s->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&s->syncp, start)); | |
| stats->rx_packets += rx_packets; | |
| stats->rx_bytes += rx_bytes; | |
| diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c | |
| index 5b6209f5a801..6001d81bdd3a 100644 | |
| --- a/drivers/net/ethernet/broadcom/b44.c | |
| +++ b/drivers/net/ethernet/broadcom/b44.c | |
| @@ -1680,7 +1680,7 @@ static void b44_get_stats64(struct net_device *dev, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&hwstat->syncp); | |
| + start = u64_stats_fetch_begin(&hwstat->syncp); | |
| /* Convert HW stats into rtnl_link_stats64 stats. */ | |
| nstat->rx_packets = hwstat->rx_pkts; | |
| @@ -1714,7 +1714,7 @@ static void b44_get_stats64(struct net_device *dev, | |
| /* Carrier lost counter seems to be broken for some devices */ | |
| nstat->tx_carrier_errors = hwstat->tx_carrier_lost; | |
| #endif | |
| - } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&hwstat->syncp, start)); | |
| } | |
| @@ -2084,12 +2084,12 @@ static void b44_get_ethtool_stats(struct net_device *dev, | |
| do { | |
| data_src = &hwstat->tx_good_octets; | |
| data_dst = data; | |
| - start = u64_stats_fetch_begin_irq(&hwstat->syncp); | |
| + start = u64_stats_fetch_begin(&hwstat->syncp); | |
| for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) | |
| *data_dst++ = *data_src++; | |
| - } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&hwstat->syncp, start)); | |
| } | |
| static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |
| diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c | |
| index 425d6ccd5413..f8b1adc389b3 100644 | |
| --- a/drivers/net/ethernet/broadcom/bcmsysport.c | |
| +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |
| @@ -457,10 +457,10 @@ static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv, | |
| for (q = 0; q < priv->netdev->num_tx_queues; q++) { | |
| ring = &priv->tx_rings[q]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&priv->syncp); | |
| + start = u64_stats_fetch_begin(&priv->syncp); | |
| bytes = ring->bytes; | |
| packets = ring->packets; | |
| - } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&priv->syncp, start)); | |
| *tx_bytes += bytes; | |
| *tx_packets += packets; | |
| @@ -504,9 +504,9 @@ static void bcm_sysport_get_stats(struct net_device *dev, | |
| if (s->stat_sizeof == sizeof(u64) && | |
| s->type == BCM_SYSPORT_STAT_NETDEV64) { | |
| do { | |
| - start = u64_stats_fetch_begin_irq(syncp); | |
| + start = u64_stats_fetch_begin(syncp); | |
| data[i] = *(u64 *)p; | |
| - } while (u64_stats_fetch_retry_irq(syncp, start)); | |
| + } while (u64_stats_fetch_retry(syncp, start)); | |
| } else | |
| data[i] = *(u32 *)p; | |
| j++; | |
| @@ -1878,10 +1878,10 @@ static void bcm_sysport_get_stats64(struct net_device *dev, | |
| &stats->tx_packets); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&priv->syncp); | |
| + start = u64_stats_fetch_begin(&priv->syncp); | |
| stats->rx_packets = stats64->rx_packets; | |
| stats->rx_bytes = stats64->rx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&priv->syncp, start)); | |
| } | |
| static void bcm_sysport_netif_start(struct net_device *dev) | |
| diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c | |
| index 19fb8c4caab8..9eec22984b2c 100644 | |
| --- a/drivers/net/ethernet/cortina/gemini.c | |
| +++ b/drivers/net/ethernet/cortina/gemini.c | |
| @@ -1949,7 +1949,7 @@ static void gmac_get_stats64(struct net_device *netdev, | |
| /* Racing with RX NAPI */ | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp); | |
| + start = u64_stats_fetch_begin(&port->rx_stats_syncp); | |
| stats->rx_packets = port->stats.rx_packets; | |
| stats->rx_bytes = port->stats.rx_bytes; | |
| @@ -1961,11 +1961,11 @@ static void gmac_get_stats64(struct net_device *netdev, | |
| stats->rx_crc_errors = port->stats.rx_crc_errors; | |
| stats->rx_frame_errors = port->stats.rx_frame_errors; | |
| - } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); | |
| /* Racing with MIB and TX completion interrupts */ | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp); | |
| + start = u64_stats_fetch_begin(&port->ir_stats_syncp); | |
| stats->tx_errors = port->stats.tx_errors; | |
| stats->tx_packets = port->stats.tx_packets; | |
| @@ -1975,15 +1975,15 @@ static void gmac_get_stats64(struct net_device *netdev, | |
| stats->rx_missed_errors = port->stats.rx_missed_errors; | |
| stats->rx_fifo_errors = port->stats.rx_fifo_errors; | |
| - } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); | |
| /* Racing with hard_start_xmit */ | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp); | |
| + start = u64_stats_fetch_begin(&port->tx_stats_syncp); | |
| stats->tx_dropped = port->stats.tx_dropped; | |
| - } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); | |
| stats->rx_dropped += stats->rx_missed_errors; | |
| } | |
| @@ -2052,18 +2052,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev, | |
| /* Racing with MIB interrupt */ | |
| do { | |
| p = values; | |
| - start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp); | |
| + start = u64_stats_fetch_begin(&port->ir_stats_syncp); | |
| for (i = 0; i < RX_STATS_NUM; i++) | |
| *p++ = port->hw_stats[i]; | |
| - } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); | |
| values = p; | |
| /* Racing with RX NAPI */ | |
| do { | |
| p = values; | |
| - start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp); | |
| + start = u64_stats_fetch_begin(&port->rx_stats_syncp); | |
| for (i = 0; i < RX_STATUS_NUM; i++) | |
| *p++ = port->rx_stats[i]; | |
| @@ -2071,13 +2071,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev, | |
| *p++ = port->rx_csum_stats[i]; | |
| *p++ = port->rx_napi_exits; | |
| - } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); | |
| values = p; | |
| /* Racing with TX start_xmit */ | |
| do { | |
| p = values; | |
| - start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp); | |
| + start = u64_stats_fetch_begin(&port->tx_stats_syncp); | |
| for (i = 0; i < TX_MAX_FRAGS; i++) { | |
| *values++ = port->tx_frag_stats[i]; | |
| @@ -2086,7 +2086,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev, | |
| *values++ = port->tx_frags_linearized; | |
| *values++ = port->tx_hw_csummed; | |
| - } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); | |
| } | |
| static int gmac_get_ksettings(struct net_device *netdev, | |
| diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c | |
| index 77edc3d9b505..a29de29bdf23 100644 | |
| --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c | |
| +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c | |
| @@ -389,10 +389,10 @@ static void be_get_ethtool_stats(struct net_device *netdev, | |
| struct be_rx_stats *stats = rx_stats(rxo); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->sync); | |
| + start = u64_stats_fetch_begin(&stats->sync); | |
| data[base] = stats->rx_bytes; | |
| data[base + 1] = stats->rx_pkts; | |
| - } while (u64_stats_fetch_retry_irq(&stats->sync, start)); | |
| + } while (u64_stats_fetch_retry(&stats->sync, start)); | |
| for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) { | |
| p = (u8 *)stats + et_rx_stats[i].offset; | |
| @@ -405,19 +405,19 @@ static void be_get_ethtool_stats(struct net_device *netdev, | |
| struct be_tx_stats *stats = tx_stats(txo); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->sync_compl); | |
| + start = u64_stats_fetch_begin(&stats->sync_compl); | |
| data[base] = stats->tx_compl; | |
| - } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start)); | |
| + } while (u64_stats_fetch_retry(&stats->sync_compl, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->sync); | |
| + start = u64_stats_fetch_begin(&stats->sync); | |
| for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) { | |
| p = (u8 *)stats + et_tx_stats[i].offset; | |
| data[base + i] = | |
| (et_tx_stats[i].size == sizeof(u64)) ? | |
| *(u64 *)p : *(u32 *)p; | |
| } | |
| - } while (u64_stats_fetch_retry_irq(&stats->sync, start)); | |
| + } while (u64_stats_fetch_retry(&stats->sync, start)); | |
| base += ETHTOOL_TXSTATS_NUM; | |
| } | |
| } | |
| diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c | |
| index a9e4e6464a04..968fecfc03bd 100644 | |
| --- a/drivers/net/ethernet/emulex/benet/be_main.c | |
| +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |
| @@ -665,10 +665,10 @@ static void be_get_stats64(struct net_device *netdev, | |
| const struct be_rx_stats *rx_stats = rx_stats(rxo); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rx_stats->sync); | |
| + start = u64_stats_fetch_begin(&rx_stats->sync); | |
| pkts = rx_stats(rxo)->rx_pkts; | |
| bytes = rx_stats(rxo)->rx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start)); | |
| + } while (u64_stats_fetch_retry(&rx_stats->sync, start)); | |
| stats->rx_packets += pkts; | |
| stats->rx_bytes += bytes; | |
| stats->multicast += rx_stats(rxo)->rx_mcast_pkts; | |
| @@ -680,10 +680,10 @@ static void be_get_stats64(struct net_device *netdev, | |
| const struct be_tx_stats *tx_stats = tx_stats(txo); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&tx_stats->sync); | |
| + start = u64_stats_fetch_begin(&tx_stats->sync); | |
| pkts = tx_stats(txo)->tx_pkts; | |
| bytes = tx_stats(txo)->tx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start)); | |
| + } while (u64_stats_fetch_retry(&tx_stats->sync, start)); | |
| stats->tx_packets += pkts; | |
| stats->tx_bytes += bytes; | |
| } | |
| @@ -2156,16 +2156,16 @@ static int be_get_new_eqd(struct be_eq_obj *eqo) | |
| for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rxo->stats.sync); | |
| + start = u64_stats_fetch_begin(&rxo->stats.sync); | |
| rx_pkts += rxo->stats.rx_pkts; | |
| - } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start)); | |
| + } while (u64_stats_fetch_retry(&rxo->stats.sync, start)); | |
| } | |
| for_all_tx_queues_on_eq(adapter, eqo, txo, i) { | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&txo->stats.sync); | |
| + start = u64_stats_fetch_begin(&txo->stats.sync); | |
| tx_pkts += txo->stats.tx_reqs; | |
| - } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start)); | |
| + } while (u64_stats_fetch_retry(&txo->stats.sync, start)); | |
| } | |
| /* Skip, if wrapped around or first calculation */ | |
| diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h | |
| index 671f51135c26..53b7e95213a8 100644 | |
| --- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h | |
| +++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h | |
| @@ -206,9 +206,9 @@ struct funeth_rxq { | |
| #define FUN_QSTAT_READ(q, seq, stats_copy) \ | |
| do { \ | |
| - seq = u64_stats_fetch_begin_irq(&(q)->syncp); \ | |
| + seq = u64_stats_fetch_begin(&(q)->syncp); \ | |
| stats_copy = (q)->stats; \ | |
| - } while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq))) | |
| + } while (u64_stats_fetch_retry(&(q)->syncp, (seq))) | |
| #define FUN_INT_NAME_LEN (IFNAMSIZ + 16) | |
| diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c | |
| index 033f17cb96be..0a5953089a24 100644 | |
| --- a/drivers/net/ethernet/google/gve/gve_ethtool.c | |
| +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c | |
| @@ -177,14 +177,14 @@ gve_get_ethtool_stats(struct net_device *netdev, | |
| struct gve_rx_ring *rx = &priv->rx[ring]; | |
| start = | |
| - u64_stats_fetch_begin_irq(&priv->rx[ring].statss); | |
| + u64_stats_fetch_begin(&priv->rx[ring].statss); | |
| tmp_rx_pkts = rx->rpackets; | |
| tmp_rx_bytes = rx->rbytes; | |
| tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail; | |
| tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; | |
| tmp_rx_desc_err_dropped_pkt = | |
| rx->rx_desc_err_dropped_pkt; | |
| - } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss, | |
| + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, | |
| start)); | |
| rx_pkts += tmp_rx_pkts; | |
| rx_bytes += tmp_rx_bytes; | |
| @@ -198,10 +198,10 @@ gve_get_ethtool_stats(struct net_device *netdev, | |
| if (priv->tx) { | |
| do { | |
| start = | |
| - u64_stats_fetch_begin_irq(&priv->tx[ring].statss); | |
| + u64_stats_fetch_begin(&priv->tx[ring].statss); | |
| tmp_tx_pkts = priv->tx[ring].pkt_done; | |
| tmp_tx_bytes = priv->tx[ring].bytes_done; | |
| - } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, | |
| + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, | |
| start)); | |
| tx_pkts += tmp_tx_pkts; | |
| tx_bytes += tmp_tx_bytes; | |
| @@ -259,13 +259,13 @@ gve_get_ethtool_stats(struct net_device *netdev, | |
| data[i++] = rx->fill_cnt - rx->cnt; | |
| do { | |
| start = | |
| - u64_stats_fetch_begin_irq(&priv->rx[ring].statss); | |
| + u64_stats_fetch_begin(&priv->rx[ring].statss); | |
| tmp_rx_bytes = rx->rbytes; | |
| tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail; | |
| tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; | |
| tmp_rx_desc_err_dropped_pkt = | |
| rx->rx_desc_err_dropped_pkt; | |
| - } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss, | |
| + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, | |
| start)); | |
| data[i++] = tmp_rx_bytes; | |
| data[i++] = rx->rx_cont_packet_cnt; | |
| @@ -331,9 +331,9 @@ gve_get_ethtool_stats(struct net_device *netdev, | |
| } | |
| do { | |
| start = | |
| - u64_stats_fetch_begin_irq(&priv->tx[ring].statss); | |
| + u64_stats_fetch_begin(&priv->tx[ring].statss); | |
| tmp_tx_bytes = tx->bytes_done; | |
| - } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, | |
| + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, | |
| start)); | |
| data[i++] = tmp_tx_bytes; | |
| data[i++] = tx->wake_queue; | |
| diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c | |
| index d3f6ad586ba1..1c2cd3ee1956 100644 | |
| --- a/drivers/net/ethernet/google/gve/gve_main.c | |
| +++ b/drivers/net/ethernet/google/gve/gve_main.c | |
| @@ -51,10 +51,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) | |
| for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { | |
| do { | |
| start = | |
| - u64_stats_fetch_begin_irq(&priv->rx[ring].statss); | |
| + u64_stats_fetch_begin(&priv->rx[ring].statss); | |
| packets = priv->rx[ring].rpackets; | |
| bytes = priv->rx[ring].rbytes; | |
| - } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss, | |
| + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, | |
| start)); | |
| s->rx_packets += packets; | |
| s->rx_bytes += bytes; | |
| @@ -64,10 +64,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) | |
| for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { | |
| do { | |
| start = | |
| - u64_stats_fetch_begin_irq(&priv->tx[ring].statss); | |
| + u64_stats_fetch_begin(&priv->tx[ring].statss); | |
| packets = priv->tx[ring].pkt_done; | |
| bytes = priv->tx[ring].bytes_done; | |
| - } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, | |
| + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, | |
| start)); | |
| s->tx_packets += packets; | |
| s->tx_bytes += bytes; | |
| @@ -1260,9 +1260,9 @@ void gve_handle_report_stats(struct gve_priv *priv) | |
| } | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss); | |
| + start = u64_stats_fetch_begin(&priv->tx[idx].statss); | |
| tx_bytes = priv->tx[idx].bytes_done; | |
| - } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start)); | |
| + } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start)); | |
| stats[stats_idx++] = (struct stats) { | |
| .stat_name = cpu_to_be32(TX_WAKE_CNT), | |
| .value = cpu_to_be64(priv->tx[idx].wake_queue), | |
| diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | |
| index 0377a056aaec..e08ddd9815d0 100644 | |
| --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | |
| +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | |
| @@ -2496,7 +2496,7 @@ static void hns3_fetch_stats(struct rtnl_link_stats64 *stats, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| if (is_tx) { | |
| stats->tx_bytes += ring->stats.tx_bytes; | |
| stats->tx_packets += ring->stats.tx_pkts; | |
| @@ -2530,7 +2530,7 @@ static void hns3_fetch_stats(struct rtnl_link_stats64 *stats, | |
| stats->multicast += ring->stats.rx_multicast; | |
| stats->rx_length_errors += ring->stats.err_pkt_len; | |
| } | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| } | |
| static void hns3_nic_get_stats64(struct net_device *netdev, | |
| diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c | |
| index d649c6e323c8..ceec8be2a73b 100644 | |
| --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c | |
| +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c | |
| @@ -74,14 +74,14 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rxq_stats->syncp); | |
| + start = u64_stats_fetch_begin(&rxq_stats->syncp); | |
| stats->pkts = rxq_stats->pkts; | |
| stats->bytes = rxq_stats->bytes; | |
| stats->errors = rxq_stats->csum_errors + | |
| rxq_stats->other_errors; | |
| stats->csum_errors = rxq_stats->csum_errors; | |
| stats->other_errors = rxq_stats->other_errors; | |
| - } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); | |
| } | |
| /** | |
| diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c | |
| index e91476c8ff8b..ad47ac51a139 100644 | |
| --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c | |
| +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c | |
| @@ -99,14 +99,14 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&txq_stats->syncp); | |
| + start = u64_stats_fetch_begin(&txq_stats->syncp); | |
| stats->pkts = txq_stats->pkts; | |
| stats->bytes = txq_stats->bytes; | |
| stats->tx_busy = txq_stats->tx_busy; | |
| stats->tx_wake = txq_stats->tx_wake; | |
| stats->tx_dropped = txq_stats->tx_dropped; | |
| stats->big_frags_pkts = txq_stats->big_frags_pkts; | |
| - } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); | |
| } | |
| /** | |
| diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | |
| index 2cca9e84e31e..34ab5ff9823b 100644 | |
| --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | |
| +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | |
| @@ -1229,10 +1229,10 @@ static void fm10k_get_stats64(struct net_device *netdev, | |
| continue; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| packets = ring->stats.packets; | |
| bytes = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| stats->rx_packets += packets; | |
| stats->rx_bytes += bytes; | |
| @@ -1245,10 +1245,10 @@ static void fm10k_get_stats64(struct net_device *netdev, | |
| continue; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| packets = ring->stats.packets; | |
| bytes = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| stats->tx_packets += packets; | |
| stats->tx_bytes += bytes; | |
| diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |
| index 107bcca7db8c..8f36fe90180f 100644 | |
| --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |
| +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |
| @@ -154,7 +154,7 @@ __i40e_add_ethtool_stats(u64 **data, void *pointer, | |
| * @ring: the ring to copy | |
| * | |
| * Queue statistics must be copied while protected by | |
| - * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. | |
| + * u64_stats_fetch_begin, so we can't directly use i40e_add_ethtool_stats. | |
| * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the | |
| * ring pointer is null, zero out the queue stat values and update the data | |
| * pointer. Otherwise safely copy the stats from the ring into the supplied | |
| @@ -172,16 +172,16 @@ i40e_add_queue_stats(u64 **data, struct i40e_ring *ring) | |
| /* To avoid invalid statistics values, ensure that we keep retrying | |
| * the copy until we get a consistent value according to | |
| - * u64_stats_fetch_retry_irq. But first, make sure our ring is | |
| + * u64_stats_fetch_retry. But first, make sure our ring is | |
| * non-null before attempting to access its syncp. | |
| */ | |
| do { | |
| - start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp); | |
| for (i = 0; i < size; i++) { | |
| i40e_add_one_ethtool_stat(&(*data)[i], ring, | |
| &stats[i]); | |
| } | |
| - } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (ring && u64_stats_fetch_retry(&ring->syncp, start)); | |
| /* Once we successfully copy the stats in, update the data pointer */ | |
| *data += size; | |
| diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c | |
| index 1194dcacbd29..2a304f4507e0 100644 | |
| --- a/drivers/net/ethernet/intel/i40e/i40e_main.c | |
| +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |
| @@ -425,10 +425,10 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| packets = ring->stats.packets; | |
| bytes = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| stats->tx_packets += packets; | |
| stats->tx_bytes += bytes; | |
| @@ -478,10 +478,10 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, | |
| if (!ring) | |
| continue; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| packets = ring->stats.packets; | |
| bytes = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| stats->rx_packets += packets; | |
| stats->rx_bytes += bytes; | |
| @@ -903,10 +903,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) | |
| continue; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&p->syncp); | |
| + start = u64_stats_fetch_begin(&p->syncp); | |
| packets = p->stats.packets; | |
| bytes = p->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&p->syncp, start)); | |
| tx_b += bytes; | |
| tx_p += packets; | |
| tx_restart += p->tx_stats.restart_queue; | |
| @@ -921,10 +921,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) | |
| continue; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&p->syncp); | |
| + start = u64_stats_fetch_begin(&p->syncp); | |
| packets = p->stats.packets; | |
| bytes = p->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&p->syncp, start)); | |
| rx_b += bytes; | |
| rx_p += packets; | |
| rx_buf += p->rx_stats.alloc_buff_failed; | |
| @@ -941,10 +941,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) | |
| continue; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&p->syncp); | |
| + start = u64_stats_fetch_begin(&p->syncp); | |
| packets = p->stats.packets; | |
| bytes = p->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&p->syncp, start)); | |
| tx_b += bytes; | |
| tx_p += packets; | |
| tx_restart += p->tx_stats.restart_queue; | |
| diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c | |
| index f4ac2b164b3e..892c6a4f03bb 100644 | |
| --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c | |
| +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c | |
| @@ -147,7 +147,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer, | |
| * @ring: the ring to copy | |
| * | |
| * Queue statistics must be copied while protected by | |
| - * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats. | |
| + * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats. | |
| * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the | |
| * ring pointer is null, zero out the queue stat values and update the data | |
| * pointer. Otherwise safely copy the stats from the ring into the supplied | |
| @@ -165,14 +165,14 @@ iavf_add_queue_stats(u64 **data, struct iavf_ring *ring) | |
| /* To avoid invalid statistics values, ensure that we keep retrying | |
| * the copy until we get a consistent value according to | |
| - * u64_stats_fetch_retry_irq. But first, make sure our ring is | |
| + * u64_stats_fetch_retry. But first, make sure our ring is | |
| * non-null before attempting to access its syncp. | |
| */ | |
| do { | |
| - start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp); | |
| for (i = 0; i < size; i++) | |
| iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]); | |
| - } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (ring && u64_stats_fetch_retry(&ring->syncp, start)); | |
| /* Once we successfully copy the stats in, update the data pointer */ | |
| *data += size; | |
| diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c | |
| index 3f01942e4982..9a32f434869e 100644 | |
| --- a/drivers/net/ethernet/intel/ice/ice_main.c | |
| +++ b/drivers/net/ethernet/intel/ice/ice_main.c | |
| @@ -6424,10 +6424,10 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(syncp); | |
| + start = u64_stats_fetch_begin(syncp); | |
| *pkts = stats.pkts; | |
| *bytes = stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(syncp, start)); | |
| + } while (u64_stats_fetch_retry(syncp, start)); | |
| } | |
| /** | |
| diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |
| index ceff537d9d22..4ee849985e2b 100644 | |
| --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c | |
| +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |
| @@ -2316,15 +2316,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev, | |
| ring = adapter->tx_ring[j]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->tx_syncp); | |
| + start = u64_stats_fetch_begin(&ring->tx_syncp); | |
| data[i] = ring->tx_stats.packets; | |
| data[i+1] = ring->tx_stats.bytes; | |
| data[i+2] = ring->tx_stats.restart_queue; | |
| - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); | |
| + start = u64_stats_fetch_begin(&ring->tx_syncp2); | |
| restart2 = ring->tx_stats.restart_queue2; | |
| - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); | |
| + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); | |
| data[i+2] += restart2; | |
| i += IGB_TX_QUEUE_STATS_LEN; | |
| @@ -2332,13 +2332,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev, | |
| for (j = 0; j < adapter->num_rx_queues; j++) { | |
| ring = adapter->rx_ring[j]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->rx_syncp); | |
| + start = u64_stats_fetch_begin(&ring->rx_syncp); | |
| data[i] = ring->rx_stats.packets; | |
| data[i+1] = ring->rx_stats.bytes; | |
| data[i+2] = ring->rx_stats.drops; | |
| data[i+3] = ring->rx_stats.csum_err; | |
| data[i+4] = ring->rx_stats.alloc_failed; | |
| - } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); | |
| i += IGB_RX_QUEUE_STATS_LEN; | |
| } | |
| spin_unlock(&adapter->stats64_lock); | |
| diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c | |
| index f2f719a952f8..948329c0369a 100644 | |
| --- a/drivers/net/ethernet/intel/igb/igb_main.c | |
| +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |
| @@ -6664,10 +6664,10 @@ void igb_update_stats(struct igb_adapter *adapter) | |
| } | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->rx_syncp); | |
| + start = u64_stats_fetch_begin(&ring->rx_syncp); | |
| _bytes = ring->rx_stats.bytes; | |
| _packets = ring->rx_stats.packets; | |
| - } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); | |
| bytes += _bytes; | |
| packets += _packets; | |
| } | |
| @@ -6680,10 +6680,10 @@ void igb_update_stats(struct igb_adapter *adapter) | |
| for (i = 0; i < adapter->num_tx_queues; i++) { | |
| struct igb_ring *ring = adapter->tx_ring[i]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->tx_syncp); | |
| + start = u64_stats_fetch_begin(&ring->tx_syncp); | |
| _bytes = ring->tx_stats.bytes; | |
| _packets = ring->tx_stats.packets; | |
| - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); | |
| bytes += _bytes; | |
| packets += _packets; | |
| } | |
| diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c | |
| index 2bee9cace598..f7284fa4324a 100644 | |
| --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c | |
| +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c | |
| @@ -840,15 +840,15 @@ static void igc_ethtool_get_stats(struct net_device *netdev, | |
| ring = adapter->tx_ring[j]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->tx_syncp); | |
| + start = u64_stats_fetch_begin(&ring->tx_syncp); | |
| data[i] = ring->tx_stats.packets; | |
| data[i + 1] = ring->tx_stats.bytes; | |
| data[i + 2] = ring->tx_stats.restart_queue; | |
| - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); | |
| + start = u64_stats_fetch_begin(&ring->tx_syncp2); | |
| restart2 = ring->tx_stats.restart_queue2; | |
| - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); | |
| + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); | |
| data[i + 2] += restart2; | |
| i += IGC_TX_QUEUE_STATS_LEN; | |
| @@ -856,13 +856,13 @@ static void igc_ethtool_get_stats(struct net_device *netdev, | |
| for (j = 0; j < adapter->num_rx_queues; j++) { | |
| ring = adapter->rx_ring[j]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->rx_syncp); | |
| + start = u64_stats_fetch_begin(&ring->rx_syncp); | |
| data[i] = ring->rx_stats.packets; | |
| data[i + 1] = ring->rx_stats.bytes; | |
| data[i + 2] = ring->rx_stats.drops; | |
| data[i + 3] = ring->rx_stats.csum_err; | |
| data[i + 4] = ring->rx_stats.alloc_failed; | |
| - } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); | |
| i += IGC_RX_QUEUE_STATS_LEN; | |
| } | |
| spin_unlock(&adapter->stats64_lock); | |
| diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c | |
| index 6ae2d0b723c8..c2e74f20472a 100644 | |
| --- a/drivers/net/ethernet/intel/igc/igc_main.c | |
| +++ b/drivers/net/ethernet/intel/igc/igc_main.c | |
| @@ -4868,10 +4868,10 @@ void igc_update_stats(struct igc_adapter *adapter) | |
| } | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->rx_syncp); | |
| + start = u64_stats_fetch_begin(&ring->rx_syncp); | |
| _bytes = ring->rx_stats.bytes; | |
| _packets = ring->rx_stats.packets; | |
| - } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); | |
| bytes += _bytes; | |
| packets += _packets; | |
| } | |
| @@ -4885,10 +4885,10 @@ void igc_update_stats(struct igc_adapter *adapter) | |
| struct igc_ring *ring = adapter->tx_ring[i]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->tx_syncp); | |
| + start = u64_stats_fetch_begin(&ring->tx_syncp); | |
| _bytes = ring->tx_stats.bytes; | |
| _packets = ring->tx_stats.packets; | |
| - } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); | |
| bytes += _bytes; | |
| packets += _packets; | |
| } | |
| diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | |
| index f8e65e18284e..80e1003e9626 100644 | |
| --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | |
| +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | |
| @@ -1335,10 +1335,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, | |
| } | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| data[i] = ring->stats.packets; | |
| data[i+1] = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| i += 2; | |
| } | |
| for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { | |
| @@ -1351,10 +1351,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, | |
| } | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| data[i] = ring->stats.packets; | |
| data[i+1] = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| i += 2; | |
| } | |
| diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |
| index 086cc2573033..f48de0bca8a4 100644 | |
| --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |
| +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |
| @@ -9047,10 +9047,10 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, | |
| if (ring) { | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| packets = ring->stats.packets; | |
| bytes = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| stats->tx_packets += packets; | |
| stats->tx_bytes += bytes; | |
| } | |
| @@ -9070,10 +9070,10 @@ static void ixgbe_get_stats64(struct net_device *netdev, | |
| if (ring) { | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| packets = ring->stats.packets; | |
| bytes = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| stats->rx_packets += packets; | |
| stats->rx_bytes += bytes; | |
| } | |
| diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c | |
| index ccfa6b91aac6..296915414a7c 100644 | |
| --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c | |
| +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c | |
| @@ -458,10 +458,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, | |
| } | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| data[i] = ring->stats.packets; | |
| data[i + 1] = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| i += 2; | |
| } | |
| @@ -475,10 +475,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, | |
| } | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| data[i] = ring->stats.packets; | |
| data[i + 1] = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| i += 2; | |
| } | |
| @@ -492,10 +492,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, | |
| } | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| data[i] = ring->stats.packets; | |
| data[i + 1] = ring->stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| i += 2; | |
| } | |
| } | |
| diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |
| index e338fa572793..a9479ddf68eb 100644 | |
| --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |
| +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |
| @@ -4350,10 +4350,10 @@ static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats, | |
| if (ring) { | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| bytes = ring->stats.bytes; | |
| packets = ring->stats.packets; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| stats->tx_bytes += bytes; | |
| stats->tx_packets += packets; | |
| } | |
| @@ -4376,10 +4376,10 @@ static void ixgbevf_get_stats(struct net_device *netdev, | |
| for (i = 0; i < adapter->num_rx_queues; i++) { | |
| ring = adapter->rx_ring[i]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ring->syncp); | |
| + start = u64_stats_fetch_begin(&ring->syncp); | |
| bytes = ring->stats.bytes; | |
| packets = ring->stats.packets; | |
| - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ring->syncp, start)); | |
| stats->rx_bytes += bytes; | |
| stats->rx_packets += packets; | |
| } | |
| diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c | |
| index eb4ebaa1c92f..327f03f80836 100644 | |
| --- a/drivers/net/ethernet/marvell/mvneta.c | |
| +++ b/drivers/net/ethernet/marvell/mvneta.c | |
| @@ -813,14 +813,14 @@ mvneta_get_stats64(struct net_device *dev, | |
| cpu_stats = per_cpu_ptr(pp->stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
| + start = u64_stats_fetch_begin(&cpu_stats->syncp); | |
| rx_packets = cpu_stats->es.ps.rx_packets; | |
| rx_bytes = cpu_stats->es.ps.rx_bytes; | |
| rx_dropped = cpu_stats->rx_dropped; | |
| rx_errors = cpu_stats->rx_errors; | |
| tx_packets = cpu_stats->es.ps.tx_packets; | |
| tx_bytes = cpu_stats->es.ps.tx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); | |
| stats->rx_packets += rx_packets; | |
| stats->rx_bytes += rx_bytes; | |
| @@ -4765,7 +4765,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, | |
| stats = per_cpu_ptr(pp->stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->syncp); | |
| + start = u64_stats_fetch_begin(&stats->syncp); | |
| skb_alloc_error = stats->es.skb_alloc_error; | |
| refill_error = stats->es.refill_error; | |
| xdp_redirect = stats->es.ps.xdp_redirect; | |
| @@ -4775,7 +4775,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, | |
| xdp_xmit_err = stats->es.ps.xdp_xmit_err; | |
| xdp_tx = stats->es.ps.xdp_tx; | |
| xdp_tx_err = stats->es.ps.xdp_tx_err; | |
| - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&stats->syncp, start)); | |
| es->skb_alloc_error += skb_alloc_error; | |
| es->refill_error += refill_error; | |
| diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |
| index bbcdab562513..6a6ad1094eac 100644 | |
| --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |
| +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |
| @@ -2033,7 +2033,7 @@ mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) | |
| cpu_stats = per_cpu_ptr(port->stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
| + start = u64_stats_fetch_begin(&cpu_stats->syncp); | |
| xdp_redirect = cpu_stats->xdp_redirect; | |
| xdp_pass = cpu_stats->xdp_pass; | |
| xdp_drop = cpu_stats->xdp_drop; | |
| @@ -2041,7 +2041,7 @@ mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) | |
| xdp_xmit_err = cpu_stats->xdp_xmit_err; | |
| xdp_tx = cpu_stats->xdp_tx; | |
| xdp_tx_err = cpu_stats->xdp_tx_err; | |
| - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); | |
| xdp_stats->xdp_redirect += xdp_redirect; | |
| xdp_stats->xdp_pass += xdp_pass; | |
| @@ -5140,12 +5140,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | |
| cpu_stats = per_cpu_ptr(port->stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
| + start = u64_stats_fetch_begin(&cpu_stats->syncp); | |
| rx_packets = cpu_stats->rx_packets; | |
| rx_bytes = cpu_stats->rx_bytes; | |
| tx_packets = cpu_stats->tx_packets; | |
| tx_bytes = cpu_stats->tx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); | |
| stats->rx_packets += rx_packets; | |
| stats->rx_bytes += rx_bytes; | |
| diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c | |
| index ab33ba1c3023..ff97b140886a 100644 | |
| --- a/drivers/net/ethernet/marvell/sky2.c | |
| +++ b/drivers/net/ethernet/marvell/sky2.c | |
| @@ -3894,19 +3894,19 @@ static void sky2_get_stats(struct net_device *dev, | |
| u64 _bytes, _packets; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp); | |
| + start = u64_stats_fetch_begin(&sky2->rx_stats.syncp); | |
| _bytes = sky2->rx_stats.bytes; | |
| _packets = sky2->rx_stats.packets; | |
| - } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&sky2->rx_stats.syncp, start)); | |
| stats->rx_packets = _packets; | |
| stats->rx_bytes = _bytes; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp); | |
| + start = u64_stats_fetch_begin(&sky2->tx_stats.syncp); | |
| _bytes = sky2->tx_stats.bytes; | |
| _packets = sky2->tx_stats.packets; | |
| - } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&sky2->tx_stats.syncp, start)); | |
| stats->tx_packets = _packets; | |
| stats->tx_bytes = _bytes; | |
| diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |
| index fecf3dd22dfa..62726113cba7 100644 | |
| --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |
| +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |
| @@ -865,7 +865,7 @@ static void mtk_get_stats64(struct net_device *dev, | |
| } | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&hw_stats->syncp); | |
| + start = u64_stats_fetch_begin(&hw_stats->syncp); | |
| storage->rx_packets = hw_stats->rx_packets; | |
| storage->tx_packets = hw_stats->tx_packets; | |
| storage->rx_bytes = hw_stats->rx_bytes; | |
| @@ -877,7 +877,7 @@ static void mtk_get_stats64(struct net_device *dev, | |
| storage->rx_crc_errors = hw_stats->rx_fcs_errors; | |
| storage->rx_errors = hw_stats->rx_checksum_errors; | |
| storage->tx_aborted_errors = hw_stats->tx_skip; | |
| - } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&hw_stats->syncp, start)); | |
| storage->tx_errors = dev->stats.tx_errors; | |
| storage->rx_dropped = dev->stats.rx_dropped; | |
| @@ -3693,13 +3693,13 @@ static void mtk_get_ethtool_stats(struct net_device *dev, | |
| do { | |
| data_dst = data; | |
| - start = u64_stats_fetch_begin_irq(&hwstats->syncp); | |
| + start = u64_stats_fetch_begin(&hwstats->syncp); | |
| for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) | |
| *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); | |
| if (mtk_page_pool_enabled(mac->hw)) | |
| mtk_ethtool_pp_stats(mac->hw, data_dst); | |
| - } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&hwstats->syncp, start)); | |
| } | |
| static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, | |
| diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |
| index 67ecdb9e708f..8345499563a4 100644 | |
| --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |
| +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |
| @@ -827,12 +827,12 @@ mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, | |
| for_each_possible_cpu(i) { | |
| p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&p->syncp); | |
| + start = u64_stats_fetch_begin(&p->syncp); | |
| rx_packets = p->rx_packets; | |
| rx_bytes = p->rx_bytes; | |
| tx_packets = p->tx_packets; | |
| tx_bytes = p->tx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&p->syncp, start)); | |
| stats->rx_packets += rx_packets; | |
| stats->rx_bytes += rx_bytes; | |
| diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c | |
| index b52612eef0a6..8bf4919dc960 100644 | |
| --- a/drivers/net/ethernet/microsoft/mana/mana_en.c | |
| +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c | |
| @@ -316,10 +316,10 @@ static void mana_get_stats64(struct net_device *ndev, | |
| rx_stats = &apc->rxqs[q]->stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&rx_stats->syncp); | |
| packets = rx_stats->packets; | |
| bytes = rx_stats->bytes; | |
| - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); | |
| st->rx_packets += packets; | |
| st->rx_bytes += bytes; | |
| @@ -329,10 +329,10 @@ static void mana_get_stats64(struct net_device *ndev, | |
| tx_stats = &apc->tx_qp[q].txq.stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&tx_stats->syncp); | |
| packets = tx_stats->packets; | |
| bytes = tx_stats->bytes; | |
| - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); | |
| st->tx_packets += packets; | |
| st->tx_bytes += bytes; | |
| diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c | |
| index c530db76880f..96d55c91c969 100644 | |
| --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c | |
| +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c | |
| @@ -90,13 +90,13 @@ static void mana_get_ethtool_stats(struct net_device *ndev, | |
| rx_stats = &apc->rxqs[q]->stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&rx_stats->syncp); | |
| packets = rx_stats->packets; | |
| bytes = rx_stats->bytes; | |
| xdp_drop = rx_stats->xdp_drop; | |
| xdp_tx = rx_stats->xdp_tx; | |
| xdp_redirect = rx_stats->xdp_redirect; | |
| - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); | |
| data[i++] = packets; | |
| data[i++] = bytes; | |
| @@ -109,11 +109,11 @@ static void mana_get_ethtool_stats(struct net_device *ndev, | |
| tx_stats = &apc->tx_qp[q].txq.stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&tx_stats->syncp); | |
| packets = tx_stats->packets; | |
| bytes = tx_stats->bytes; | |
| xdp_xmit = tx_stats->xdp_xmit; | |
| - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); | |
| data[i++] = packets; | |
| data[i++] = bytes; | |
| diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |
| index 27f4786ace4f..a5ca5c4a7896 100644 | |
| --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |
| +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |
| @@ -1631,21 +1631,21 @@ static void nfp_net_stat64(struct net_device *netdev, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&r_vec->rx_sync); | |
| + start = u64_stats_fetch_begin(&r_vec->rx_sync); | |
| data[0] = r_vec->rx_pkts; | |
| data[1] = r_vec->rx_bytes; | |
| data[2] = r_vec->rx_drops; | |
| - } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start)); | |
| + } while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); | |
| stats->rx_packets += data[0]; | |
| stats->rx_bytes += data[1]; | |
| stats->rx_dropped += data[2]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&r_vec->tx_sync); | |
| + start = u64_stats_fetch_begin(&r_vec->tx_sync); | |
| data[0] = r_vec->tx_pkts; | |
| data[1] = r_vec->tx_bytes; | |
| data[2] = r_vec->tx_errors; | |
| - } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start)); | |
| + } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); | |
| stats->tx_packets += data[0]; | |
| stats->tx_bytes += data[1]; | |
| stats->tx_errors += data[2]; | |
| diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | |
| index af376b900067..cc97b3d00414 100644 | |
| --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | |
| +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | |
| @@ -881,7 +881,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync); | |
| + start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); | |
| data[0] = nn->r_vecs[i].rx_pkts; | |
| tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; | |
| tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; | |
| @@ -889,10 +889,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) | |
| tmp[3] = nn->r_vecs[i].hw_csum_rx_error; | |
| tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail; | |
| tmp[5] = nn->r_vecs[i].hw_tls_rx; | |
| - } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start)); | |
| + } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync); | |
| + start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); | |
| data[1] = nn->r_vecs[i].tx_pkts; | |
| data[2] = nn->r_vecs[i].tx_busy; | |
| tmp[6] = nn->r_vecs[i].hw_csum_tx; | |
| @@ -902,7 +902,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) | |
| tmp[10] = nn->r_vecs[i].hw_tls_tx; | |
| tmp[11] = nn->r_vecs[i].tls_tx_fallback; | |
| tmp[12] = nn->r_vecs[i].tls_tx_no_fallback; | |
| - } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start)); | |
| + } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); | |
| data += NN_RVEC_PER_Q_STATS; | |
| diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | |
| index 8b77582bdfa0..a6b6ca1fd55e 100644 | |
| --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | |
| +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | |
| @@ -134,13 +134,13 @@ nfp_repr_get_host_stats64(const struct net_device *netdev, | |
| repr_stats = per_cpu_ptr(repr->stats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&repr_stats->syncp); | |
| + start = u64_stats_fetch_begin(&repr_stats->syncp); | |
| tbytes = repr_stats->tx_bytes; | |
| tpkts = repr_stats->tx_packets; | |
| tdrops = repr_stats->tx_drops; | |
| rbytes = repr_stats->rx_bytes; | |
| rpkts = repr_stats->rx_packets; | |
| - } while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&repr_stats->syncp, start)); | |
| stats->tx_bytes += tbytes; | |
| stats->tx_packets += tpkts; | |
| diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c | |
| index 486cbc8ab224..7a549b834e97 100644 | |
| --- a/drivers/net/ethernet/nvidia/forcedeth.c | |
| +++ b/drivers/net/ethernet/nvidia/forcedeth.c | |
| @@ -1734,12 +1734,12 @@ static void nv_get_stats(int cpu, struct fe_priv *np, | |
| u64 tx_packets, tx_bytes, tx_dropped; | |
| do { | |
| - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); | |
| + syncp_start = u64_stats_fetch_begin(&np->swstats_rx_syncp); | |
| rx_packets = src->stat_rx_packets; | |
| rx_bytes = src->stat_rx_bytes; | |
| rx_dropped = src->stat_rx_dropped; | |
| rx_missed_errors = src->stat_rx_missed_errors; | |
| - } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); | |
| + } while (u64_stats_fetch_retry(&np->swstats_rx_syncp, syncp_start)); | |
| storage->rx_packets += rx_packets; | |
| storage->rx_bytes += rx_bytes; | |
| @@ -1747,11 +1747,11 @@ static void nv_get_stats(int cpu, struct fe_priv *np, | |
| storage->rx_missed_errors += rx_missed_errors; | |
| do { | |
| - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); | |
| + syncp_start = u64_stats_fetch_begin(&np->swstats_tx_syncp); | |
| tx_packets = src->stat_tx_packets; | |
| tx_bytes = src->stat_tx_bytes; | |
| tx_dropped = src->stat_tx_dropped; | |
| - } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); | |
| + } while (u64_stats_fetch_retry(&np->swstats_tx_syncp, syncp_start)); | |
| storage->tx_packets += tx_packets; | |
| storage->tx_bytes += tx_bytes; | |
| diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | |
| index 1b2119b1d48a..3f5e6572d20e 100644 | |
| --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | |
| +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | |
| @@ -135,9 +135,9 @@ static void rmnet_get_stats64(struct net_device *dev, | |
| pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); | |
| + start = u64_stats_fetch_begin(&pcpu_ptr->syncp); | |
| snapshot = pcpu_ptr->stats; /* struct assignment */ | |
| - } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&pcpu_ptr->syncp, start)); | |
| total_stats.rx_pkts += snapshot.rx_pkts; | |
| total_stats.rx_bytes += snapshot.rx_bytes; | |
| diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c | |
| index 469e2e229c6e..9ce0e8a64ba8 100644 | |
| --- a/drivers/net/ethernet/realtek/8139too.c | |
| +++ b/drivers/net/ethernet/realtek/8139too.c | |
| @@ -2532,16 +2532,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | |
| netdev_stats_to_stats64(stats, &dev->stats); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); | |
| + start = u64_stats_fetch_begin(&tp->rx_stats.syncp); | |
| stats->rx_packets = tp->rx_stats.packets; | |
| stats->rx_bytes = tp->rx_stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&tp->rx_stats.syncp, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); | |
| + start = u64_stats_fetch_begin(&tp->tx_stats.syncp); | |
| stats->tx_packets = tp->tx_stats.packets; | |
| stats->tx_bytes = tp->tx_stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&tp->tx_stats.syncp, start)); | |
| } | |
| /* Set or clear the multicast filter for this adaptor. | |
| diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c | |
| index d2c6a5dfdc0e..b7e24ae92525 100644 | |
| --- a/drivers/net/ethernet/socionext/sni_ave.c | |
| +++ b/drivers/net/ethernet/socionext/sni_ave.c | |
| @@ -1508,16 +1508,16 @@ static void ave_get_stats64(struct net_device *ndev, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp); | |
| + start = u64_stats_fetch_begin(&priv->stats_rx.syncp); | |
| stats->rx_packets = priv->stats_rx.packets; | |
| stats->rx_bytes = priv->stats_rx.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&priv->stats_rx.syncp, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp); | |
| + start = u64_stats_fetch_begin(&priv->stats_tx.syncp); | |
| stats->tx_packets = priv->stats_tx.packets; | |
| stats->tx_bytes = priv->stats_tx.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&priv->stats_tx.syncp, start)); | |
| stats->rx_errors = priv->stats_rx.errors; | |
| stats->tx_errors = priv->stats_tx.errors; | |
| diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c | |
| index 33df06a2de13..27121feb9718 100644 | |
| --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c | |
| +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c | |
| @@ -1376,12 +1376,12 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev, | |
| cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
| + start = u64_stats_fetch_begin(&cpu_stats->syncp); | |
| rx_packets = cpu_stats->rx_packets; | |
| rx_bytes = cpu_stats->rx_bytes; | |
| tx_packets = cpu_stats->tx_packets; | |
| tx_bytes = cpu_stats->tx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); | |
| stats->rx_packets += rx_packets; | |
| stats->rx_bytes += rx_bytes; | |
| diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c | |
| index 9eb9eaff4dc9..1bb596a9d8a2 100644 | |
| --- a/drivers/net/ethernet/ti/netcp_core.c | |
| +++ b/drivers/net/ethernet/ti/netcp_core.c | |
| @@ -1916,16 +1916,16 @@ netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&p->syncp_rx); | |
| + start = u64_stats_fetch_begin(&p->syncp_rx); | |
| rxpackets = p->rx_packets; | |
| rxbytes = p->rx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start)); | |
| + } while (u64_stats_fetch_retry(&p->syncp_rx, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&p->syncp_tx); | |
| + start = u64_stats_fetch_begin(&p->syncp_tx); | |
| txpackets = p->tx_packets; | |
| txbytes = p->tx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start)); | |
| + } while (u64_stats_fetch_retry(&p->syncp_tx, start)); | |
| stats->rx_packets = rxpackets; | |
| stats->rx_bytes = rxbytes; | |
| diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c | |
| index 0fb15a17b547..d716e6fe26e1 100644 | |
| --- a/drivers/net/ethernet/via/via-rhine.c | |
| +++ b/drivers/net/ethernet/via/via-rhine.c | |
| @@ -2217,16 +2217,16 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | |
| netdev_stats_to_stats64(stats, &dev->stats); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); | |
| + start = u64_stats_fetch_begin(&rp->rx_stats.syncp); | |
| stats->rx_packets = rp->rx_stats.packets; | |
| stats->rx_bytes = rp->rx_stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rp->rx_stats.syncp, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); | |
| + start = u64_stats_fetch_begin(&rp->tx_stats.syncp); | |
| stats->tx_packets = rp->tx_stats.packets; | |
| stats->tx_bytes = rp->tx_stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rp->tx_stats.syncp, start)); | |
| } | |
| static void rhine_set_rx_mode(struct net_device *dev) | |
| diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |
| index b631d80de337..977f100cb53d 100644 | |
| --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |
| +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |
| @@ -1310,16 +1310,16 @@ axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | |
| netdev_stats_to_stats64(stats, &dev->stats); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync); | |
| + start = u64_stats_fetch_begin(&lp->rx_stat_sync); | |
| stats->rx_packets = u64_stats_read(&lp->rx_packets); | |
| stats->rx_bytes = u64_stats_read(&lp->rx_bytes); | |
| - } while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start)); | |
| + } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&lp->tx_stat_sync); | |
| + start = u64_stats_fetch_begin(&lp->tx_stat_sync); | |
| stats->tx_packets = u64_stats_read(&lp->tx_packets); | |
| stats->tx_bytes = u64_stats_read(&lp->tx_bytes); | |
| - } while (u64_stats_fetch_retry_irq(&lp->tx_stat_sync, start)); | |
| + } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start)); | |
| } | |
| static const struct net_device_ops axienet_netdev_ops = { | |
| diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c | |
| index 1b7405539984..453ff84b784a 100644 | |
| --- a/drivers/net/hyperv/netvsc_drv.c | |
| +++ b/drivers/net/hyperv/netvsc_drv.c | |
| @@ -1268,12 +1268,12 @@ static void netvsc_get_vf_stats(struct net_device *net, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->syncp); | |
| + start = u64_stats_fetch_begin(&stats->syncp); | |
| rx_packets = stats->rx_packets; | |
| tx_packets = stats->tx_packets; | |
| rx_bytes = stats->rx_bytes; | |
| tx_bytes = stats->tx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&stats->syncp, start)); | |
| tot->rx_packets += rx_packets; | |
| tot->tx_packets += tx_packets; | |
| @@ -1298,12 +1298,12 @@ static void netvsc_get_pcpu_stats(struct net_device *net, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->syncp); | |
| + start = u64_stats_fetch_begin(&stats->syncp); | |
| this_tot->vf_rx_packets = stats->rx_packets; | |
| this_tot->vf_tx_packets = stats->tx_packets; | |
| this_tot->vf_rx_bytes = stats->rx_bytes; | |
| this_tot->vf_tx_bytes = stats->tx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&stats->syncp, start)); | |
| this_tot->rx_packets = this_tot->vf_rx_packets; | |
| this_tot->tx_packets = this_tot->vf_tx_packets; | |
| this_tot->rx_bytes = this_tot->vf_rx_bytes; | |
| @@ -1322,20 +1322,20 @@ static void netvsc_get_pcpu_stats(struct net_device *net, | |
| tx_stats = &nvchan->tx_stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&tx_stats->syncp); | |
| packets = tx_stats->packets; | |
| bytes = tx_stats->bytes; | |
| - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); | |
| this_tot->tx_bytes += bytes; | |
| this_tot->tx_packets += packets; | |
| rx_stats = &nvchan->rx_stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&rx_stats->syncp); | |
| packets = rx_stats->packets; | |
| bytes = rx_stats->bytes; | |
| - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); | |
| this_tot->rx_bytes += bytes; | |
| this_tot->rx_packets += packets; | |
| @@ -1374,21 +1374,21 @@ static void netvsc_get_stats64(struct net_device *net, | |
| tx_stats = &nvchan->tx_stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&tx_stats->syncp); | |
| packets = tx_stats->packets; | |
| bytes = tx_stats->bytes; | |
| - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); | |
| t->tx_bytes += bytes; | |
| t->tx_packets += packets; | |
| rx_stats = &nvchan->rx_stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&rx_stats->syncp); | |
| packets = rx_stats->packets; | |
| bytes = rx_stats->bytes; | |
| multicast = rx_stats->multicast + rx_stats->broadcast; | |
| - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); | |
| t->rx_bytes += bytes; | |
| t->rx_packets += packets; | |
| @@ -1531,24 +1531,24 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, | |
| tx_stats = &nvdev->chan_table[j].tx_stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&tx_stats->syncp); | |
| packets = tx_stats->packets; | |
| bytes = tx_stats->bytes; | |
| xdp_xmit = tx_stats->xdp_xmit; | |
| - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); | |
| data[i++] = packets; | |
| data[i++] = bytes; | |
| data[i++] = xdp_xmit; | |
| rx_stats = &nvdev->chan_table[j].rx_stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&rx_stats->syncp); | |
| packets = rx_stats->packets; | |
| bytes = rx_stats->bytes; | |
| xdp_drop = rx_stats->xdp_drop; | |
| xdp_redirect = rx_stats->xdp_redirect; | |
| xdp_tx = rx_stats->xdp_tx; | |
| - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); | |
| data[i++] = packets; | |
| data[i++] = bytes; | |
| data[i++] = xdp_drop; | |
| diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c | |
| index 1c64d5347b8e..78253ad57b2e 100644 | |
| --- a/drivers/net/ifb.c | |
| +++ b/drivers/net/ifb.c | |
| @@ -162,18 +162,18 @@ static void ifb_stats64(struct net_device *dev, | |
| for (i = 0; i < dev->num_tx_queues; i++,txp++) { | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&txp->rx_stats.sync); | |
| + start = u64_stats_fetch_begin(&txp->rx_stats.sync); | |
| packets = txp->rx_stats.packets; | |
| bytes = txp->rx_stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&txp->rx_stats.sync, start)); | |
| + } while (u64_stats_fetch_retry(&txp->rx_stats.sync, start)); | |
| stats->rx_packets += packets; | |
| stats->rx_bytes += bytes; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&txp->tx_stats.sync); | |
| + start = u64_stats_fetch_begin(&txp->tx_stats.sync); | |
| packets = txp->tx_stats.packets; | |
| bytes = txp->tx_stats.bytes; | |
| - } while (u64_stats_fetch_retry_irq(&txp->tx_stats.sync, start)); | |
| + } while (u64_stats_fetch_retry(&txp->tx_stats.sync, start)); | |
| stats->tx_packets += packets; | |
| stats->tx_bytes += bytes; | |
| } | |
| @@ -245,12 +245,12 @@ static void ifb_fill_stats_data(u64 **data, | |
| int j; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&q_stats->sync); | |
| + start = u64_stats_fetch_begin(&q_stats->sync); | |
| for (j = 0; j < IFB_Q_STATS_LEN; j++) { | |
| offset = ifb_q_stats_desc[j].offset; | |
| (*data)[j] = *(u64 *)(stats_base + offset); | |
| } | |
| - } while (u64_stats_fetch_retry_irq(&q_stats->sync, start)); | |
| + } while (u64_stats_fetch_retry(&q_stats->sync, start)); | |
| *data += IFB_Q_STATS_LEN; | |
| } | |
| diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c | |
| index fbf2d5b67aaf..57c79f5f2991 100644 | |
| --- a/drivers/net/ipvlan/ipvlan_main.c | |
| +++ b/drivers/net/ipvlan/ipvlan_main.c | |
| @@ -301,13 +301,13 @@ static void ipvlan_get_stats64(struct net_device *dev, | |
| for_each_possible_cpu(idx) { | |
| pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx); | |
| do { | |
| - strt= u64_stats_fetch_begin_irq(&pcptr->syncp); | |
| + strt = u64_stats_fetch_begin(&pcptr->syncp); | |
| rx_pkts = u64_stats_read(&pcptr->rx_pkts); | |
| rx_bytes = u64_stats_read(&pcptr->rx_bytes); | |
| rx_mcast = u64_stats_read(&pcptr->rx_mcast); | |
| tx_pkts = u64_stats_read(&pcptr->tx_pkts); | |
| tx_bytes = u64_stats_read(&pcptr->tx_bytes); | |
| - } while (u64_stats_fetch_retry_irq(&pcptr->syncp, | |
| + } while (u64_stats_fetch_retry(&pcptr->syncp, | |
| strt)); | |
| s->rx_packets += rx_pkts; | |
| diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c | |
| index 2e9742952c4e..f6d53e63ef4e 100644 | |
| --- a/drivers/net/loopback.c | |
| +++ b/drivers/net/loopback.c | |
| @@ -106,10 +106,10 @@ void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes) | |
| lb_stats = per_cpu_ptr(dev->lstats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&lb_stats->syncp); | |
| + start = u64_stats_fetch_begin(&lb_stats->syncp); | |
| tpackets = u64_stats_read(&lb_stats->packets); | |
| tbytes = u64_stats_read(&lb_stats->bytes); | |
| - } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&lb_stats->syncp, start)); | |
| *bytes += tbytes; | |
| *packets += tpackets; | |
| } | |
| diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c | |
| index 8a8fd74110e2..e277f6da79f3 100644 | |
| --- a/drivers/net/macsec.c | |
| +++ b/drivers/net/macsec.c | |
| @@ -2832,9 +2832,9 @@ static void get_rx_sc_stats(struct net_device *dev, | |
| stats = per_cpu_ptr(rx_sc->stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->syncp); | |
| + start = u64_stats_fetch_begin(&stats->syncp); | |
| memcpy(&tmp, &stats->stats, sizeof(tmp)); | |
| - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&stats->syncp, start)); | |
| sum->InOctetsValidated += tmp.InOctetsValidated; | |
| sum->InOctetsDecrypted += tmp.InOctetsDecrypted; | |
| @@ -2913,9 +2913,9 @@ static void get_tx_sc_stats(struct net_device *dev, | |
| stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->syncp); | |
| + start = u64_stats_fetch_begin(&stats->syncp); | |
| memcpy(&tmp, &stats->stats, sizeof(tmp)); | |
| - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&stats->syncp, start)); | |
| sum->OutPktsProtected += tmp.OutPktsProtected; | |
| sum->OutPktsEncrypted += tmp.OutPktsEncrypted; | |
| @@ -2969,9 +2969,9 @@ static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum) | |
| stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->syncp); | |
| + start = u64_stats_fetch_begin(&stats->syncp); | |
| memcpy(&tmp, &stats->stats, sizeof(tmp)); | |
| - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&stats->syncp, start)); | |
| sum->OutPktsUntagged += tmp.OutPktsUntagged; | |
| sum->InPktsUntagged += tmp.InPktsUntagged; | |
| diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c | |
| index 012830d12fde..9bea3f1b0a8a 100644 | |
| --- a/drivers/net/macvlan.c | |
| +++ b/drivers/net/macvlan.c | |
| @@ -948,13 +948,13 @@ static void macvlan_dev_get_stats64(struct net_device *dev, | |
| for_each_possible_cpu(i) { | |
| p = per_cpu_ptr(vlan->pcpu_stats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&p->syncp); | |
| + start = u64_stats_fetch_begin(&p->syncp); | |
| rx_packets = u64_stats_read(&p->rx_packets); | |
| rx_bytes = u64_stats_read(&p->rx_bytes); | |
| rx_multicast = u64_stats_read(&p->rx_multicast); | |
| tx_packets = u64_stats_read(&p->tx_packets); | |
| tx_bytes = u64_stats_read(&p->tx_bytes); | |
| - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&p->syncp, start)); | |
| stats->rx_packets += rx_packets; | |
| stats->rx_bytes += rx_bytes; | |
| diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c | |
| index 0b9d37979133..3d322ac4f6a5 100644 | |
| --- a/drivers/net/mhi_net.c | |
| +++ b/drivers/net/mhi_net.c | |
| @@ -104,19 +104,19 @@ static void mhi_ndo_get_stats64(struct net_device *ndev, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp); | |
| + start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp); | |
| stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets); | |
| stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes); | |
| stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors); | |
| - } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp); | |
| + start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp); | |
| stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets); | |
| stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes); | |
| stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors); | |
| stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped); | |
| - } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start)); | |
| } | |
| static const struct net_device_ops mhi_netdev_ops = { | |
| diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c | |
| index 9a1a5b203624..e470e3398abc 100644 | |
| --- a/drivers/net/netdevsim/netdev.c | |
| +++ b/drivers/net/netdevsim/netdev.c | |
| @@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&ns->syncp); | |
| + start = u64_stats_fetch_begin(&ns->syncp); | |
| stats->tx_bytes = ns->tx_bytes; | |
| stats->tx_packets = ns->tx_packets; | |
| - } while (u64_stats_fetch_retry_irq(&ns->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&ns->syncp, start)); | |
| } | |
| static int | |
| diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c | |
| index 293eaf6b3ec9..eccf9df0c88c 100644 | |
| --- a/drivers/net/team/team.c | |
| +++ b/drivers/net/team/team.c | |
| @@ -1868,13 +1868,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | |
| for_each_possible_cpu(i) { | |
| p = per_cpu_ptr(team->pcpu_stats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&p->syncp); | |
| + start = u64_stats_fetch_begin(&p->syncp); | |
| rx_packets = u64_stats_read(&p->rx_packets); | |
| rx_bytes = u64_stats_read(&p->rx_bytes); | |
| rx_multicast = u64_stats_read(&p->rx_multicast); | |
| tx_packets = u64_stats_read(&p->tx_packets); | |
| tx_bytes = u64_stats_read(&p->tx_bytes); | |
| - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&p->syncp, start)); | |
| stats->rx_packets += rx_packets; | |
| stats->rx_bytes += rx_bytes; | |
| diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c | |
| index b095a4b4957b..18d99fda997c 100644 | |
| --- a/drivers/net/team/team_mode_loadbalance.c | |
| +++ b/drivers/net/team/team_mode_loadbalance.c | |
| @@ -466,9 +466,9 @@ static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats, | |
| struct lb_stats tmp; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(syncp); | |
| + start = u64_stats_fetch_begin(syncp); | |
| tmp.tx_bytes = cpu_stats->tx_bytes; | |
| - } while (u64_stats_fetch_retry_irq(syncp, start)); | |
| + } while (u64_stats_fetch_retry(syncp, start)); | |
| acc_stats->tx_bytes += tmp.tx_bytes; | |
| } | |
| diff --git a/drivers/net/veth.c b/drivers/net/veth.c | |
| index 8dcd3b6e143b..cfc81e45d461 100644 | |
| --- a/drivers/net/veth.c | |
| +++ b/drivers/net/veth.c | |
| @@ -182,12 +182,12 @@ static void veth_get_ethtool_stats(struct net_device *dev, | |
| size_t offset; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rq_stats->syncp); | |
| + start = u64_stats_fetch_begin(&rq_stats->syncp); | |
| for (j = 0; j < VETH_RQ_STATS_LEN; j++) { | |
| offset = veth_rq_stats_desc[j].offset; | |
| data[idx + j] = *(u64 *)(stats_base + offset); | |
| } | |
| - } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); | |
| idx += VETH_RQ_STATS_LEN; | |
| } | |
| @@ -203,12 +203,12 @@ static void veth_get_ethtool_stats(struct net_device *dev, | |
| tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rq_stats->syncp); | |
| + start = u64_stats_fetch_begin(&rq_stats->syncp); | |
| for (j = 0; j < VETH_TQ_STATS_LEN; j++) { | |
| offset = veth_tq_stats_desc[j].offset; | |
| data[tx_idx + j] += *(u64 *)(base + offset); | |
| } | |
| - } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); | |
| } | |
| } | |
| @@ -381,13 +381,13 @@ static void veth_stats_rx(struct veth_stats *result, struct net_device *dev) | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->syncp); | |
| + start = u64_stats_fetch_begin(&stats->syncp); | |
| peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err; | |
| xdp_tx_err = stats->vs.xdp_tx_err; | |
| packets = stats->vs.xdp_packets; | |
| bytes = stats->vs.xdp_bytes; | |
| drops = stats->vs.rx_drops; | |
| - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&stats->syncp, start)); | |
| result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err; | |
| result->xdp_tx_err += xdp_tx_err; | |
| result->xdp_packets += packets; | |
| diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c | |
| index e3e5107adaca..0f1c6f88423f 100644 | |
| --- a/drivers/net/virtio_net.c | |
| +++ b/drivers/net/virtio_net.c | |
| @@ -2107,18 +2107,18 @@ static void virtnet_stats(struct net_device *dev, | |
| struct send_queue *sq = &vi->sq[i]; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&sq->stats.syncp); | |
| + start = u64_stats_fetch_begin(&sq->stats.syncp); | |
| tpackets = sq->stats.packets; | |
| tbytes = sq->stats.bytes; | |
| terrors = sq->stats.tx_timeouts; | |
| - } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rq->stats.syncp); | |
| + start = u64_stats_fetch_begin(&rq->stats.syncp); | |
| rpackets = rq->stats.packets; | |
| rbytes = rq->stats.bytes; | |
| rdrops = rq->stats.drops; | |
| - } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); | |
| tot->rx_packets += rpackets; | |
| tot->tx_packets += tpackets; | |
| @@ -2726,12 +2726,12 @@ static void virtnet_get_ethtool_stats(struct net_device *dev, | |
| stats_base = (u8 *)&rq->stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rq->stats.syncp); | |
| + start = u64_stats_fetch_begin(&rq->stats.syncp); | |
| for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { | |
| offset = virtnet_rq_stats_desc[j].offset; | |
| data[idx + j] = *(u64 *)(stats_base + offset); | |
| } | |
| - } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); | |
| idx += VIRTNET_RQ_STATS_LEN; | |
| } | |
| @@ -2740,12 +2740,12 @@ static void virtnet_get_ethtool_stats(struct net_device *dev, | |
| stats_base = (u8 *)&sq->stats; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&sq->stats.syncp); | |
| + start = u64_stats_fetch_begin(&sq->stats.syncp); | |
| for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { | |
| offset = virtnet_sq_stats_desc[j].offset; | |
| data[idx + j] = *(u64 *)(stats_base + offset); | |
| } | |
| - } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); | |
| + } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); | |
| idx += VIRTNET_SQ_STATS_LEN; | |
| } | |
| } | |
| diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c | |
| index 208df4d41939..6043e63b42f9 100644 | |
| --- a/drivers/net/vrf.c | |
| +++ b/drivers/net/vrf.c | |
| @@ -159,13 +159,13 @@ static void vrf_get_stats64(struct net_device *dev, | |
| dstats = per_cpu_ptr(dev->dstats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&dstats->syncp); | |
| + start = u64_stats_fetch_begin(&dstats->syncp); | |
| tbytes = dstats->tx_bytes; | |
| tpkts = dstats->tx_pkts; | |
| tdrops = dstats->tx_drps; | |
| rbytes = dstats->rx_bytes; | |
| rpkts = dstats->rx_pkts; | |
| - } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&dstats->syncp, start)); | |
| stats->tx_bytes += tbytes; | |
| stats->tx_packets += tpkts; | |
| stats->tx_dropped += tdrops; | |
| diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c | |
| index c5cf55030158..c3ff30ab782e 100644 | |
| --- a/drivers/net/vxlan/vxlan_vnifilter.c | |
| +++ b/drivers/net/vxlan/vxlan_vnifilter.c | |
| @@ -129,9 +129,9 @@ static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode, | |
| pstats = per_cpu_ptr(vninode->stats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&pstats->syncp); | |
| + start = u64_stats_fetch_begin(&pstats->syncp); | |
| memcpy(&temp, &pstats->stats, sizeof(temp)); | |
| - } while (u64_stats_fetch_retry_irq(&pstats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&pstats->syncp, start)); | |
| dest->rx_packets += temp.rx_packets; | |
| dest->rx_bytes += temp.rx_bytes; | |
| diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c | |
| index ef70bb7c88ad..3f72ae943b29 100644 | |
| --- a/drivers/net/wwan/mhi_wwan_mbim.c | |
| +++ b/drivers/net/wwan/mhi_wwan_mbim.c | |
| @@ -456,19 +456,19 @@ static void mhi_mbim_ndo_get_stats64(struct net_device *ndev, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&link->rx_syncp); | |
| + start = u64_stats_fetch_begin(&link->rx_syncp); | |
| stats->rx_packets = u64_stats_read(&link->rx_packets); | |
| stats->rx_bytes = u64_stats_read(&link->rx_bytes); | |
| stats->rx_errors = u64_stats_read(&link->rx_errors); | |
| - } while (u64_stats_fetch_retry_irq(&link->rx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&link->rx_syncp, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&link->tx_syncp); | |
| + start = u64_stats_fetch_begin(&link->tx_syncp); | |
| stats->tx_packets = u64_stats_read(&link->tx_packets); | |
| stats->tx_bytes = u64_stats_read(&link->tx_bytes); | |
| stats->tx_errors = u64_stats_read(&link->tx_errors); | |
| stats->tx_dropped = u64_stats_read(&link->tx_dropped); | |
| - } while (u64_stats_fetch_retry_irq(&link->tx_syncp, start)); | |
| + } while (u64_stats_fetch_retry(&link->tx_syncp, start)); | |
| } | |
| static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev, | |
| diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c | |
| index 95b5ab4b964e..cb4b06994e0d 100644 | |
| --- a/drivers/net/xen-netfront.c | |
| +++ b/drivers/net/xen-netfront.c | |
| @@ -1393,16 +1393,16 @@ static void xennet_get_stats64(struct net_device *dev, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&tx_stats->syncp); | |
| tx_packets = tx_stats->packets; | |
| tx_bytes = tx_stats->bytes; | |
| - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); | |
| + start = u64_stats_fetch_begin(&rx_stats->syncp); | |
| rx_packets = rx_stats->packets; | |
| rx_bytes = rx_stats->bytes; | |
| - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); | |
| tot->rx_packets += rx_packets; | |
| tot->tx_packets += tx_packets; | |
| diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c | |
| index 50fe5aa450f8..c1ce65b2ef70 100644 | |
| --- a/drivers/spi/spi.c | |
| +++ b/drivers/spi/spi.c | |
| @@ -127,10 +127,10 @@ do { \ | |
| unsigned int start; \ | |
| pcpu_stats = per_cpu_ptr(in, i); \ | |
| do { \ | |
| - start = u64_stats_fetch_begin_irq( \ | |
| + start = u64_stats_fetch_begin( \ | |
| &pcpu_stats->syncp); \ | |
| inc = u64_stats_read(&pcpu_stats->field); \ | |
| - } while (u64_stats_fetch_retry_irq( \ | |
| + } while (u64_stats_fetch_retry( \ | |
| &pcpu_stats->syncp, start)); \ | |
| ret += inc; \ | |
| } \ | |
| diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h | |
| index eeb7b43ebe53..b17715d340c3 100644 | |
| --- a/drivers/tty/serial/8250/8250.h | |
| +++ b/drivers/tty/serial/8250/8250.h | |
| @@ -179,6 +179,43 @@ | |
| up->dl_write(up, value); | |
| } | |
| +static inline int serial8250_in_IER(struct uart_8250_port *up) | |
| +{ | |
| + struct uart_port *port = &up->port; | |
| + unsigned long flags; | |
| + bool is_console; | |
| + int ier; | |
| + | |
| + is_console = uart_console(port); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + | |
| + ier = serial_in(up, UART_IER); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| + | |
| + return ier; | |
| +} | |
| + | |
| +static inline void serial8250_set_IER(struct uart_8250_port *up, int ier) | |
| +{ | |
| + struct uart_port *port = &up->port; | |
| + unsigned long flags; | |
| + bool is_console; | |
| + | |
| + is_console = uart_console(port); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + | |
| + serial_out(up, UART_IER, ier); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| +} | |
| + | |
| static inline bool serial8250_set_THRI(struct uart_8250_port *up) | |
| { | |
| if (up->ier & UART_IER_THRI) | |
| @@ -187,7 +224,7 @@ | |
| #if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI) | |
| up->ier |= UART_IER_PTIME; | |
| #endif | |
| - serial_out(up, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| return true; | |
| } | |
| @@ -199,7 +236,7 @@ | |
| #if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI) | |
| up->ier &= ~UART_IER_PTIME; | |
| #endif | |
| - serial_out(up, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| return true; | |
| } | |
| @@ -432,3 +469,4 @@ | |
| { | |
| return port->minor - 64; | |
| } | |
| + | |
| diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c | |
| index 9d2a7856784f..7cc6b527c088 100644 | |
| --- a/drivers/tty/serial/8250/8250_aspeed_vuart.c | |
| +++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c | |
| @@ -278,7 +278,7 @@ static void __aspeed_vuart_set_throttle(struct uart_8250_port *up, | |
| up->ier &= ~irqs; | |
| if (!throttle) | |
| up->ier |= irqs; | |
| - serial_out(up, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| } | |
| static void aspeed_vuart_set_throttle(struct uart_port *port, bool throttle) | |
| { | |
| diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c | |
| index a28f115f6194..2f2558e555e0 100644 | |
| --- a/drivers/tty/serial/8250/8250_bcm7271.c | |
| +++ b/drivers/tty/serial/8250/8250_bcm7271.c | |
| @@ -609,7 +609,7 @@ static int brcmuart_startup(struct uart_port *port) | |
| * will handle this. | |
| */ | |
| up->ier &= ~UART_IER_RDI; | |
| - serial_port_out(port, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| priv->tx_running = false; | |
| priv->dma.rx_dma = NULL; | |
| @@ -794,10 +794,12 @@ static int brcmuart_handle_irq(struct uart_port *p) | |
| unsigned int iir = serial_port_in(p, UART_IIR); | |
| struct brcmuart_priv *priv = p->private_data; | |
| struct uart_8250_port *up = up_to_u8250p(p); | |
| + unsigned long cs_flags; | |
| unsigned int status; | |
| unsigned long flags; | |
| unsigned int ier; | |
| unsigned int mcr; | |
| + bool is_console; | |
| int handled = 0; | |
| /* | |
| @@ -808,6 +810,10 @@ static int brcmuart_handle_irq(struct uart_port *p) | |
| spin_lock_irqsave(&p->lock, flags); | |
| status = serial_port_in(p, UART_LSR); | |
| if ((status & UART_LSR_DR) == 0) { | |
| + is_console = uart_console(p); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(cs_flags); | |
| ier = serial_port_in(p, UART_IER); | |
| /* | |
| @@ -828,6 +834,9 @@ static int brcmuart_handle_irq(struct uart_port *p) | |
| serial_port_in(p, UART_RX); | |
| } | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(cs_flags); | |
| + | |
| handled = 1; | |
| } | |
| spin_unlock_irqrestore(&p->lock, flags); | |
| @@ -842,8 +851,10 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t) | |
| struct brcmuart_priv *priv = container_of(t, struct brcmuart_priv, hrt); | |
| struct uart_port *p = priv->up; | |
| struct uart_8250_port *up = up_to_u8250p(p); | |
| + unsigned long cs_flags; | |
| unsigned int status; | |
| unsigned long flags; | |
| + bool is_console; | |
| if (priv->shutdown) | |
| return HRTIMER_NORESTART; | |
| @@ -865,12 +876,20 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t) | |
| /* re-enable receive unless upper layer has disabled it */ | |
| if ((up->ier & (UART_IER_RLSI | UART_IER_RDI)) == | |
| (UART_IER_RLSI | UART_IER_RDI)) { | |
| + is_console = uart_console(p); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(cs_flags); | |
| + | |
| status = serial_port_in(p, UART_IER); | |
| status |= (UART_IER_RLSI | UART_IER_RDI); | |
| serial_port_out(p, UART_IER, status); | |
| status = serial_port_in(p, UART_MCR); | |
| status |= UART_MCR_RTS; | |
| serial_port_out(p, UART_MCR, status); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(cs_flags); | |
| } | |
| spin_unlock_irqrestore(&p->lock, flags); | |
| return HRTIMER_NORESTART; | |
| diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c | |
| index 81a5dab1a828..536f639ff56c 100644 | |
| --- a/drivers/tty/serial/8250/8250_core.c | |
| +++ b/drivers/tty/serial/8250/8250_core.c | |
| @@ -255,8 +255,11 @@ static void serial8250_timeout(struct timer_list *t) | |
| static void serial8250_backup_timeout(struct timer_list *t) | |
| { | |
| struct uart_8250_port *up = from_timer(up, t, timer); | |
| + struct uart_port *port = &up->port; | |
| unsigned int iir, ier = 0, lsr; | |
| + unsigned long cs_flags; | |
| unsigned long flags; | |
| + bool is_console; | |
| spin_lock_irqsave(&up->port.lock, flags); | |
| @@ -265,8 +268,16 @@ static void serial8250_backup_timeout(struct timer_list *t) | |
| * based handler. | |
| */ | |
| if (up->port.irq) { | |
| + is_console = uart_console(port); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(cs_flags); | |
| + | |
| ier = serial_in(up, UART_IER); | |
| serial_out(up, UART_IER, 0); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(cs_flags); | |
| } | |
| iir = serial_in(up, UART_IIR); | |
| @@ -289,7 +300,7 @@ static void serial8250_backup_timeout(struct timer_list *t) | |
| serial8250_tx_chars(up); | |
| if (up->port.irq) | |
| - serial_out(up, UART_IER, ier); | |
| + serial8250_set_IER(up, ier); | |
| spin_unlock_irqrestore(&up->port.lock, flags); | |
| @@ -575,6 +586,14 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev) | |
| #ifdef CONFIG_SERIAL_8250_CONSOLE | |
| +static void univ8250_console_write_atomic(struct console *co, const char *s, | |
| + unsigned int count) | |
| +{ | |
| + struct uart_8250_port *up = &serial8250_ports[co->index]; | |
| + | |
| + serial8250_console_write_atomic(up, s, count); | |
| +} | |
| + | |
| static void univ8250_console_write(struct console *co, const char *s, | |
| unsigned int count) | |
| { | |
| @@ -668,6 +687,7 @@ static int univ8250_console_match(struct console *co, char *name, int idx, | |
| static struct console univ8250_console = { | |
| .name = "ttyS", | |
| + .write_atomic = univ8250_console_write_atomic, | |
| .write = univ8250_console_write, | |
| .device = uart_console_device, | |
| .setup = univ8250_console_setup, | |
| @@ -961,7 +981,7 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work) | |
| spin_lock_irqsave(&port->lock, flags); | |
| up->ier |= UART_IER_RLSI | UART_IER_RDI; | |
| up->port.read_status_mask |= UART_LSR_DR; | |
| - serial_out(up, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| spin_unlock_irqrestore(&port->lock, flags); | |
| } | |
| diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c | |
| index b5ae6ec61c9f..20875e783909 100644 | |
| --- a/drivers/tty/serial/8250/8250_exar.c | |
| +++ b/drivers/tty/serial/8250/8250_exar.c | |
| @@ -231,6 +231,8 @@ static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud, | |
| static int xr17v35x_startup(struct uart_port *port) | |
| { | |
| + struct uart_8250_port *up = up_to_u8250p(port); | |
| + | |
| /* | |
| * First enable access to IER [7:5], ISR [5:4], FCR [5:4], | |
| * MCR [7:5] and MSR [7:0] | |
| @@ -241,7 +243,7 @@ static int xr17v35x_startup(struct uart_port *port) | |
| * Make sure all interrups are masked until initialization is | |
| * complete and the FIFOs are cleared | |
| */ | |
| - serial_port_out(port, UART_IER, 0); | |
| + serial8250_set_IER(up, 0); | |
| return serial8250_do_startup(port); | |
| } | |
| diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c | |
| index 8adfaa183f77..eaf148245a10 100644 | |
| --- a/drivers/tty/serial/8250/8250_fsl.c | |
| +++ b/drivers/tty/serial/8250/8250_fsl.c | |
| @@ -58,7 +58,8 @@ int fsl8250_handle_irq(struct uart_port *port) | |
| if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) { | |
| unsigned long delay; | |
| - up->ier = port->serial_in(port, UART_IER); | |
| + up->ier = serial8250_in_IER(up); | |
| + | |
| if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) { | |
| port->ops->stop_rx(port); | |
| } else { | |
| diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c | |
| index 2b2f5d8d24b9..2b78e6c394fb 100644 | |
| --- a/drivers/tty/serial/8250/8250_ingenic.c | |
| +++ b/drivers/tty/serial/8250/8250_ingenic.c | |
| @@ -146,6 +146,7 @@ OF_EARLYCON_DECLARE(x1000_uart, "ingenic,x1000-uart", | |
| static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value) | |
| { | |
| + struct uart_8250_port *up = up_to_u8250p(p); | |
| int ier; | |
| switch (offset) { | |
| @@ -167,7 +168,7 @@ static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value) | |
| * If we have enabled modem status IRQs we should enable | |
| * modem mode. | |
| */ | |
| - ier = p->serial_in(p, UART_IER); | |
| + ier = serial8250_in_IER(up); | |
| if (ier & UART_IER_MSI) | |
| value |= UART_MCR_MDCE | UART_MCR_FCM; | |
| diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c | |
| index 295b9ba1b4f3..a5bb7c7a6441 100644 | |
| --- a/drivers/tty/serial/8250/8250_mtk.c | |
| +++ b/drivers/tty/serial/8250/8250_mtk.c | |
| @@ -226,12 +226,40 @@ static void mtk8250_shutdown(struct uart_port *port) | |
| static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask) | |
| { | |
| - serial_out(up, UART_IER, serial_in(up, UART_IER) & (~mask)); | |
| + struct uart_port *port = &up->port; | |
| + unsigned long flags; | |
| + bool is_console; | |
| + int ier; | |
| + | |
| + is_console = uart_console(port); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + | |
| + ier = serial_in(up, UART_IER); | |
| + serial_out(up, UART_IER, ier & (~mask)); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| } | |
| static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask) | |
| { | |
| - serial_out(up, UART_IER, serial_in(up, UART_IER) | mask); | |
| + struct uart_port *port = &up->port; | |
| + unsigned long flags; | |
| + bool is_console; | |
| + int ier; | |
| + | |
| + is_console = uart_console(port); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + | |
| + ier = serial_in(up, UART_IER); | |
| + serial_out(up, UART_IER, ier | mask); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| } | |
| static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode) | |
| diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c | |
| index a3eaf293f204..034f430203c9 100644 | |
| --- a/drivers/tty/serial/8250/8250_omap.c | |
| +++ b/drivers/tty/serial/8250/8250_omap.c | |
| @@ -334,7 +334,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) | |
| /* drop TCR + TLR access, we setup XON/XOFF later */ | |
| serial8250_out_MCR(up, mcr); | |
| - serial_out(up, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); | |
| serial_dl_write(up, priv->quot); | |
| @@ -524,7 +524,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state, | |
| serial_out(up, UART_EFR, efr | UART_EFR_ECB); | |
| serial_out(up, UART_LCR, 0); | |
| - serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0); | |
| + serial8250_set_IER(up, (state != 0) ? UART_IERX_SLEEP : 0); | |
| serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); | |
| serial_out(up, UART_EFR, efr); | |
| serial_out(up, UART_LCR, 0); | |
| @@ -719,7 +719,7 @@ static int omap_8250_startup(struct uart_port *port) | |
| goto err; | |
| up->ier = UART_IER_RLSI | UART_IER_RDI; | |
| - serial_out(up, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| #ifdef CONFIG_PM | |
| up->capabilities |= UART_CAP_RPM; | |
| @@ -760,7 +760,7 @@ static void omap_8250_shutdown(struct uart_port *port) | |
| serial_out(up, UART_OMAP_EFR2, 0x0); | |
| up->ier = 0; | |
| - serial_out(up, UART_IER, 0); | |
| + serial8250_set_IER(up, 0); | |
| if (up->dma) | |
| serial8250_release_dma(up); | |
| @@ -808,7 +808,7 @@ static void omap_8250_unthrottle(struct uart_port *port) | |
| up->dma->rx_dma(up); | |
| up->ier |= UART_IER_RLSI | UART_IER_RDI; | |
| port->read_status_mask |= UART_LSR_DR; | |
| - serial_out(up, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| spin_unlock_irqrestore(&port->lock, flags); | |
| pm_runtime_mark_last_busy(port->dev); | |
| @@ -899,7 +899,7 @@ static void __dma_rx_complete(void *param) | |
| __dma_rx_do_complete(p); | |
| if (!priv->throttled) { | |
| p->ier |= UART_IER_RLSI | UART_IER_RDI; | |
| - serial_out(p, UART_IER, p->ier); | |
| + serial8250_set_IER(p, p->ier); | |
| if (!(priv->habit & UART_HAS_EFR2)) | |
| omap_8250_rx_dma(p); | |
| } | |
| @@ -956,7 +956,7 @@ static int omap_8250_rx_dma(struct uart_8250_port *p) | |
| * callback to run. | |
| */ | |
| p->ier &= ~(UART_IER_RLSI | UART_IER_RDI); | |
| - serial_out(p, UART_IER, p->ier); | |
| + serial8250_set_IER(p, p->ier); | |
| } | |
| goto out; | |
| } | |
| @@ -1169,12 +1169,12 @@ static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir, | |
| * periodic timeouts, re-enable interrupts. | |
| */ | |
| up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); | |
| - serial_out(up, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| omap_8250_rx_dma_flush(up); | |
| serial_in(up, UART_IIR); | |
| serial_out(up, UART_OMAP_EFR2, 0x0); | |
| up->ier |= UART_IER_RLSI | UART_IER_RDI; | |
| - serial_out(up, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| } | |
| } | |
| diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c | |
| index c744feabd7cd..975c16267196 100644 | |
| --- a/drivers/tty/serial/8250/8250_port.c | |
| +++ b/drivers/tty/serial/8250/8250_port.c | |
| @@ -756,7 +756,7 @@ | |
| serial_out(p, UART_EFR, UART_EFR_ECB); | |
| serial_out(p, UART_LCR, 0); | |
| } | |
| - serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0); | |
| + serial8250_set_IER(p, sleep ? UART_IERX_SLEEP : 0); | |
| if (p->capabilities & UART_CAP_EFR) { | |
| serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B); | |
| serial_out(p, UART_EFR, efr); | |
| @@ -767,12 +767,29 @@ | |
| serial8250_rpm_put(p); | |
| } | |
| -static void serial8250_clear_IER(struct uart_8250_port *up) | |
| +static unsigned int serial8250_clear_IER(struct uart_8250_port *up) | |
| { | |
| + struct uart_port *port = &up->port; | |
| + unsigned int clearval = 0; | |
| + unsigned long flags; | |
| + bool is_console; | |
| + unsigned int prior; | |
| + | |
| + is_console = uart_console(port); | |
| + | |
| if (up->capabilities & UART_CAP_UUE) | |
| - serial_out(up, UART_IER, UART_IER_UUE); | |
| - else | |
| - serial_out(up, UART_IER, 0); | |
| + clearval = UART_IER_UUE; | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + | |
| + prior = serial_in(up, UART_IER); | |
| + serial_out(up, UART_IER, clearval); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| + | |
| + return prior; | |
| } | |
| #ifdef CONFIG_SERIAL_8250_RSA | |
| @@ -1038,8 +1055,11 @@ | |
| */ | |
| static void autoconfig_16550a(struct uart_8250_port *up) | |
| { | |
| + struct uart_port *port = &up->port; | |
| unsigned char status1, status2; | |
| unsigned int iersave; | |
| + unsigned long flags; | |
| + bool is_console; | |
| up->port.type = PORT_16550A; | |
| up->capabilities |= UART_CAP_FIFO; | |
| @@ -1151,6 +1171,11 @@ | |
| return; | |
| } | |
| + is_console = uart_console(port); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + | |
| /* | |
| * Try writing and reading the UART_IER_UUE bit (b6). | |
| * If it works, this is probably one of the Xscale platform's | |
| @@ -1186,6 +1211,9 @@ | |
| } | |
| serial_out(up, UART_IER, iersave); | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| + | |
| /* | |
| * We distinguish between 16550A and U6 16550A by counting | |
| * how many bytes are in the FIFO. | |
| @@ -1208,8 +1236,10 @@ | |
| unsigned char status1, scratch, scratch2, scratch3; | |
| unsigned char save_lcr, save_mcr; | |
| struct uart_port *port = &up->port; | |
| + unsigned long cs_flags; | |
| unsigned long flags; | |
| unsigned int old_capabilities; | |
| + bool is_console; | |
| if (!port->iobase && !port->mapbase && !port->membase) | |
| return; | |
| @@ -1227,6 +1257,11 @@ | |
| up->bugs = 0; | |
| if (!(port->flags & UPF_BUGGY_UART)) { | |
| + is_console = uart_console(port); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(cs_flags); | |
| + | |
| /* | |
| * Do a simple existence test first; if we fail this, | |
| * there's no point trying anything else. | |
| @@ -1256,6 +1291,10 @@ | |
| #endif | |
| scratch3 = serial_in(up, UART_IER) & 0x0f; | |
| serial_out(up, UART_IER, scratch); | |
| + | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(cs_flags); | |
| + | |
| if (scratch2 != 0 || scratch3 != 0x0F) { | |
| /* | |
| * We failed; there's nothing here | |
| @@ -1379,7 +1418,9 @@ | |
| unsigned char save_mcr, save_ier; | |
| unsigned char save_ICP = 0; | |
| unsigned int ICP = 0; | |
| + unsigned long flags; | |
| unsigned long irqs; | |
| + bool is_console; | |
| int irq; | |
| if (port->flags & UPF_FOURPORT) { | |
| @@ -1389,6 +1430,13 @@ | |
| inb_p(ICP); | |
| } | |
| + is_console = uart_console(port); | |
| + | |
| + if (is_console) { | |
| + console_lock(); | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + } | |
| + | |
| /* forget possible initially masked and pending IRQ */ | |
| probe_irq_off(probe_irq_on()); | |
| save_mcr = serial8250_in_MCR(up); | |
| @@ -1419,6 +1467,11 @@ | |
| if (port->flags & UPF_FOURPORT) | |
| outb_p(save_ICP, ICP); | |
| + if (is_console) { | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| + console_unlock(); | |
| + } | |
| + | |
| port->irq = (irq > 0) ? irq : 0; | |
| } | |
| @@ -1430,7 +1483,7 @@ | |
| up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); | |
| up->port.read_status_mask &= ~UART_LSR_DR; | |
| - serial_port_out(port, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| serial8250_rpm_put(up); | |
| } | |
| @@ -1460,7 +1513,7 @@ | |
| serial8250_clear_and_reinit_fifos(p); | |
| p->ier |= UART_IER_RLSI | UART_IER_RDI; | |
| - serial_port_out(&p->port, UART_IER, p->ier); | |
| + serial8250_set_IER(p, p->ier); | |
| } | |
| } | |
| EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx); | |
| @@ -1714,7 +1767,7 @@ | |
| mctrl_gpio_disable_ms(up->gpios); | |
| up->ier &= ~UART_IER_MSI; | |
| - serial_port_out(port, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| } | |
| static void serial8250_enable_ms(struct uart_port *port) | |
| @@ -1730,7 +1783,7 @@ | |
| up->ier |= UART_IER_MSI; | |
| serial8250_rpm_get(up); | |
| - serial_port_out(port, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| serial8250_rpm_put(up); | |
| } | |
| @@ -2231,8 +2284,7 @@ | |
| /* | |
| * First save the IER then disable the interrupts | |
| */ | |
| - ier = serial_port_in(port, UART_IER); | |
| - serial8250_clear_IER(up); | |
| + ier = serial8250_clear_IER(up); | |
| wait_for_xmitr(up, UART_LSR_BOTH_EMPTY); | |
| /* | |
| @@ -2245,7 +2297,7 @@ | |
| * and restore the IER | |
| */ | |
| wait_for_xmitr(up, UART_LSR_BOTH_EMPTY); | |
| - serial_port_out(port, UART_IER, ier); | |
| + serial8250_set_IER(up, ier); | |
| serial8250_rpm_put(up); | |
| } | |
| @@ -2254,8 +2306,10 @@ | |
| int serial8250_do_startup(struct uart_port *port) | |
| { | |
| struct uart_8250_port *up = up_to_u8250p(port); | |
| + unsigned long cs_flags; | |
| unsigned long flags; | |
| unsigned char iir; | |
| + bool is_console; | |
| int retval; | |
| u16 lsr; | |
| @@ -2276,7 +2330,7 @@ | |
| up->acr = 0; | |
| serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); | |
| serial_port_out(port, UART_EFR, UART_EFR_ECB); | |
| - serial_port_out(port, UART_IER, 0); | |
| + serial8250_set_IER(up, 0); | |
| serial_port_out(port, UART_LCR, 0); | |
| serial_icr_write(up, UART_CSR, 0); /* Reset the UART */ | |
| serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); | |
| @@ -2286,7 +2340,7 @@ | |
| if (port->type == PORT_DA830) { | |
| /* Reset the port */ | |
| - serial_port_out(port, UART_IER, 0); | |
| + serial8250_set_IER(up, 0); | |
| serial_port_out(port, UART_DA830_PWREMU_MGMT, 0); | |
| mdelay(10); | |
| @@ -2385,6 +2439,8 @@ | |
| if (retval) | |
| goto out; | |
| + is_console = uart_console(port); | |
| + | |
| if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { | |
| unsigned char iir1; | |
| @@ -2401,6 +2457,9 @@ | |
| */ | |
| spin_lock_irqsave(&port->lock, flags); | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(cs_flags); | |
| + | |
| wait_for_xmitr(up, UART_LSR_THRE); | |
| serial_port_out_sync(port, UART_IER, UART_IER_THRI); | |
| udelay(1); /* allow THRE to set */ | |
| @@ -2411,6 +2470,9 @@ | |
| iir = serial_port_in(port, UART_IIR); | |
| serial_port_out(port, UART_IER, 0); | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(cs_flags); | |
| + | |
| spin_unlock_irqrestore(&port->lock, flags); | |
| if (port->irqflags & IRQF_SHARED) | |
| @@ -2465,10 +2527,14 @@ | |
| * Do a quick test to see if we receive an interrupt when we enable | |
| * the TX irq. | |
| */ | |
| + if (is_console) | |
| + printk_cpu_sync_get_irqsave(cs_flags); | |
| serial_port_out(port, UART_IER, UART_IER_THRI); | |
| lsr = serial_port_in(port, UART_LSR); | |
| iir = serial_port_in(port, UART_IIR); | |
| serial_port_out(port, UART_IER, 0); | |
| + if (is_console) | |
| + printk_cpu_sync_put_irqrestore(cs_flags); | |
| if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) { | |
| if (!(up->bugs & UART_BUG_TXEN)) { | |
| @@ -2500,7 +2566,7 @@ | |
| if (up->dma) { | |
| const char *msg = NULL; | |
| - if (uart_console(port)) | |
| + if (is_console) | |
| msg = "forbid DMA for kernel console"; | |
| else if (serial8250_request_dma(up)) | |
| #ifdef CONFIG_ARCH_ROCKCHIP | |
| @@ -2555,7 +2621,7 @@ | |
| */ | |
| spin_lock_irqsave(&port->lock, flags); | |
| up->ier = 0; | |
| - serial_port_out(port, UART_IER, 0); | |
| + serial8250_set_IER(up, 0); | |
| spin_unlock_irqrestore(&port->lock, flags); | |
| synchronize_irq(port->irq); | |
| @@ -2932,7 +2998,7 @@ | |
| if (up->capabilities & UART_CAP_RTOIE) | |
| up->ier |= UART_IER_RTOIE; | |
| - serial_port_out(port, UART_IER, up->ier); | |
| + serial8250_set_IER(up, up->ier); | |
| #endif | |
| if (up->capabilities & UART_CAP_EFR) { | |
| @@ -3432,7 +3498,7 @@ | |
| #ifdef CONFIG_SERIAL_8250_CONSOLE | |
| -static void serial8250_console_putchar(struct uart_port *port, unsigned char ch) | |
| +static void serial8250_console_putchar_locked(struct uart_port *port, unsigned char ch) | |
| { | |
| struct uart_8250_port *up = up_to_u8250p(port); | |
| @@ -3440,6 +3506,18 @@ | |
| serial_port_out(port, UART_TX, ch); | |
| } | |
| +static void serial8250_console_putchar(struct uart_port *port, unsigned char ch) | |
| +{ | |
| + struct uart_8250_port *up = up_to_u8250p(port); | |
| + unsigned long flags; | |
| + | |
| + wait_for_xmitr(up, UART_LSR_THRE); | |
| + | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + serial8250_console_putchar_locked(port, ch); | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| +} | |
| + | |
| /* | |
| * Restore serial console when h/w power-off detected | |
| */ | |
| @@ -3466,6 +3544,32 @@ | |
| serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); | |
| } | |
| +void serial8250_console_write_atomic(struct uart_8250_port *up, | |
| + const char *s, unsigned int count) | |
| +{ | |
| + struct uart_port *port = &up->port; | |
| + unsigned long flags; | |
| + unsigned int ier; | |
| + | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + | |
| + touch_nmi_watchdog(); | |
| + | |
| + ier = serial8250_clear_IER(up); | |
| + | |
| + if (atomic_fetch_inc(&up->console_printing)) { | |
| + uart_console_write(port, "\n", 1, | |
| + serial8250_console_putchar_locked); | |
| + } | |
| + uart_console_write(port, s, count, serial8250_console_putchar_locked); | |
| + atomic_dec(&up->console_printing); | |
| + | |
| + wait_for_xmitr(up, UART_LSR_BOTH_EMPTY); | |
| + serial8250_set_IER(up, ier); | |
| + | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| +} | |
| + | |
| /* | |
| * Print a string to the serial port using the device FIFO | |
| * | |
| @@ -3511,20 +3615,15 @@ | |
| struct uart_port *port = &up->port; | |
| unsigned long flags; | |
| unsigned int ier, use_fifo; | |
| - int locked = 1; | |
| touch_nmi_watchdog(); | |
| - if (oops_in_progress) | |
| - locked = spin_trylock_irqsave(&port->lock, flags); | |
| - else | |
| - spin_lock_irqsave(&port->lock, flags); | |
| + spin_lock_irqsave(&port->lock, flags); | |
| /* | |
| * First save the IER then disable the interrupts | |
| */ | |
| - ier = serial_port_in(port, UART_IER); | |
| - serial8250_clear_IER(up); | |
| + ier = serial8250_clear_IER(up); | |
| /* check scratch reg to see if port powered off during system sleep */ | |
| if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) { | |
| @@ -3558,10 +3657,12 @@ | |
| */ | |
| !(up->port.flags & UPF_CONS_FLOW); | |
| + atomic_inc(&up->console_printing); | |
| if (likely(use_fifo)) | |
| serial8250_console_fifo_write(up, s, count); | |
| else | |
| uart_console_write(port, s, count, serial8250_console_putchar); | |
| + atomic_dec(&up->console_printing); | |
| /* | |
| * Finally, wait for transmitter to become empty | |
| @@ -3574,8 +3675,7 @@ | |
| if (em485->tx_stopped) | |
| up->rs485_stop_tx(up); | |
| } | |
| - | |
| - serial_port_out(port, UART_IER, ier); | |
| + serial8250_set_IER(up, ier); | |
| /* | |
| * The receive handling will happen properly because the | |
| @@ -3587,8 +3687,7 @@ | |
| if (up->msr_saved_flags) | |
| serial8250_modem_status(up); | |
| - if (locked) | |
| - spin_unlock_irqrestore(&port->lock, flags); | |
| + spin_unlock_irqrestore(&port->lock, flags); | |
| } | |
| static unsigned int probe_baud(struct uart_port *port) | |
| @@ -3608,6 +3707,7 @@ | |
| int serial8250_console_setup(struct uart_port *port, char *options, bool probe) | |
| { | |
| + struct uart_8250_port *up = up_to_u8250p(port); | |
| int baud = 9600; | |
| int bits = 8; | |
| int parity = 'n'; | |
| @@ -3617,6 +3717,8 @@ | |
| if (!port->iobase && !port->membase) | |
| return -ENODEV; | |
| + atomic_set(&up->console_printing, 0); | |
| + | |
| if (options) | |
| uart_parse_options(options, &baud, &parity, &bits, &flow); | |
| else if (probe) | |
| diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig | |
| index 583a340f9934..1f31320820a6 100644 | |
| --- a/drivers/tty/serial/8250/Kconfig | |
| +++ b/drivers/tty/serial/8250/Kconfig | |
| @@ -9,6 +9,7 @@ config SERIAL_8250 | |
| depends on !S390 | |
| select SERIAL_CORE | |
| select SERIAL_MCTRL_GPIO if GPIOLIB | |
| + select HAVE_ATOMIC_CONSOLE | |
| help | |
| This selects whether you want to include the driver for the standard | |
| serial ports. The standard answer is Y. People who might say N | |
| diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c | |
| index 2f0f05259778..eee5141ecd3a 100644 | |
| --- a/drivers/tty/serial/amba-pl011.c | |
| +++ b/drivers/tty/serial/amba-pl011.c | |
| @@ -2316,18 +2316,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) | |
| { | |
| struct uart_amba_port *uap = amba_ports[co->index]; | |
| unsigned int old_cr = 0, new_cr; | |
| - unsigned long flags; | |
| + unsigned long flags = 0; | |
| int locked = 1; | |
| clk_enable(uap->clk); | |
| - local_irq_save(flags); | |
| + /* | |
| + * local_irq_save(flags); | |
| + * | |
| + * This local_irq_save() is nonsense. If we come in via sysrq | |
| + * handling then interrupts are already disabled. Aside of | |
| + * that the port.sysrq check is racy on SMP regardless. | |
| + */ | |
| if (uap->port.sysrq) | |
| locked = 0; | |
| else if (oops_in_progress) | |
| - locked = spin_trylock(&uap->port.lock); | |
| + locked = spin_trylock_irqsave(&uap->port.lock, flags); | |
| else | |
| - spin_lock(&uap->port.lock); | |
| + spin_lock_irqsave(&uap->port.lock, flags); | |
| /* | |
| * First save the CR then disable the interrupts | |
| @@ -2353,8 +2359,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) | |
| pl011_write(old_cr, uap, REG_CR); | |
| if (locked) | |
| - spin_unlock(&uap->port.lock); | |
| - local_irq_restore(flags); | |
| + spin_unlock_irqrestore(&uap->port.lock, flags); | |
| clk_disable(uap->clk); | |
| } | |
| diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c | |
| index beb7896ebf8a..ba5b843820c1 100644 | |
| --- a/drivers/tty/serial/omap-serial.c | |
| +++ b/drivers/tty/serial/omap-serial.c | |
| @@ -1241,13 +1241,10 @@ serial_omap_console_write(struct console *co, const char *s, | |
| unsigned int ier; | |
| int locked = 1; | |
| - local_irq_save(flags); | |
| - if (up->port.sysrq) | |
| - locked = 0; | |
| - else if (oops_in_progress) | |
| - locked = spin_trylock(&up->port.lock); | |
| + if (up->port.sysrq || oops_in_progress) | |
| + locked = spin_trylock_irqsave(&up->port.lock, flags); | |
| else | |
| - spin_lock(&up->port.lock); | |
| + spin_lock_irqsave(&up->port.lock, flags); | |
| /* | |
| * First save the IER then disable the interrupts | |
| @@ -1274,8 +1271,7 @@ serial_omap_console_write(struct console *co, const char *s, | |
| check_modem_status(up); | |
| if (locked) | |
| - spin_unlock(&up->port.lock); | |
| - local_irq_restore(flags); | |
| + spin_unlock_irqrestore(&up->port.lock, flags); | |
| } | |
| static int __init | |
| diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c | |
| index 248067197287..0db9dad8c99f 100644 | |
| --- a/drivers/tty/sysrq.c | |
| +++ b/drivers/tty/sysrq.c | |
| @@ -582,6 +582,7 @@ void __handle_sysrq(int key, bool check_mask) | |
| rcu_sysrq_start(); | |
| rcu_read_lock(); | |
| + printk_prefer_direct_enter(); | |
| /* | |
| * Raise the apparent loglevel to maximum so that the sysrq header | |
| * is shown to provide the user with positive feedback. We do not | |
| @@ -623,6 +624,7 @@ void __handle_sysrq(int key, bool check_mask) | |
| pr_cont("\n"); | |
| console_loglevel = orig_log_level; | |
| } | |
| + printk_prefer_direct_exit(); | |
| rcu_read_unlock(); | |
| rcu_sysrq_end(); | |
| diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h | |
| index 4e0e50e7ac15..173e979b84a9 100644 | |
| --- a/drivers/vdpa/vdpa_user/iova_domain.h | |
| +++ b/drivers/vdpa/vdpa_user/iova_domain.h | |
| @@ -14,7 +14,6 @@ | |
| #include <linux/iova.h> | |
| #include <linux/dma-mapping.h> | |
| #include <linux/vhost_iotlb.h> | |
| -#include <linux/rwlock.h> | |
| #define IOVA_START_PFN 1 | |
| diff --git a/include/linux/console.h b/include/linux/console.h | |
| index 8c1686e2c233..8a813cbaf928 100644 | |
| --- a/include/linux/console.h | |
| +++ b/include/linux/console.h | |
| @@ -16,6 +16,7 @@ | |
| #include <linux/atomic.h> | |
| #include <linux/types.h> | |
| +#include <linux/mutex.h> | |
| struct vc_data; | |
| struct console_font_op; | |
| @@ -137,9 +138,19 @@ static inline int con_debug_leave(void) | |
| #define CON_BRL (32) /* Used for a braille device */ | |
| #define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */ | |
| +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE | |
| +struct console_atomic_data { | |
| + u64 seq; | |
| + char *text; | |
| + char *ext_text; | |
| + char *dropped_text; | |
| +}; | |
| +#endif | |
| + | |
| struct console { | |
| char name[16]; | |
| void (*write)(struct console *, const char *, unsigned); | |
| + void (*write_atomic)(struct console *, const char *, unsigned); | |
| int (*read)(struct console *, char *, unsigned); | |
| struct tty_driver *(*device)(struct console *, int *); | |
| void (*unblank)(void); | |
| @@ -152,7 +163,26 @@ struct console { | |
| uint ispeed; | |
| uint ospeed; | |
| u64 seq; | |
| - unsigned long dropped; | |
| + atomic_long_t dropped; | |
| +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE | |
| + struct console_atomic_data *atomic_data; | |
| +#endif | |
| + struct task_struct *thread; | |
| + bool blocked; | |
| + | |
| + /* | |
| + * The per-console lock is used by printing kthreads to synchronize | |
| + * this console with callers of console_lock(). This is necessary in | |
| + * order to allow printing kthreads to run in parallel to each other, | |
| + * while each safely accessing the @blocked field and synchronizing | |
| + * against direct printing via console_lock/console_unlock. | |
| + * | |
| + * Note: For synchronizing against direct printing via | |
| + * console_trylock/console_unlock, see the static global | |
| + * variable @console_kthreads_active. | |
| + */ | |
| + struct mutex lock; | |
| + | |
| void *data; | |
| struct console *next; | |
| }; | |
| @@ -167,6 +197,7 @@ extern int console_set_on_cmdline; | |
| extern struct console *early_console; | |
| enum con_flush_mode { | |
| + CONSOLE_ATOMIC_FLUSH_PENDING, | |
| CONSOLE_FLUSH_PENDING, | |
| CONSOLE_REPLAY_ALL, | |
| }; | |
| diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h | |
| index d95ab85f96ba..3dc3704a3cdb 100644 | |
| --- a/include/linux/entry-common.h | |
| +++ b/include/linux/entry-common.h | |
| @@ -57,9 +57,15 @@ | |
| # define ARCH_EXIT_TO_USER_MODE_WORK (0) | |
| #endif | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| +# define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) | |
| +#else | |
| +# define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED) | |
| +#endif | |
| + | |
| #define EXIT_TO_USER_MODE_WORK \ | |
| (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ | |
| - _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ | |
| + _TIF_NEED_RESCHED_MASK | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ | |
| ARCH_EXIT_TO_USER_MODE_WORK) | |
| /** | |
| diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h | |
| index 4a1dc88ddbff..a5091ac97fc6 100644 | |
| --- a/include/linux/interrupt.h | |
| +++ b/include/linux/interrupt.h | |
| @@ -609,6 +609,35 @@ extern void __raise_softirq_irqoff(unsigned int nr); | |
| extern void raise_softirq_irqoff(unsigned int nr); | |
| extern void raise_softirq(unsigned int nr); | |
| +#ifdef CONFIG_PREEMPT_RT | |
| +DECLARE_PER_CPU(struct task_struct *, timersd); | |
| +DECLARE_PER_CPU(unsigned long, pending_timer_softirq); | |
| + | |
| +extern void raise_timer_softirq(void); | |
| +extern void raise_hrtimer_softirq(void); | |
| + | |
| +static inline unsigned int local_pending_timers(void) | |
| +{ | |
| + return __this_cpu_read(pending_timer_softirq); | |
| +} | |
| + | |
| +#else | |
| +static inline void raise_timer_softirq(void) | |
| +{ | |
| + raise_softirq(TIMER_SOFTIRQ); | |
| +} | |
| + | |
| +static inline void raise_hrtimer_softirq(void) | |
| +{ | |
| + raise_softirq_irqoff(HRTIMER_SOFTIRQ); | |
| +} | |
| + | |
| +static inline unsigned int local_pending_timers(void) | |
| +{ | |
| + return local_softirq_pending(); | |
| +} | |
| +#endif | |
| + | |
| DECLARE_PER_CPU(struct task_struct *, ksoftirqd); | |
| static inline struct task_struct *this_cpu_ksoftirqd(void) | |
| diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h | |
| index 66a774d2710e..b08532b8fba7 100644 | |
| --- a/include/linux/io-mapping.h | |
| +++ b/include/linux/io-mapping.h | |
| @@ -69,7 +69,10 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, | |
| BUG_ON(offset >= mapping->size); | |
| phys_addr = mapping->base + offset; | |
| - preempt_disable(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + preempt_disable(); | |
| + else | |
| + migrate_disable(); | |
| pagefault_disable(); | |
| return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); | |
| } | |
| @@ -79,7 +82,10 @@ io_mapping_unmap_atomic(void __iomem *vaddr) | |
| { | |
| kunmap_local_indexed((void __force *)vaddr); | |
| pagefault_enable(); | |
| - preempt_enable(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + preempt_enable(); | |
| + else | |
| + migrate_enable(); | |
| } | |
| static inline void __iomem * | |
| @@ -162,7 +168,10 @@ static inline void __iomem * | |
| io_mapping_map_atomic_wc(struct io_mapping *mapping, | |
| unsigned long offset) | |
| { | |
| - preempt_disable(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + preempt_disable(); | |
| + else | |
| + migrate_disable(); | |
| pagefault_disable(); | |
| return io_mapping_map_wc(mapping, offset, PAGE_SIZE); | |
| } | |
| @@ -172,7 +181,10 @@ io_mapping_unmap_atomic(void __iomem *vaddr) | |
| { | |
| io_mapping_unmap(vaddr); | |
| pagefault_enable(); | |
| - preempt_enable(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + preempt_enable(); | |
| + else | |
| + migrate_enable(); | |
| } | |
| static inline void __iomem * | |
| diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h | |
| index 1f1099dac3f0..a3329fb49b33 100644 | |
| --- a/include/linux/lockdep.h | |
| +++ b/include/linux/lockdep.h | |
| @@ -339,6 +339,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); | |
| #define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c)) | |
| #define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c)) | |
| +/* | |
| + * Must use lock_map_acquire_try() with override maps to avoid | |
| + * lockdep thinking they participate in the block chain. | |
| + */ | |
| +#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \ | |
| + struct lockdep_map _name = { \ | |
| + .name = #_name "-wait-type-override", \ | |
| + .wait_type_inner = _wait_type, \ | |
| + .lock_type = LD_LOCK_WAIT_OVERRIDE, } | |
| + | |
| #else /* !CONFIG_LOCKDEP */ | |
| static inline void lockdep_init_task(struct task_struct *task) | |
| @@ -427,6 +437,9 @@ extern int lockdep_is_held(const void *); | |
| #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) | |
| #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) | |
| +#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \ | |
| + struct lockdep_map __maybe_unused _name = {} | |
| + | |
| #endif /* !LOCKDEP */ | |
| enum xhlock_context_t { | |
| @@ -435,7 +448,6 @@ enum xhlock_context_t { | |
| XHLOCK_CTX_NR, | |
| }; | |
| -#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) | |
| /* | |
| * To initialize a lockdep_map statically use this macro. | |
| * Note that _name must not be NULL. | |
| @@ -552,6 +564,7 @@ do { \ | |
| #define rwsem_release(l, i) lock_release(l, i) | |
| #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) | |
| +#define lock_map_acquire_try(l) lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_) | |
| #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) | |
| #define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) | |
| #define lock_map_release(l) lock_release(l, _THIS_IP_) | |
| diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h | |
| index d22430840b53..59f4fb1626ea 100644 | |
| --- a/include/linux/lockdep_types.h | |
| +++ b/include/linux/lockdep_types.h | |
| @@ -33,6 +33,7 @@ enum lockdep_wait_type { | |
| enum lockdep_lock_type { | |
| LD_LOCK_NORMAL = 0, /* normal, catch all */ | |
| LD_LOCK_PERCPU, /* percpu */ | |
| + LD_LOCK_WAIT_OVERRIDE, /* annotation */ | |
| LD_LOCK_MAX, | |
| }; | |
| diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h | |
| index 0373e0935990..55d698367883 100644 | |
| --- a/include/linux/netdevice.h | |
| +++ b/include/linux/netdevice.h | |
| @@ -3169,7 +3169,11 @@ struct softnet_data { | |
| int defer_count; | |
| int defer_ipi_scheduled; | |
| struct sk_buff *defer_list; | |
| +#ifndef CONFIG_PREEMPT_RT | |
| call_single_data_t defer_csd; | |
| +#else | |
| + struct work_struct defer_work; | |
| +#endif | |
| }; | |
| static inline void input_queue_head_incr(struct softnet_data *sd) | |
| diff --git a/include/linux/preempt.h b/include/linux/preempt.h | |
| index 9aa6358a1a16..e9f0d08733f4 100644 | |
| --- a/include/linux/preempt.h | |
| +++ b/include/linux/preempt.h | |
| @@ -208,6 +208,20 @@ extern void preempt_count_sub(int val); | |
| #define preempt_count_inc() preempt_count_add(1) | |
| #define preempt_count_dec() preempt_count_sub(1) | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| +#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) | |
| +#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) | |
| +#define inc_preempt_lazy_count() add_preempt_lazy_count(1) | |
| +#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) | |
| +#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) | |
| +#else | |
| +#define add_preempt_lazy_count(val) do { } while (0) | |
| +#define sub_preempt_lazy_count(val) do { } while (0) | |
| +#define inc_preempt_lazy_count() do { } while (0) | |
| +#define dec_preempt_lazy_count() do { } while (0) | |
| +#define preempt_lazy_count() (0) | |
| +#endif | |
| + | |
| #ifdef CONFIG_PREEMPT_COUNT | |
| #define preempt_disable() \ | |
| @@ -216,6 +230,12 @@ do { \ | |
| barrier(); \ | |
| } while (0) | |
| +#define preempt_lazy_disable() \ | |
| +do { \ | |
| + inc_preempt_lazy_count(); \ | |
| + barrier(); \ | |
| +} while (0) | |
| + | |
| #define sched_preempt_enable_no_resched() \ | |
| do { \ | |
| barrier(); \ | |
| @@ -247,6 +267,18 @@ do { \ | |
| __preempt_schedule(); \ | |
| } while (0) | |
| +/* | |
| + * open code preempt_check_resched() because it is not exported to modules and | |
| + * used by local_unlock() or bpf_enable_instrumentation(). | |
| + */ | |
| +#define preempt_lazy_enable() \ | |
| +do { \ | |
| + dec_preempt_lazy_count(); \ | |
| + barrier(); \ | |
| + if (should_resched(0)) \ | |
| + __preempt_schedule(); \ | |
| +} while (0) | |
| + | |
| #else /* !CONFIG_PREEMPTION */ | |
| #define preempt_enable() \ | |
| do { \ | |
| @@ -254,6 +286,12 @@ do { \ | |
| preempt_count_dec(); \ | |
| } while (0) | |
| +#define preempt_lazy_enable() \ | |
| +do { \ | |
| + dec_preempt_lazy_count(); \ | |
| + barrier(); \ | |
| +} while (0) | |
| + | |
| #define preempt_enable_notrace() \ | |
| do { \ | |
| barrier(); \ | |
| @@ -294,6 +332,9 @@ do { \ | |
| #define preempt_enable_notrace() barrier() | |
| #define preemptible() 0 | |
| +#define preempt_lazy_disable() barrier() | |
| +#define preempt_lazy_enable() barrier() | |
| + | |
| #endif /* CONFIG_PREEMPT_COUNT */ | |
| #ifdef MODULE | |
| @@ -312,7 +353,7 @@ do { \ | |
| } while (0) | |
| #define preempt_fold_need_resched() \ | |
| do { \ | |
| - if (tif_need_resched()) \ | |
| + if (tif_need_resched_now()) \ | |
| set_preempt_need_resched(); \ | |
| } while (0) | |
| @@ -428,8 +469,15 @@ extern void migrate_enable(void); | |
| #else | |
| -static inline void migrate_disable(void) { } | |
| -static inline void migrate_enable(void) { } | |
| +static inline void migrate_disable(void) | |
| +{ | |
| + preempt_lazy_disable(); | |
| +} | |
| + | |
| +static inline void migrate_enable(void) | |
| +{ | |
| + preempt_lazy_enable(); | |
| +} | |
| #endif /* CONFIG_SMP */ | |
| diff --git a/include/linux/printk.h b/include/linux/printk.h | |
| index b1a12916f036..3c4ab778fab6 100644 | |
| --- a/include/linux/printk.h | |
| +++ b/include/linux/printk.h | |
| @@ -168,6 +168,9 @@ extern void __printk_safe_exit(void); | |
| */ | |
| #define printk_deferred_enter __printk_safe_enter | |
| #define printk_deferred_exit __printk_safe_exit | |
| +extern void printk_prefer_direct_enter(void); | |
| +extern void printk_prefer_direct_exit(void); | |
| +extern void try_block_console_kthreads(int timeout_ms); | |
| /* | |
| * Please don't use printk_ratelimit(), because it shares ratelimiting state | |
| @@ -219,6 +222,18 @@ static inline void printk_deferred_exit(void) | |
| { | |
| } | |
| +static inline void printk_prefer_direct_enter(void) | |
| +{ | |
| +} | |
| + | |
| +static inline void printk_prefer_direct_exit(void) | |
| +{ | |
| +} | |
| + | |
| +static inline void try_block_console_kthreads(int timeout_ms) | |
| +{ | |
| +} | |
| + | |
| static inline int printk_ratelimit(void) | |
| { | |
| return 0; | |
| diff --git a/include/linux/sched.h b/include/linux/sched.h | |
| index 0cac69902ec5..67ec36dbfacf 100644 | |
| --- a/include/linux/sched.h | |
| +++ b/include/linux/sched.h | |
| @@ -2061,6 +2061,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) | |
| return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); | |
| } | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| +static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) | |
| +{ | |
| + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); | |
| +} | |
| + | |
| +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) | |
| +{ | |
| + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); | |
| +} | |
| + | |
| +static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) | |
| +{ | |
| + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); | |
| +} | |
| + | |
| +static inline int need_resched_lazy(void) | |
| +{ | |
| + return test_thread_flag(TIF_NEED_RESCHED_LAZY); | |
| +} | |
| + | |
| +static inline int need_resched_now(void) | |
| +{ | |
| + return test_thread_flag(TIF_NEED_RESCHED); | |
| +} | |
| + | |
| +#else | |
| +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } | |
| +static inline int need_resched_lazy(void) { return 0; } | |
| + | |
| +static inline int need_resched_now(void) | |
| +{ | |
| + return test_thread_flag(TIF_NEED_RESCHED); | |
| +} | |
| + | |
| +#endif | |
| + | |
| /* | |
| * cond_resched() and cond_resched_lock(): latency reduction via | |
| * explicit rescheduling in places that are safe. The return | |
| diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h | |
| index aaa25ed1a8fe..a51c36397324 100644 | |
| --- a/include/linux/sched/task.h | |
| +++ b/include/linux/sched/task.h | |
| @@ -141,8 +141,12 @@ static inline void put_task_struct(struct task_struct *t) | |
| */ | |
| if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible()) | |
| call_rcu(&t->rcu, __put_task_struct_rcu_cb); | |
| - else | |
| + else { | |
| + static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP); | |
| + lock_map_acquire_try(&put_task_map); | |
| __put_task_struct(t); | |
| + lock_map_release(&put_task_map); | |
| + } | |
| } | |
| DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T)) | |
| diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h | |
| index 79b328861c5f..35f44352e641 100644 | |
| --- a/include/linux/serial_8250.h | |
| +++ b/include/linux/serial_8250.h | |
| @@ -7,6 +7,7 @@ | |
| #ifndef _LINUX_SERIAL_8250_H | |
| #define _LINUX_SERIAL_8250_H | |
| +#include <linux/atomic.h> | |
| #include <linux/serial_core.h> | |
| #include <linux/serial_reg.h> | |
| #include <linux/platform_device.h> | |
| @@ -124,6 +125,8 @@ struct uart_8250_port { | |
| #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA | |
| unsigned char msr_saved_flags; | |
| + atomic_t console_printing; | |
| + | |
| struct uart_8250_dma *dma; | |
| const struct uart_8250_ops *ops; | |
| @@ -179,6 +182,8 @@ void serial8250_init_port(struct uart_8250_port *up); | |
| void serial8250_set_defaults(struct uart_8250_port *up); | |
| void serial8250_console_write(struct uart_8250_port *up, const char *s, | |
| unsigned int count); | |
| +void serial8250_console_write_atomic(struct uart_8250_port *up, const char *s, | |
| + unsigned int count); | |
| int serial8250_console_setup(struct uart_port *port, char *options, bool probe); | |
| int serial8250_console_exit(struct uart_port *port); | |
| diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h | |
| index 9f392ec76f2b..779e0e96b9cb 100644 | |
| --- a/include/linux/thread_info.h | |
| +++ b/include/linux/thread_info.h | |
| @@ -177,7 +177,17 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti | |
| clear_ti_thread_flag(task_thread_info(t), TIF_##fl) | |
| #endif /* !CONFIG_GENERIC_ENTRY */ | |
| -#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| +#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ | |
| + test_thread_flag(TIF_NEED_RESCHED_LAZY)) | |
| +#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) | |
| +#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY) | |
| + | |
| +#else | |
| +#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) | |
| +#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) | |
| +#define tif_need_resched_lazy() 0 | |
| +#endif | |
| #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES | |
| static inline int arch_within_stack_frames(const void * const stack, | |
| diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h | |
| index f70624ec4188..1d4253b47214 100644 | |
| --- a/include/linux/trace_events.h | |
| +++ b/include/linux/trace_events.h | |
| @@ -70,6 +70,7 @@ struct trace_entry { | |
| unsigned char flags; | |
| unsigned char preempt_count; | |
| int pid; | |
| + unsigned char preempt_lazy_count; | |
| }; | |
| #define TRACE_EVENT_TYPE_MAX \ | |
| @@ -159,9 +160,10 @@ static inline void tracing_generic_entry_update(struct trace_entry *entry, | |
| unsigned int trace_ctx) | |
| { | |
| entry->preempt_count = trace_ctx & 0xff; | |
| + entry->preempt_lazy_count = (trace_ctx >> 16) & 0xff; | |
| entry->pid = current->pid; | |
| entry->type = type; | |
| - entry->flags = trace_ctx >> 16; | |
| + entry->flags = trace_ctx >> 24; | |
| } | |
| unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status); | |
| @@ -172,7 +174,13 @@ enum trace_flag_type { | |
| TRACE_FLAG_NEED_RESCHED = 0x04, | |
| TRACE_FLAG_HARDIRQ = 0x08, | |
| TRACE_FLAG_SOFTIRQ = 0x10, | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| + TRACE_FLAG_PREEMPT_RESCHED = 0x00, | |
| + TRACE_FLAG_NEED_RESCHED_LAZY = 0x20, | |
| +#else | |
| + TRACE_FLAG_NEED_RESCHED_LAZY = 0x00, | |
| TRACE_FLAG_PREEMPT_RESCHED = 0x20, | |
| +#endif | |
| TRACE_FLAG_NMI = 0x40, | |
| TRACE_FLAG_BH_OFF = 0x80, | |
| }; | |
| diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h | |
| index 79c3bbaa7e13..457879938fc1 100644 | |
| --- a/include/linux/u64_stats_sync.h | |
| +++ b/include/linux/u64_stats_sync.h | |
| @@ -214,16 +214,4 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, | |
| return __u64_stats_fetch_retry(syncp, start); | |
| } | |
| -/* Obsolete interfaces */ | |
| -static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) | |
| -{ | |
| - return u64_stats_fetch_begin(syncp); | |
| -} | |
| - | |
| -static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, | |
| - unsigned int start) | |
| -{ | |
| - return u64_stats_fetch_retry(syncp, start); | |
| -} | |
| - | |
| #endif /* _LINUX_U64_STATS_SYNC_H */ | |
| diff --git a/init/Kconfig b/init/Kconfig | |
| index 2825c8cfde3b..d0e087065f9f 100644 | |
| --- a/init/Kconfig | |
| +++ b/init/Kconfig | |
| @@ -1591,6 +1591,10 @@ config PRINTK | |
| very difficult to diagnose system problems, saying N here is | |
| strongly discouraged. | |
| +config HAVE_ATOMIC_CONSOLE | |
| + bool | |
| + default n | |
| + | |
| config BUG | |
| bool "BUG() support" if EXPERT | |
| default y | |
| diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt | |
| index c2f1fd95a821..260c08efeb48 100644 | |
| --- a/kernel/Kconfig.preempt | |
| +++ b/kernel/Kconfig.preempt | |
| @@ -1,5 +1,11 @@ | |
| # SPDX-License-Identifier: GPL-2.0-only | |
| +config HAVE_PREEMPT_LAZY | |
| + bool | |
| + | |
| +config PREEMPT_LAZY | |
| + def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT | |
| + | |
| config PREEMPT_NONE_BUILD | |
| bool | |
| diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c | |
| index d77597daa002..2d1ada6c9d02 100644 | |
| --- a/kernel/bpf/syscall.c | |
| +++ b/kernel/bpf/syscall.c | |
| @@ -2158,11 +2158,11 @@ static void bpf_prog_get_stats(const struct bpf_prog *prog, | |
| st = per_cpu_ptr(prog->stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&st->syncp); | |
| + start = u64_stats_fetch_begin(&st->syncp); | |
| tnsecs = u64_stats_read(&st->nsecs); | |
| tcnt = u64_stats_read(&st->cnt); | |
| tmisses = u64_stats_read(&st->misses); | |
| - } while (u64_stats_fetch_retry_irq(&st->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&st->syncp, start)); | |
| nsecs += tnsecs; | |
| cnt += tcnt; | |
| misses += tmisses; | |
| @@ -2775,28 +2775,31 @@ static void bpf_link_put_deferred(struct work_struct *work) | |
| bpf_link_free(link); | |
| } | |
| -/* bpf_link_put can be called from atomic context, but ensures that resources | |
| - * are freed from process context | |
| +/* bpf_link_put might be called from atomic context. It needs to be called | |
| + * from sleepable context in order to acquire sleeping locks during the process. | |
| */ | |
| void bpf_link_put(struct bpf_link *link) | |
| { | |
| if (!atomic64_dec_and_test(&link->refcnt)) | |
| return; | |
| - if (in_atomic()) { | |
| - INIT_WORK(&link->work, bpf_link_put_deferred); | |
| - schedule_work(&link->work); | |
| - } else { | |
| - bpf_link_free(link); | |
| - } | |
| + INIT_WORK(&link->work, bpf_link_put_deferred); | |
| + schedule_work(&link->work); | |
| } | |
| EXPORT_SYMBOL(bpf_link_put); | |
| +static void bpf_link_put_direct(struct bpf_link *link) | |
| +{ | |
| + if (!atomic64_dec_and_test(&link->refcnt)) | |
| + return; | |
| + bpf_link_free(link); | |
| +} | |
| + | |
| static int bpf_link_release(struct inode *inode, struct file *filp) | |
| { | |
| struct bpf_link *link = filp->private_data; | |
| - bpf_link_put(link); | |
| + bpf_link_put_direct(link); | |
| return 0; | |
| } | |
| @@ -4726,7 +4729,7 @@ static int link_update(union bpf_attr *attr) | |
| if (ret) | |
| bpf_prog_put(new_prog); | |
| out_put_link: | |
| - bpf_link_put(link); | |
| + bpf_link_put_direct(link); | |
| return ret; | |
| } | |
| @@ -4749,7 +4752,7 @@ static int link_detach(union bpf_attr *attr) | |
| else | |
| ret = -EOPNOTSUPP; | |
| - bpf_link_put(link); | |
| + bpf_link_put_direct(link); | |
| return ret; | |
| } | |
| @@ -4819,7 +4822,7 @@ static int bpf_link_get_fd_by_id(const union bpf_attr *attr) | |
| fd = bpf_link_new_fd(link); | |
| if (fd < 0) | |
| - bpf_link_put(link); | |
| + bpf_link_put_direct(link); | |
| return fd; | |
| } | |
| @@ -4896,7 +4899,7 @@ static int bpf_iter_create(union bpf_attr *attr) | |
| return PTR_ERR(link); | |
| err = bpf_iter_new_fd(link); | |
| - bpf_link_put(link); | |
| + bpf_link_put_direct(link); | |
| return err; | |
| } | |
| diff --git a/kernel/entry/common.c b/kernel/entry/common.c | |
| index ccf2b1e1b40b..36d1e787d3fd 100644 | |
| --- a/kernel/entry/common.c | |
| +++ b/kernel/entry/common.c | |
| @@ -161,7 +161,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, | |
| local_irq_enable_exit_to_user(ti_work); | |
| - if (ti_work & _TIF_NEED_RESCHED) | |
| + if (ti_work & _TIF_NEED_RESCHED_MASK) | |
| schedule(); | |
| if (ti_work & _TIF_UPROBE) | |
| @@ -392,7 +392,7 @@ void raw_irqentry_exit_cond_resched(void) | |
| rcu_irq_exit_check_preempt(); | |
| if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) | |
| WARN_ON_ONCE(!on_thread_stack()); | |
| - if (need_resched()) | |
| + if (should_resched(0)) | |
| preempt_schedule_irq(); | |
| } | |
| } | |
| diff --git a/kernel/hung_task.c b/kernel/hung_task.c | |
| index c71889f3f3fc..e2d2344cb9f4 100644 | |
| --- a/kernel/hung_task.c | |
| +++ b/kernel/hung_task.c | |
| @@ -127,6 +127,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) | |
| * complain: | |
| */ | |
| if (sysctl_hung_task_warnings) { | |
| + printk_prefer_direct_enter(); | |
| + | |
| if (sysctl_hung_task_warnings > 0) | |
| sysctl_hung_task_warnings--; | |
| pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n", | |
| @@ -142,6 +144,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) | |
| if (sysctl_hung_task_all_cpu_backtrace) | |
| hung_task_show_all_bt = true; | |
| + | |
| + printk_prefer_direct_exit(); | |
| } | |
| touch_nmi_watchdog(); | |
| @@ -212,12 +216,17 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) | |
| } | |
| unlock: | |
| rcu_read_unlock(); | |
| - if (hung_task_show_lock) | |
| + if (hung_task_show_lock) { | |
| + printk_prefer_direct_enter(); | |
| debug_show_all_locks(); | |
| + printk_prefer_direct_exit(); | |
| + } | |
| if (hung_task_show_all_bt) { | |
| hung_task_show_all_bt = false; | |
| + printk_prefer_direct_enter(); | |
| trigger_all_cpu_backtrace(); | |
| + printk_prefer_direct_exit(); | |
| } | |
| if (hung_task_call_panic) | |
| diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c | |
| index 65dba9076f31..ab18048e2186 100644 | |
| --- a/kernel/ksysfs.c | |
| +++ b/kernel/ksysfs.c | |
| @@ -142,6 +142,15 @@ KERNEL_ATTR_RO(vmcoreinfo); | |
| #endif /* CONFIG_CRASH_CORE */ | |
| +#if defined(CONFIG_PREEMPT_RT) | |
| +static ssize_t realtime_show(struct kobject *kobj, | |
| + struct kobj_attribute *attr, char *buf) | |
| +{ | |
| + return sprintf(buf, "%d\n", 1); | |
| +} | |
| +KERNEL_ATTR_RO(realtime); | |
| +#endif | |
| + | |
| /* whether file capabilities are enabled */ | |
| static ssize_t fscaps_show(struct kobject *kobj, | |
| struct kobj_attribute *attr, char *buf) | |
| @@ -232,6 +241,9 @@ static struct attribute * kernel_attrs[] = { | |
| #ifndef CONFIG_TINY_RCU | |
| &rcu_expedited_attr.attr, | |
| &rcu_normal_attr.attr, | |
| +#endif | |
| +#ifdef CONFIG_PREEMPT_RT | |
| + &realtime_attr.attr, | |
| #endif | |
| NULL | |
| }; | |
| diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c | |
| index 3b38303ed27b..a046e03c7ead 100644 | |
| --- a/kernel/locking/lockdep.c | |
| +++ b/kernel/locking/lockdep.c | |
| @@ -2245,6 +2245,9 @@ static inline bool usage_match(struct lock_list *entry, void *mask) | |
| static inline bool usage_skip(struct lock_list *entry, void *mask) | |
| { | |
| + if (entry->class->lock_type == LD_LOCK_NORMAL) | |
| + return false; | |
| + | |
| /* | |
| * Skip local_lock() for irq inversion detection. | |
| * | |
| @@ -2271,14 +2274,16 @@ static inline bool usage_skip(struct lock_list *entry, void *mask) | |
| * As a result, we will skip local_lock(), when we search for irq | |
| * inversion bugs. | |
| */ | |
| - if (entry->class->lock_type == LD_LOCK_PERCPU) { | |
| - if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG)) | |
| - return false; | |
| + if (entry->class->lock_type == LD_LOCK_PERCPU && | |
| + DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG)) | |
| + return false; | |
| - return true; | |
| - } | |
| + /* | |
| + * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually | |
| + * a lock and only used to override the wait_type. | |
| + */ | |
| - return false; | |
| + return true; | |
| } | |
| /* | |
| @@ -4745,7 +4750,8 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next) | |
| for (; depth < curr->lockdep_depth; depth++) { | |
| struct held_lock *prev = curr->held_locks + depth; | |
| - u8 prev_inner = hlock_class(prev)->wait_type_inner; | |
| + struct lock_class *class = hlock_class(prev); | |
| + u8 prev_inner = class->wait_type_inner; | |
| if (prev_inner) { | |
| /* | |
| @@ -4755,6 +4761,14 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next) | |
| * Also due to trylocks. | |
| */ | |
| curr_inner = min(curr_inner, prev_inner); | |
| + | |
| + /* | |
| + * Allow override for annotations -- this is typically | |
| + * only valid/needed for code that only exists when | |
| + * CONFIG_PREEMPT_RT=n. | |
| + */ | |
| + if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE)) | |
| + curr_inner = prev_inner; | |
| } | |
| } | |
| diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c | |
| index c201aadb9301..25ec0239477c 100644 | |
| --- a/kernel/locking/rwbase_rt.c | |
| +++ b/kernel/locking/rwbase_rt.c | |
| @@ -72,15 +72,6 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb, | |
| int ret; | |
| raw_spin_lock_irq(&rtm->wait_lock); | |
| - /* | |
| - * Allow readers, as long as the writer has not completely | |
| - * acquired the semaphore for write. | |
| - */ | |
| - if (atomic_read(&rwb->readers) != WRITER_BIAS) { | |
| - atomic_inc(&rwb->readers); | |
| - raw_spin_unlock_irq(&rtm->wait_lock); | |
| - return 0; | |
| - } | |
| /* | |
| * Call into the slow lock path with the rtmutex->wait_lock | |
| diff --git a/kernel/panic.c b/kernel/panic.c | |
| index e6c2bf04a32c..8dbe642c861d 100644 | |
| --- a/kernel/panic.c | |
| +++ b/kernel/panic.c | |
| @@ -322,7 +322,6 @@ void panic(const char *fmt, ...) | |
| panic_smp_self_stop(); | |
| console_verbose(); | |
| - bust_spinlocks(1); | |
| va_start(args, fmt); | |
| len = vscnprintf(buf, sizeof(buf), fmt, args); | |
| va_end(args); | |
| @@ -339,6 +338,11 @@ void panic(const char *fmt, ...) | |
| dump_stack(); | |
| #endif | |
| + /* If atomic consoles are available, flush the kernel log. */ | |
| + console_flush_on_panic(CONSOLE_ATOMIC_FLUSH_PENDING); | |
| + | |
| + bust_spinlocks(1); | |
| + | |
| /* | |
| * If kgdb is enabled, give it a chance to run before we stop all | |
| * the other CPUs or else we won't be able to debug processes left | |
| @@ -661,6 +665,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint, | |
| { | |
| disable_trace_on_warning(); | |
| + printk_prefer_direct_enter(); | |
| + | |
| if (file) | |
| pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n", | |
| raw_smp_processor_id(), current->pid, file, line, | |
| @@ -689,6 +695,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint, | |
| /* Just a warning, don't kill lockdep. */ | |
| add_taint(taint, LOCKDEP_STILL_OK); | |
| + | |
| + printk_prefer_direct_exit(); | |
| } | |
| #ifndef __WARN_FLAGS | |
| diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h | |
| index d947ca6c84f9..e7d8578860ad 100644 | |
| --- a/kernel/printk/internal.h | |
| +++ b/kernel/printk/internal.h | |
| @@ -20,6 +20,8 @@ enum printk_info_flags { | |
| LOG_CONT = 8, /* text is a fragment of a continuation line */ | |
| }; | |
| +extern bool block_console_kthreads; | |
| + | |
| __printf(4, 0) | |
| int vprintk_store(int facility, int level, | |
| const struct dev_printk_info *dev_info, | |
| diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c | |
| index 0ae06d5046bb..701e82b40309 100644 | |
| --- a/kernel/printk/printk.c | |
| +++ b/kernel/printk/printk.c | |
| @@ -44,6 +44,7 @@ | |
| #include <linux/irq_work.h> | |
| #include <linux/ctype.h> | |
| #include <linux/uio.h> | |
| +#include <linux/clocksource.h> | |
| #include <linux/sched/clock.h> | |
| #include <linux/sched/debug.h> | |
| #include <linux/sched/task_stack.h> | |
| @@ -220,6 +221,36 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, | |
| } | |
| #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */ | |
| +/* | |
| + * Used to synchronize printing kthreads against direct printing via | |
| + * console_trylock/console_unlock. | |
| + * | |
| + * Values: | |
| + * -1 = console kthreads atomically blocked (via global trylock) | |
| + * 0 = no kthread printing, console not locked (via trylock) | |
| + * >0 = kthread(s) actively printing | |
| + * | |
| + * Note: For synchronizing against direct printing via | |
| + * console_lock/console_unlock, see the @lock variable in | |
| + * struct console. | |
| + */ | |
| +static atomic_t console_kthreads_active = ATOMIC_INIT(0); | |
| + | |
| +#define console_kthreads_atomic_tryblock() \ | |
| + (atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0) | |
| +#define console_kthreads_atomic_unblock() \ | |
| + atomic_cmpxchg(&console_kthreads_active, -1, 0) | |
| +#define console_kthreads_atomically_blocked() \ | |
| + (atomic_read(&console_kthreads_active) == -1) | |
| + | |
| +#define console_kthread_printing_tryenter() \ | |
| + atomic_inc_unless_negative(&console_kthreads_active) | |
| +#define console_kthread_printing_exit() \ | |
| + atomic_dec(&console_kthreads_active) | |
| + | |
| +/* Block console kthreads to avoid processing new messages. */ | |
| +bool block_console_kthreads; | |
| + | |
| /* | |
| * Helper macros to handle lockdep when locking/unlocking console_sem. We use | |
| * macros instead of functions so that _RET_IP_ contains useful information. | |
| @@ -268,14 +299,49 @@ static bool panic_in_progress(void) | |
| } | |
| /* | |
| - * This is used for debugging the mess that is the VT code by | |
| - * keeping track if we have the console semaphore held. It's | |
| - * definitely not the perfect debug tool (we don't know if _WE_ | |
| - * hold it and are racing, but it helps tracking those weird code | |
| - * paths in the console code where we end up in places I want | |
| - * locked without the console semaphore held). | |
| + * Tracks whether kthread printers are all blocked. A value of true implies | |
| + * that the console is locked via console_lock() or the console is suspended. | |
| + * Writing to this variable requires holding @console_sem. | |
| + */ | |
| +static bool console_kthreads_blocked; | |
| + | |
| +/* | |
| + * Block all kthread printers from a schedulable context. | |
| + * | |
| + * Requires holding @console_sem. | |
| */ | |
| -static int console_locked, console_suspended; | |
| +static void console_kthreads_block(void) | |
| +{ | |
| + struct console *con; | |
| + | |
| + for_each_console(con) { | |
| + mutex_lock(&con->lock); | |
| + con->blocked = true; | |
| + mutex_unlock(&con->lock); | |
| + } | |
| + | |
| + console_kthreads_blocked = true; | |
| +} | |
| + | |
| +/* | |
| + * Unblock all kthread printers from a schedulable context. | |
| + * | |
| + * Requires holding @console_sem. | |
| + */ | |
| +static void console_kthreads_unblock(void) | |
| +{ | |
| + struct console *con; | |
| + | |
| + for_each_console(con) { | |
| + mutex_lock(&con->lock); | |
| + con->blocked = false; | |
| + mutex_unlock(&con->lock); | |
| + } | |
| + | |
| + console_kthreads_blocked = false; | |
| +} | |
| + | |
| +static int console_suspended; | |
| /* | |
| * Array of consoles built from command line options (console=) | |
| @@ -358,7 +424,75 @@ static int console_msg_format = MSG_FORMAT_DEFAULT; | |
| /* syslog_lock protects syslog_* variables and write access to clear_seq. */ | |
| static DEFINE_MUTEX(syslog_lock); | |
| +/* | |
| + * A flag to signify if printk_activate_kthreads() has already started the | |
| + * kthread printers. If true, any later registered consoles must start their | |
| + * own kthread directly. The flag is write protected by the console_lock. | |
| + */ | |
| +static bool printk_kthreads_available; | |
| + | |
| #ifdef CONFIG_PRINTK | |
| +static atomic_t printk_prefer_direct = ATOMIC_INIT(0); | |
| + | |
| +/** | |
| + * printk_prefer_direct_enter - cause printk() calls to attempt direct | |
| + * printing to all enabled consoles | |
| + * | |
| + * Since it is not possible to call into the console printing code from any | |
| + * context, there is no guarantee that direct printing will occur. | |
| + * | |
| + * This globally effects all printk() callers. | |
| + * | |
| + * Context: Any context. | |
| + */ | |
| +void printk_prefer_direct_enter(void) | |
| +{ | |
| + atomic_inc(&printk_prefer_direct); | |
| +} | |
| + | |
| +/** | |
| + * printk_prefer_direct_exit - restore printk() behavior | |
| + * | |
| + * Context: Any context. | |
| + */ | |
| +void printk_prefer_direct_exit(void) | |
| +{ | |
| + WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0); | |
| +} | |
| + | |
| +/* | |
| + * Calling printk() always wakes kthread printers so that they can | |
| + * flush the new message to their respective consoles. Also, if direct | |
| + * printing is allowed, printk() tries to flush the messages directly. | |
| + * | |
| + * Direct printing is allowed in situations when the kthreads | |
| + * are not available or the system is in a problematic state. | |
| + * | |
| + * See the implementation about possible races. | |
| + */ | |
| +static inline bool allow_direct_printing(void) | |
| +{ | |
| + /* | |
| + * Checking kthread availability is a possible race because the | |
| + * kthread printers can become permanently disabled during runtime. | |
| + * However, doing that requires holding the console_lock, so any | |
| + * pending messages will be direct printed by console_unlock(). | |
| + */ | |
| + if (!printk_kthreads_available) | |
| + return true; | |
| + | |
| + /* | |
| + * Prefer direct printing when the system is in a problematic state. | |
| + * The context that sets this state will always see the updated value. | |
| + * The other contexts do not care. Anyway, direct printing is just a | |
| + * best effort. The direct output is only possible when console_lock | |
| + * is not already taken and no kthread printers are actively printing. | |
| + */ | |
| + return (system_state > SYSTEM_RUNNING || | |
| + oops_in_progress || | |
| + atomic_read(&printk_prefer_direct)); | |
| +} | |
| + | |
| DECLARE_WAIT_QUEUE_HEAD(log_wait); | |
| /* All 3 protected by @syslog_lock. */ | |
| /* the next printk record to read by syslog(READ) or /proc/kmsg */ | |
| @@ -1876,6 +2010,7 @@ static int console_lock_spinning_disable_and_check(void) | |
| return 1; | |
| } | |
| +#if !IS_ENABLED(CONFIG_PREEMPT_RT) | |
| /** | |
| * console_trylock_spinning - try to get console_lock by busy waiting | |
| * | |
| @@ -1955,6 +2090,7 @@ static int console_trylock_spinning(void) | |
| return 1; | |
| } | |
| +#endif /* CONFIG_PREEMPT_RT */ | |
| /* | |
| * Call the specified console driver, asking it to write out the specified | |
| @@ -1962,19 +2098,28 @@ static int console_trylock_spinning(void) | |
| * dropped, a dropped message will be written out first. | |
| */ | |
| static void call_console_driver(struct console *con, const char *text, size_t len, | |
| - char *dropped_text) | |
| + char *dropped_text, bool atomic_printing) | |
| { | |
| + unsigned long dropped = 0; | |
| size_t dropped_len; | |
| - if (con->dropped && dropped_text) { | |
| + if (dropped_text) | |
| + dropped = atomic_long_xchg_relaxed(&con->dropped, 0); | |
| + | |
| + if (dropped) { | |
| dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX, | |
| "** %lu printk messages dropped **\n", | |
| - con->dropped); | |
| - con->dropped = 0; | |
| - con->write(con, dropped_text, dropped_len); | |
| + dropped); | |
| + if (atomic_printing) | |
| + con->write_atomic(con, dropped_text, dropped_len); | |
| + else | |
| + con->write(con, dropped_text, dropped_len); | |
| } | |
| - con->write(con, text, len); | |
| + if (atomic_printing) | |
| + con->write_atomic(con, text, len); | |
| + else | |
| + con->write(con, text, len); | |
| } | |
| /* | |
| @@ -2284,10 +2429,22 @@ asmlinkage int vprintk_emit(int facility, int level, | |
| printed_len = vprintk_store(facility, level, dev_info, fmt, args); | |
| /* If called from the scheduler, we can not call up(). */ | |
| - if (!in_sched) { | |
| + if (!in_sched && allow_direct_printing()) { | |
| +#if IS_ENABLED(CONFIG_PREEMPT_RT) | |
| + /* | |
| + * Use the non-spinning trylock since PREEMPT_RT does not | |
| + * support console lock handovers. | |
| + * | |
| + * Direct printing will most likely involve taking spinlocks. | |
| + * For PREEMPT_RT, this is only allowed if in a preemptible | |
| + * context. | |
| + */ | |
| + if (preemptible() && console_trylock()) | |
| + console_unlock(); | |
| +#else | |
| /* | |
| * The caller may be holding system-critical or | |
| - * timing-sensitive locks. Disable preemption during | |
| + * timing-sensitive locks. Disable preemption during direct | |
| * printing of all remaining records to all consoles so that | |
| * this context can return as soon as possible. Hopefully | |
| * another printk() caller will take over the printing. | |
| @@ -2302,6 +2459,7 @@ asmlinkage int vprintk_emit(int facility, int level, | |
| if (console_trylock_spinning()) | |
| console_unlock(); | |
| preempt_enable(); | |
| +#endif | |
| } | |
| if (in_sched) | |
| @@ -2332,9 +2490,81 @@ asmlinkage __visible int _printk(const char *fmt, ...) | |
| } | |
| EXPORT_SYMBOL(_printk); | |
| +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE | |
| +static void __free_atomic_data(struct console_atomic_data *d) | |
| +{ | |
| + kfree(d->text); | |
| + kfree(d->ext_text); | |
| + kfree(d->dropped_text); | |
| +} | |
| + | |
| +static void free_atomic_data(struct console_atomic_data *d) | |
| +{ | |
| + int count = 1; | |
| + int i; | |
| + | |
| + if (!d) | |
| + return; | |
| + | |
| +#ifdef CONFIG_HAVE_NMI | |
| + count = 2; | |
| +#endif | |
| + | |
| + for (i = 0; i < count; i++) | |
| + __free_atomic_data(&d[i]); | |
| + kfree(d); | |
| +} | |
| + | |
| +static int __alloc_atomic_data(struct console_atomic_data *d, short flags) | |
| +{ | |
| + d->text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL); | |
| + if (!d->text) | |
| + return -1; | |
| + | |
| + if (flags & CON_EXTENDED) { | |
| + d->ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL); | |
| + if (!d->ext_text) | |
| + return -1; | |
| + } else { | |
| + d->dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL); | |
| + if (!d->dropped_text) | |
| + return -1; | |
| + } | |
| + | |
| + return 0; | |
| +} | |
| + | |
| +static struct console_atomic_data *alloc_atomic_data(short flags) | |
| +{ | |
| + struct console_atomic_data *d; | |
| + int count = 1; | |
| + int i; | |
| + | |
| +#ifdef CONFIG_HAVE_NMI | |
| + count = 2; | |
| +#endif | |
| + | |
| + d = kzalloc(sizeof(*d) * count, GFP_KERNEL); | |
| + if (!d) | |
| + goto err_out; | |
| + | |
| + for (i = 0; i < count; i++) { | |
| + if (__alloc_atomic_data(&d[i], flags) != 0) | |
| + goto err_out; | |
| + } | |
| + | |
| + return d; | |
| +err_out: | |
| + free_atomic_data(d); | |
| + return NULL; | |
| +} | |
| +#endif /* CONFIG_HAVE_ATOMIC_CONSOLE */ | |
| + | |
| static bool pr_flush(int timeout_ms, bool reset_on_progress); | |
| static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); | |
| +static void printk_start_kthread(struct console *con); | |
| + | |
| #else /* CONFIG_PRINTK */ | |
| #define CONSOLE_LOG_MAX 0 | |
| @@ -2345,6 +2575,8 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre | |
| #define prb_first_valid_seq(rb) 0 | |
| #define prb_next_seq(rb) 0 | |
| +#define free_atomic_data(d) | |
| + | |
| static u64 syslog_seq; | |
| static size_t record_print_text(const struct printk_record *r, | |
| @@ -2363,12 +2595,14 @@ static ssize_t msg_print_ext_body(char *buf, size_t size, | |
| static void console_lock_spinning_enable(void) { } | |
| static int console_lock_spinning_disable_and_check(void) { return 0; } | |
| static void call_console_driver(struct console *con, const char *text, size_t len, | |
| - char *dropped_text) | |
| + char *dropped_text, bool atomic_printing) | |
| { | |
| } | |
| static bool suppress_message_printing(int level) { return false; } | |
| static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; } | |
| static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } | |
| +static void printk_start_kthread(struct console *con) { } | |
| +static bool allow_direct_printing(void) { return true; } | |
| #endif /* CONFIG_PRINTK */ | |
| @@ -2587,6 +2821,14 @@ static int console_cpu_notify(unsigned int cpu) | |
| /* If trylock fails, someone else is doing the printing */ | |
| if (console_trylock()) | |
| console_unlock(); | |
| + else { | |
| + /* | |
| + * If a new CPU comes online, the conditions for | |
| + * printer_should_wake() may have changed for some | |
| + * kthread printer with !CON_ANYTIME. | |
| + */ | |
| + wake_up_klogd(); | |
| + } | |
| } | |
| return 0; | |
| } | |
| @@ -2629,7 +2871,7 @@ void console_lock(void) | |
| down_console_sem(); | |
| if (console_suspended) | |
| return; | |
| - console_locked = 1; | |
| + console_kthreads_block(); | |
| console_may_schedule = 1; | |
| } | |
| EXPORT_SYMBOL(console_lock); | |
| @@ -2653,15 +2895,30 @@ int console_trylock(void) | |
| up_console_sem(); | |
| return 0; | |
| } | |
| - console_locked = 1; | |
| + if (!console_kthreads_atomic_tryblock()) { | |
| + up_console_sem(); | |
| + return 0; | |
| + } | |
| console_may_schedule = 0; | |
| return 1; | |
| } | |
| EXPORT_SYMBOL(console_trylock); | |
| +/* | |
| + * This is used to help to make sure that certain paths within the VT code are | |
| + * running with the console lock held. It is definitely not the perfect debug | |
| + * tool (it is not known if the VT code is the task holding the console lock), | |
| + * but it helps tracking those weird code paths in the console code such as | |
| + * when the console is suspended: where the console is not locked but no | |
| + * console printing may occur. | |
| + * | |
| + * Note: This returns true when the console is suspended but is not locked. | |
| + * This is intentional because the VT code must consider that situation | |
| + * the same as if the console was locked. | |
| + */ | |
| int is_console_locked(void) | |
| { | |
| - return console_locked; | |
| + return (console_kthreads_blocked || atomic_read(&console_kthreads_active)); | |
| } | |
| EXPORT_SYMBOL(is_console_locked); | |
| @@ -2671,12 +2928,9 @@ EXPORT_SYMBOL(is_console_locked); | |
| * | |
| * Requires the console_lock. | |
| */ | |
| -static inline bool console_is_usable(struct console *con) | |
| +static inline bool __console_is_usable(short flags) | |
| { | |
| - if (!(con->flags & CON_ENABLED)) | |
| - return false; | |
| - | |
| - if (!con->write) | |
| + if (!(flags & CON_ENABLED)) | |
| return false; | |
| /* | |
| @@ -2685,18 +2939,116 @@ static inline bool console_is_usable(struct console *con) | |
| * cope (CON_ANYTIME) don't call them until this CPU is officially up. | |
| */ | |
| if (!cpu_online(raw_smp_processor_id()) && | |
| - !(con->flags & CON_ANYTIME)) | |
| + !(flags & CON_ANYTIME)) | |
| return false; | |
| return true; | |
| } | |
| +/* | |
| + * Check if the given console is currently capable and allowed to print | |
| + * records. | |
| + * | |
| + * Requires holding the console_lock. | |
| + */ | |
| +static inline bool console_is_usable(struct console *con, bool atomic_printing) | |
| +{ | |
| + if (atomic_printing) { | |
| +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE | |
| + if (!con->write_atomic) | |
| + return false; | |
| + if (!con->atomic_data) | |
| + return false; | |
| +#else | |
| + return false; | |
| +#endif | |
| + } else if (!con->write) { | |
| + return false; | |
| + } | |
| + | |
| + return __console_is_usable(con->flags); | |
| +} | |
| + | |
| static void __console_unlock(void) | |
| { | |
| - console_locked = 0; | |
| + /* | |
| + * Depending on whether console_lock() or console_trylock() was used, | |
| + * appropriately allow the kthread printers to continue. | |
| + */ | |
| + if (console_kthreads_blocked) | |
| + console_kthreads_unblock(); | |
| + else | |
| + console_kthreads_atomic_unblock(); | |
| + | |
| + /* | |
| + * New records may have arrived while the console was locked. | |
| + * Wake the kthread printers to print them. | |
| + */ | |
| + wake_up_klogd(); | |
| + | |
| up_console_sem(); | |
| } | |
| +static u64 read_console_seq(struct console *con) | |
| +{ | |
| +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE | |
| + unsigned long flags; | |
| + u64 seq2; | |
| + u64 seq; | |
| + | |
| + if (!con->atomic_data) | |
| + return con->seq; | |
| + | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + | |
| + seq = con->seq; | |
| + seq2 = con->atomic_data[0].seq; | |
| + if (seq2 > seq) | |
| + seq = seq2; | |
| +#ifdef CONFIG_HAVE_NMI | |
| + seq2 = con->atomic_data[1].seq; | |
| + if (seq2 > seq) | |
| + seq = seq2; | |
| +#endif | |
| + | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| + | |
| + return seq; | |
| +#else /* CONFIG_HAVE_ATOMIC_CONSOLE */ | |
| + return con->seq; | |
| +#endif | |
| +} | |
| + | |
| +static void write_console_seq(struct console *con, u64 val, bool atomic_printing) | |
| +{ | |
| +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE | |
| + unsigned long flags; | |
| + u64 *seq; | |
| + | |
| + if (!con->atomic_data) { | |
| + con->seq = val; | |
| + return; | |
| + } | |
| + | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + | |
| + if (atomic_printing) { | |
| + seq = &con->atomic_data[0].seq; | |
| +#ifdef CONFIG_HAVE_NMI | |
| + if (in_nmi()) | |
| + seq = &con->atomic_data[1].seq; | |
| +#endif | |
| + } else { | |
| + seq = &con->seq; | |
| + } | |
| + *seq = val; | |
| + | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| +#else /* CONFIG_HAVE_ATOMIC_CONSOLE */ | |
| + con->seq = val; | |
| +#endif | |
| +} | |
| + | |
| /* | |
| * Print one record for the given console. The record printed is whatever | |
| * record is the next available record for the given console. | |
| @@ -2709,36 +3061,47 @@ static void __console_unlock(void) | |
| * If dropped messages should be printed, @dropped_text is a buffer of size | |
| * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL. | |
| * | |
| + * @atomic_printing specifies if atomic printing should be used. | |
| + * | |
| * @handover will be set to true if a printk waiter has taken over the | |
| * console_lock, in which case the caller is no longer holding the | |
| - * console_lock. Otherwise it is set to false. | |
| + * console_lock. Otherwise it is set to false. A NULL pointer may be provided | |
| + * to disable allowing the console_lock to be taken over by a printk waiter. | |
| * | |
| * Returns false if the given console has no next record to print, otherwise | |
| * true. | |
| * | |
| - * Requires the console_lock. | |
| + * Requires the console_lock if @handover is non-NULL. | |
| + * Requires con->lock otherwise. | |
| */ | |
| -static bool console_emit_next_record(struct console *con, char *text, char *ext_text, | |
| - char *dropped_text, bool *handover) | |
| +static bool __console_emit_next_record(struct console *con, char *text, char *ext_text, | |
| + char *dropped_text, bool atomic_printing, | |
| + bool *handover) | |
| { | |
| - static int panic_console_dropped; | |
| + static atomic_t panic_console_dropped = ATOMIC_INIT(0); | |
| struct printk_info info; | |
| struct printk_record r; | |
| unsigned long flags; | |
| char *write_text; | |
| size_t len; | |
| + u64 seq; | |
| prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX); | |
| - *handover = false; | |
| + if (handover) | |
| + *handover = false; | |
| - if (!prb_read_valid(prb, con->seq, &r)) | |
| + seq = read_console_seq(con); | |
| + | |
| + if (!prb_read_valid(prb, seq, &r)) | |
| return false; | |
| - if (con->seq != r.info->seq) { | |
| - con->dropped += r.info->seq - con->seq; | |
| - con->seq = r.info->seq; | |
| - if (panic_in_progress() && panic_console_dropped++ > 10) { | |
| + if (seq != r.info->seq) { | |
| + atomic_long_add((unsigned long)(r.info->seq - seq), &con->dropped); | |
| + write_console_seq(con, r.info->seq, atomic_printing); | |
| + seq = r.info->seq; | |
| + if (panic_in_progress() && | |
| + atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) { | |
| suppress_panic_printk = 1; | |
| pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n"); | |
| } | |
| @@ -2746,7 +3109,7 @@ static bool console_emit_next_record(struct console *con, char *text, char *ext_ | |
| /* Skip record that has level above the console loglevel. */ | |
| if (suppress_message_printing(r.info->level)) { | |
| - con->seq++; | |
| + write_console_seq(con, seq + 1, atomic_printing); | |
| goto skip; | |
| } | |
| @@ -2760,31 +3123,65 @@ static bool console_emit_next_record(struct console *con, char *text, char *ext_ | |
| len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); | |
| } | |
| - /* | |
| - * While actively printing out messages, if another printk() | |
| - * were to occur on another CPU, it may wait for this one to | |
| - * finish. This task can not be preempted if there is a | |
| - * waiter waiting to take over. | |
| - * | |
| - * Interrupts are disabled because the hand over to a waiter | |
| - * must not be interrupted until the hand over is completed | |
| - * (@console_waiter is cleared). | |
| - */ | |
| - printk_safe_enter_irqsave(flags); | |
| - console_lock_spinning_enable(); | |
| + if (handover) { | |
| + /* | |
| + * While actively printing out messages, if another printk() | |
| + * were to occur on another CPU, it may wait for this one to | |
| + * finish. This task can not be preempted if there is a | |
| + * waiter waiting to take over. | |
| + * | |
| + * Interrupts are disabled because the hand over to a waiter | |
| + * must not be interrupted until the hand over is completed | |
| + * (@console_waiter is cleared). | |
| + */ | |
| + printk_safe_enter_irqsave(flags); | |
| + console_lock_spinning_enable(); | |
| - stop_critical_timings(); /* don't trace print latency */ | |
| - call_console_driver(con, write_text, len, dropped_text); | |
| - start_critical_timings(); | |
| + /* don't trace irqsoff print latency */ | |
| + stop_critical_timings(); | |
| + } | |
| - con->seq++; | |
| + call_console_driver(con, write_text, len, dropped_text, atomic_printing); | |
| - *handover = console_lock_spinning_disable_and_check(); | |
| - printk_safe_exit_irqrestore(flags); | |
| + write_console_seq(con, seq + 1, atomic_printing); | |
| + | |
| + if (handover) { | |
| + start_critical_timings(); | |
| + *handover = console_lock_spinning_disable_and_check(); | |
| + printk_safe_exit_irqrestore(flags); | |
| + } | |
| skip: | |
| return true; | |
| } | |
| +/* | |
| + * Print a record for a given console, but allow another printk() caller to | |
| + * take over the console_lock and continue printing. | |
| + * | |
| + * Requires the console_lock, but depending on @handover after the call, the | |
| + * caller may no longer have the console_lock. | |
| + * | |
| + * See __console_emit_next_record() for argument and return details. | |
| + */ | |
| +static bool console_emit_next_record_transferable(struct console *con, char *text, char *ext_text, | |
| + char *dropped_text, bool *handover) | |
| +{ | |
| + /* | |
| + * Handovers are only supported if threaded printers are atomically | |
| + * blocked. The context taking over the console_lock may be atomic. | |
| + * | |
| + * PREEMPT_RT also does not support handovers because the spinning | |
| + * waiter can cause large latencies. | |
| + */ | |
| + if (!console_kthreads_atomically_blocked() || | |
| + IS_ENABLED(CONFIG_PREEMPT_RT)) { | |
| + *handover = false; | |
| + handover = NULL; | |
| + } | |
| + | |
| + return __console_emit_next_record(con, text, ext_text, dropped_text, false, handover); | |
| +} | |
| + | |
| /* | |
| * Print out all remaining records to all consoles. | |
| * | |
| @@ -2803,8 +3200,8 @@ static bool console_emit_next_record(struct console *con, char *text, char *ext_ | |
| * were flushed to all usable consoles. A returned false informs the caller | |
| * that everything was not flushed (either there were no usable consoles or | |
| * another context has taken over printing or it is a panic situation and this | |
| - * is not the panic CPU). Regardless the reason, the caller should assume it | |
| - * is not useful to immediately try again. | |
| + * is not the panic CPU or direct printing is not preferred). Regardless of | |
| + * the reason, the caller should assume it is not useful to immediately try again. | |
| * | |
| * Requires the console_lock. | |
| */ | |
| @@ -2821,24 +3218,26 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove | |
| *handover = false; | |
| do { | |
| + /* Let the kthread printers do the work if they can. */ | |
| + if (!allow_direct_printing()) | |
| + return false; | |
| + | |
| any_progress = false; | |
| for_each_console(con) { | |
| bool progress; | |
| - if (!console_is_usable(con)) | |
| + if (!console_is_usable(con, false)) | |
| continue; | |
| any_usable = true; | |
| if (con->flags & CON_EXTENDED) { | |
| /* Extended consoles do not print "dropped messages". */ | |
| - progress = console_emit_next_record(con, &text[0], | |
| - &ext_text[0], NULL, | |
| - handover); | |
| + progress = console_emit_next_record_transferable(con, &text[0], | |
| + &ext_text[0], NULL, handover); | |
| } else { | |
| - progress = console_emit_next_record(con, &text[0], | |
| - NULL, &dropped_text[0], | |
| - handover); | |
| + progress = console_emit_next_record_transferable(con, &text[0], | |
| + NULL, &dropped_text[0], handover); | |
| } | |
| if (*handover) | |
| return false; | |
| @@ -2863,6 +3262,68 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove | |
| return any_usable; | |
| } | |
| +#if defined(CONFIG_HAVE_ATOMIC_CONSOLE) && defined(CONFIG_PRINTK) | |
| +static bool console_emit_next_record(struct console *con, char *text, char *ext_text, | |
| + char *dropped_text, bool atomic_printing); | |
| + | |
| +static void atomic_console_flush_all(void) | |
| +{ | |
| + unsigned long flags; | |
| + struct console *con; | |
| + bool any_progress; | |
| + int index = 0; | |
| + | |
| + if (console_suspended) | |
| + return; | |
| + | |
| +#ifdef CONFIG_HAVE_NMI | |
| + if (in_nmi()) | |
| + index = 1; | |
| +#endif | |
| + | |
| + printk_cpu_sync_get_irqsave(flags); | |
| + | |
| + do { | |
| + any_progress = false; | |
| + | |
| + for_each_console(con) { | |
| + bool progress; | |
| + | |
| + if (!console_is_usable(con, true)) | |
| + continue; | |
| + | |
| + if (con->flags & CON_EXTENDED) { | |
| + /* Extended consoles do not print "dropped messages". */ | |
| + progress = console_emit_next_record(con, | |
| + &con->atomic_data->text[index], | |
| + &con->atomic_data->ext_text[index], | |
| + NULL, | |
| + true); | |
| + } else { | |
| + progress = console_emit_next_record(con, | |
| + &con->atomic_data->text[index], | |
| + NULL, | |
| + &con->atomic_data->dropped_text[index], | |
| + true); | |
| + } | |
| + | |
| + if (!progress) | |
| + continue; | |
| + any_progress = true; | |
| + | |
| + touch_softlockup_watchdog_sync(); | |
| + clocksource_touch_watchdog(); | |
| + rcu_cpu_stall_reset(); | |
| + touch_nmi_watchdog(); | |
| + } | |
| + } while (any_progress); | |
| + | |
| + printk_cpu_sync_put_irqrestore(flags); | |
| +} | |
| +#else /* CONFIG_HAVE_ATOMIC_CONSOLE && CONFIG_PRINTK */ | |
| +#define atomic_console_flush_all() | |
| +#endif | |
| + | |
| /** | |
| * console_unlock - unlock the console system | |
| * | |
| @@ -2953,10 +3414,13 @@ void console_unblank(void) | |
| if (oops_in_progress) { | |
| if (down_trylock_console_sem() != 0) | |
| return; | |
| + if (!console_kthreads_atomic_tryblock()) { | |
| + up_console_sem(); | |
| + return; | |
| + } | |
| } else | |
| console_lock(); | |
| - console_locked = 1; | |
| console_may_schedule = 0; | |
| for_each_console(c) | |
| if ((c->flags & CON_ENABLED) && c->unblank) | |
| @@ -2975,6 +3439,11 @@ void console_unblank(void) | |
| */ | |
| void console_flush_on_panic(enum con_flush_mode mode) | |
| { | |
| + if (mode == CONSOLE_ATOMIC_FLUSH_PENDING) { | |
| + atomic_console_flush_all(); | |
| + return; | |
| + } | |
| + | |
| /* | |
| * If someone else is holding the console lock, trylock will fail | |
| * and may_schedule may be set. Ignore and proceed to unlock so | |
| @@ -2991,7 +3460,7 @@ void console_flush_on_panic(enum con_flush_mode mode) | |
| seq = prb_first_valid_seq(prb); | |
| for_each_console(c) | |
| - c->seq = seq; | |
| + write_console_seq(c, seq, false); | |
| } | |
| console_unlock(); | |
| } | |
| @@ -3246,16 +3715,27 @@ void register_console(struct console *newcon) | |
| console_drivers->next = newcon; | |
| } | |
| - newcon->dropped = 0; | |
| + atomic_long_set(&newcon->dropped, 0); | |
| + newcon->thread = NULL; | |
| + newcon->blocked = true; | |
| + mutex_init(&newcon->lock); | |
| +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE | |
| + newcon->atomic_data = NULL; | |
| +#endif | |
| + | |
| if (newcon->flags & CON_PRINTBUFFER) { | |
| /* Get a consistent copy of @syslog_seq. */ | |
| mutex_lock(&syslog_lock); | |
| - newcon->seq = syslog_seq; | |
| + write_console_seq(newcon, syslog_seq, false); | |
| mutex_unlock(&syslog_lock); | |
| } else { | |
| /* Begin with next message. */ | |
| - newcon->seq = prb_next_seq(prb); | |
| + write_console_seq(newcon, prb_next_seq(prb), false); | |
| } | |
| + | |
| + if (printk_kthreads_available) | |
| + printk_start_kthread(newcon); | |
| + | |
| console_unlock(); | |
| console_sysfs_notify(); | |
| @@ -3279,6 +3759,7 @@ EXPORT_SYMBOL(register_console); | |
| int unregister_console(struct console *console) | |
| { | |
| + struct task_struct *thd; | |
| struct console *con; | |
| int res; | |
| @@ -3316,9 +3797,26 @@ int unregister_console(struct console *console) | |
| console_drivers->flags |= CON_CONSDEV; | |
| console->flags &= ~CON_ENABLED; | |
| + | |
| + /* | |
| + * console->thread can only be cleared under the console lock. But | |
| + * stopping the thread must be done without the console lock. The | |
| + * task that clears @thread is the task that stops the kthread. | |
| + */ | |
| + thd = console->thread; | |
| + console->thread = NULL; | |
| + | |
| console_unlock(); | |
| + | |
| + if (thd) | |
| + kthread_stop(thd); | |
| + | |
| console_sysfs_notify(); | |
| +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE | |
| + free_atomic_data(console->atomic_data); | |
| +#endif | |
| + | |
| if (console->exit) | |
| res = console->exit(console); | |
| @@ -3412,6 +3910,20 @@ static int __init printk_late_init(void) | |
| } | |
| late_initcall(printk_late_init); | |
| +static int __init printk_activate_kthreads(void) | |
| +{ | |
| + struct console *con; | |
| + | |
| + console_lock(); | |
| + printk_kthreads_available = true; | |
| + for_each_console(con) | |
| + printk_start_kthread(con); | |
| + console_unlock(); | |
| + | |
| + return 0; | |
| +} | |
| +early_initcall(printk_activate_kthreads); | |
| + | |
| #if defined CONFIG_PRINTK | |
| /* If @con is specified, only wait for that console. Otherwise wait for all. */ | |
| static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) | |
| @@ -3435,7 +3947,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre | |
| for_each_console(c) { | |
| if (con && con != c) | |
| continue; | |
| - if (!console_is_usable(c)) | |
| + if (!console_is_usable(c, false)) | |
| continue; | |
| printk_seq = c->seq; | |
| if (printk_seq < seq) | |
| @@ -3494,11 +4006,214 @@ static bool pr_flush(int timeout_ms, bool reset_on_progress) | |
| return __pr_flush(NULL, timeout_ms, reset_on_progress); | |
| } | |
| +static void __printk_fallback_preferred_direct(void) | |
| +{ | |
| + printk_prefer_direct_enter(); | |
| + pr_err("falling back to preferred direct printing\n"); | |
| + printk_kthreads_available = false; | |
| +} | |
| + | |
| +/* | |
| + * Enter preferred direct printing, but never exit. Mark console threads as | |
| + * unavailable. The system is then forever in preferred direct printing and | |
| + * any printing threads will exit. | |
| + * | |
| + * Must *not* be called under console_lock. Use | |
| + * __printk_fallback_preferred_direct() if already holding console_lock. | |
| + */ | |
| +static void printk_fallback_preferred_direct(void) | |
| +{ | |
| + console_lock(); | |
| + __printk_fallback_preferred_direct(); | |
| + console_unlock(); | |
| +} | |
| + | |
| +/* | |
| + * Print a record for a given console, not allowing another printk() caller | |
| + * to take over. This is appropriate for contexts that do not have the | |
| + * console_lock. | |
| + * | |
| + * See __console_emit_next_record() for argument and return details. | |
| + */ | |
| +static bool console_emit_next_record(struct console *con, char *text, char *ext_text, | |
| + char *dropped_text, bool atomic_printing) | |
| +{ | |
| + return __console_emit_next_record(con, text, ext_text, dropped_text, | |
| + atomic_printing, NULL); | |
| +} | |
| + | |
| +static bool printer_should_wake(struct console *con, u64 seq) | |
| +{ | |
| + short flags; | |
| + | |
| + if (kthread_should_stop() || !printk_kthreads_available) | |
| + return true; | |
| + | |
| + if (con->blocked || | |
| + console_kthreads_atomically_blocked() || | |
| + block_console_kthreads || | |
| + system_state > SYSTEM_RUNNING || | |
| + oops_in_progress) { | |
| + return false; | |
| + } | |
| + | |
| + /* | |
| + * This is an unsafe read from con->flags, but a false positive is | |
| + * not a problem. Worst case it would allow the printer to wake up | |
| + * although it is disabled. But the printer will notice that when | |
| + * attempting to print and instead go back to sleep. | |
| + */ | |
| + flags = data_race(READ_ONCE(con->flags)); | |
| + | |
| + if (!__console_is_usable(flags)) | |
| + return false; | |
| + | |
| + return prb_read_valid(prb, seq, NULL); | |
| +} | |
| + | |
| +static int printk_kthread_func(void *data) | |
| +{ | |
| + struct console *con = data; | |
| + char *dropped_text = NULL; | |
| + char *ext_text = NULL; | |
| + u64 seq = 0; | |
| + char *text; | |
| + int error; | |
| + | |
| +#ifdef CONFIG_HAVE_ATOMIC_CONSOLE | |
| + if (con->write_atomic) | |
| + con->atomic_data = alloc_atomic_data(con->flags); | |
| +#endif | |
| + | |
| + text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL); | |
| + if (!text) { | |
| + con_printk(KERN_ERR, con, "failed to allocate text buffer\n"); | |
| + printk_fallback_preferred_direct(); | |
| + goto out; | |
| + } | |
| + | |
| + if (con->flags & CON_EXTENDED) { | |
| + ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL); | |
| + if (!ext_text) { | |
| + con_printk(KERN_ERR, con, "failed to allocate ext_text buffer\n"); | |
| + printk_fallback_preferred_direct(); | |
| + goto out; | |
| + } | |
| + } else { | |
| + dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL); | |
| + if (!dropped_text) { | |
| + con_printk(KERN_ERR, con, "failed to allocate dropped_text buffer\n"); | |
| + printk_fallback_preferred_direct(); | |
| + goto out; | |
| + } | |
| + } | |
| + | |
| + con_printk(KERN_INFO, con, "printing thread started\n"); | |
| + for (;;) { | |
| + /* | |
| + * Guarantee this task is visible on the waitqueue before | |
| + * checking the wake condition. | |
| + * | |
| + * The full memory barrier within set_current_state() of | |
| + * prepare_to_wait_event() pairs with the full memory barrier | |
| + * within wq_has_sleeper(). | |
| + * | |
| + * This pairs with __wake_up_klogd:A. | |
| + */ | |
| + error = wait_event_interruptible(log_wait, | |
| + printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */ | |
| + | |
| + if (kthread_should_stop() || !printk_kthreads_available) | |
| + break; | |
| + | |
| + if (error) | |
| + continue; | |
| + | |
| + error = mutex_lock_interruptible(&con->lock); | |
| + if (error) | |
| + continue; | |
| + | |
| + if (con->blocked || | |
| + !console_kthread_printing_tryenter()) { | |
| + /* Another context has locked the console_lock. */ | |
| + mutex_unlock(&con->lock); | |
| + continue; | |
| + } | |
| + | |
| + /* | |
| + * Although this context has not locked the console_lock, it | |
| + * is known that the console_lock is not locked and it is not | |
| + * possible for any other context to lock the console_lock. | |
| + * Therefore it is safe to read con->flags. | |
| + */ | |
| + | |
| + if (!__console_is_usable(con->flags)) { | |
| + console_kthread_printing_exit(); | |
| + mutex_unlock(&con->lock); | |
| + continue; | |
| + } | |
| + | |
| + /* | |
| + * Even though the printk kthread is always preemptible, it is | |
| + * still not allowed to call cond_resched() from within | |
| + * console drivers. The task may become non-preemptible in the | |
| + * console driver call chain. For example, vt_console_print() | |
| + * takes a spinlock and then can call into fbcon_redraw(), | |
| + * which can conditionally invoke cond_resched(). | |
| + */ | |
| + console_may_schedule = 0; | |
| + console_emit_next_record(con, text, ext_text, dropped_text, false); | |
| + | |
| + seq = con->seq; | |
| + | |
| + console_kthread_printing_exit(); | |
| + | |
| + mutex_unlock(&con->lock); | |
| + } | |
| + | |
| + con_printk(KERN_INFO, con, "printing thread stopped\n"); | |
| +out: | |
| + kfree(dropped_text); | |
| + kfree(ext_text); | |
| + kfree(text); | |
| + | |
| + console_lock(); | |
| + /* | |
| + * If this kthread is being stopped by another task, con->thread will | |
| + * already be NULL. That is fine. The important thing is that it is | |
| + * NULL after the kthread exits. | |
| + */ | |
| + con->thread = NULL; | |
| + console_unlock(); | |
| + | |
| + return 0; | |
| +} | |
| + | |
| +/* Must be called under console_lock. */ | |
| +static void printk_start_kthread(struct console *con) | |
| +{ | |
| + /* | |
| + * Do not start a kthread if there is no write() callback. The | |
| + * kthreads assume the write() callback exists. | |
| + */ | |
| + if (!con->write) | |
| + return; | |
| + | |
| + con->thread = kthread_run(printk_kthread_func, con, | |
| + "pr/%s%d", con->name, con->index); | |
| + if (IS_ERR(con->thread)) { | |
| + con->thread = NULL; | |
| + con_printk(KERN_ERR, con, "unable to start printing thread\n"); | |
| + __printk_fallback_preferred_direct(); | |
| + return; | |
| + } | |
| +} | |
| + | |
| /* | |
| * Delayed printk version, for scheduler-internal messages: | |
| */ | |
| -#define PRINTK_PENDING_WAKEUP 0x01 | |
| -#define PRINTK_PENDING_OUTPUT 0x02 | |
| +#define PRINTK_PENDING_WAKEUP 0x01 | |
| +#define PRINTK_PENDING_DIRECT_OUTPUT 0x02 | |
| static DEFINE_PER_CPU(int, printk_pending); | |
| @@ -3506,10 +4221,14 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work) | |
| { | |
| int pending = this_cpu_xchg(printk_pending, 0); | |
| - if (pending & PRINTK_PENDING_OUTPUT) { | |
| + if (pending & PRINTK_PENDING_DIRECT_OUTPUT) { | |
| + printk_prefer_direct_enter(); | |
| + | |
| /* If trylock fails, someone else is doing the printing */ | |
| if (console_trylock()) | |
| console_unlock(); | |
| + | |
| + printk_prefer_direct_exit(); | |
| } | |
| if (pending & PRINTK_PENDING_WAKEUP) | |
| @@ -3534,10 +4253,11 @@ static void __wake_up_klogd(int val) | |
| * prepare_to_wait_event(), which is called after ___wait_event() adds | |
| * the waiter but before it has checked the wait condition. | |
| * | |
| - * This pairs with devkmsg_read:A and syslog_print:A. | |
| + * This pairs with devkmsg_read:A, syslog_print:A, and | |
| + * printk_kthread_func:A. | |
| */ | |
| if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */ | |
| - (val & PRINTK_PENDING_OUTPUT)) { | |
| + (val & PRINTK_PENDING_DIRECT_OUTPUT)) { | |
| this_cpu_or(printk_pending, val); | |
| irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); | |
| } | |
| @@ -3577,7 +4297,17 @@ void defer_console_output(void) | |
| * New messages may have been added directly to the ringbuffer | |
| * using vprintk_store(), so wake any waiters as well. | |
| */ | |
| - __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT); | |
| + int val = PRINTK_PENDING_WAKEUP; | |
| + | |
| + /* | |
| + * Make sure that some context will print the messages when direct | |
| + * printing is allowed. This happens in situations when the kthreads | |
| + * may not be reliable or may even be unusable. | |
| + */ | |
| + if (allow_direct_printing()) | |
| + val |= PRINTK_PENDING_DIRECT_OUTPUT; | |
| + | |
| + __wake_up_klogd(val); | |
| } | |
| void printk_trigger_flush(void) | |
| diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c | |
| index 6d10927a07d8..8e8fd2fb0a5b 100644 | |
| --- a/kernel/printk/printk_safe.c | |
| +++ b/kernel/printk/printk_safe.c | |
| @@ -8,7 +8,9 @@ | |
| #include <linux/smp.h> | |
| #include <linux/cpumask.h> | |
| #include <linux/printk.h> | |
| +#include <linux/console.h> | |
| #include <linux/kprobes.h> | |
| +#include <linux/delay.h> | |
| #include "internal.h" | |
| @@ -45,3 +47,33 @@ asmlinkage int vprintk(const char *fmt, va_list args) | |
| return vprintk_default(fmt, args); | |
| } | |
| EXPORT_SYMBOL(vprintk); | |
| + | |
| +/** | |
| + * try_block_console_kthreads() - Try to block console kthreads and | |
| + * make the global console_lock() available | |
| + * | |
| + * @timeout_ms: The maximum time (in ms) to wait. | |
| + * | |
| + * Prevent console kthreads from starting to process new messages. Wait | |
| + * until the global console_lock() becomes available. | |
| + * | |
| + * Context: Can be called in any context. | |
| + */ | |
| +void try_block_console_kthreads(int timeout_ms) | |
| +{ | |
| + block_console_kthreads = true; | |
| + | |
| + /* Do not wait when the console lock could not be safely taken. */ | |
| + if (this_cpu_read(printk_context) || in_nmi()) | |
| + return; | |
| + | |
| + while (timeout_ms > 0) { | |
| + if (console_trylock()) { | |
| + console_unlock(); | |
| + return; | |
| + } | |
| + | |
| + udelay(1000); | |
| + timeout_ms -= 1; | |
| + } | |
| +} | |
| diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c | |
| index c14517912cfa..8cebf6d8f10d 100644 | |
| --- a/kernel/rcu/rcutorture.c | |
| +++ b/kernel/rcu/rcutorture.c | |
| @@ -2364,6 +2364,12 @@ static int rcutorture_booster_init(unsigned int cpu) | |
| WARN_ON_ONCE(!t); | |
| sp.sched_priority = 2; | |
| sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
| +#ifdef CONFIG_PREEMPT_RT | |
| + t = per_cpu(timersd, cpu); | |
| + WARN_ON_ONCE(!t); | |
| + sp.sched_priority = 2; | |
| + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
| +#endif | |
| } | |
| /* Don't allow time recalculation while creating a new task. */ | |
| diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h | |
| index 11a82404a6ce..e99ba5db9a15 100644 | |
| --- a/kernel/rcu/tree_stall.h | |
| +++ b/kernel/rcu/tree_stall.h | |
| @@ -649,6 +649,7 @@ static void print_cpu_stall(unsigned long gps) | |
| * See Documentation/RCU/stallwarn.rst for info on how to debug | |
| * RCU CPU stall warnings. | |
| */ | |
| + printk_prefer_direct_enter(); | |
| trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected")); | |
| pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name); | |
| raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags); | |
| @@ -683,6 +684,7 @@ static void print_cpu_stall(unsigned long gps) | |
| */ | |
| set_tsk_need_resched(current); | |
| set_preempt_need_resched(); | |
| + printk_prefer_direct_exit(); | |
| } | |
| static void check_cpu_stall(struct rcu_data *rdp) | |
| diff --git a/kernel/reboot.c b/kernel/reboot.c | |
| index 6ebef11c8876..23a8cfed1a72 100644 | |
| --- a/kernel/reboot.c | |
| +++ b/kernel/reboot.c | |
| @@ -83,6 +83,7 @@ void kernel_restart_prepare(char *cmd) | |
| { | |
| blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); | |
| system_state = SYSTEM_RESTART; | |
| + try_block_console_kthreads(10000); | |
| usermodehelper_disable(); | |
| device_shutdown(); | |
| } | |
| @@ -283,6 +284,7 @@ static void kernel_shutdown_prepare(enum system_states state) | |
| blocking_notifier_call_chain(&reboot_notifier_list, | |
| (state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL); | |
| system_state = state; | |
| + try_block_console_kthreads(10000); | |
| usermodehelper_disable(); | |
| device_shutdown(); | |
| } | |
| @@ -837,9 +839,11 @@ static int __orderly_reboot(void) | |
| ret = run_cmd(reboot_cmd); | |
| if (ret) { | |
| + printk_prefer_direct_enter(); | |
| pr_warn("Failed to start orderly reboot: forcing the issue\n"); | |
| emergency_sync(); | |
| kernel_restart(NULL); | |
| + printk_prefer_direct_exit(); | |
| } | |
| return ret; | |
| @@ -852,6 +856,7 @@ static int __orderly_poweroff(bool force) | |
| ret = run_cmd(poweroff_cmd); | |
| if (ret && force) { | |
| + printk_prefer_direct_enter(); | |
| pr_warn("Failed to start orderly shutdown: forcing the issue\n"); | |
| /* | |
| @@ -861,6 +866,7 @@ static int __orderly_poweroff(bool force) | |
| */ | |
| emergency_sync(); | |
| kernel_power_off(); | |
| + printk_prefer_direct_exit(); | |
| } | |
| return ret; | |
| @@ -918,6 +924,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot); | |
| */ | |
| static void hw_failure_emergency_poweroff_func(struct work_struct *work) | |
| { | |
| + printk_prefer_direct_enter(); | |
| + | |
| /* | |
| * We have reached here after the emergency shutdown waiting period has | |
| * expired. This means orderly_poweroff has not been able to shut off | |
| @@ -934,6 +942,8 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work) | |
| */ | |
| pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n"); | |
| emergency_restart(); | |
| + | |
| + printk_prefer_direct_exit(); | |
| } | |
| static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work, | |
| @@ -972,11 +982,13 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced) | |
| { | |
| static atomic_t allow_proceed = ATOMIC_INIT(1); | |
| + printk_prefer_direct_enter(); | |
| + | |
| pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason); | |
| /* Shutdown should be initiated only once. */ | |
| if (!atomic_dec_and_test(&allow_proceed)) | |
| - return; | |
| + goto out; | |
| /* | |
| * Queue a backup emergency shutdown in the event of | |
| @@ -984,6 +996,8 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced) | |
| */ | |
| hw_failure_emergency_poweroff(ms_until_forced); | |
| orderly_poweroff(true); | |
| +out: | |
| + printk_prefer_direct_exit(); | |
| } | |
| EXPORT_SYMBOL_GPL(hw_protection_shutdown); | |
| diff --git a/kernel/sched/core.c b/kernel/sched/core.c | |
| index 838857575937..21ea4332fdf6 100644 | |
| --- a/kernel/sched/core.c | |
| +++ b/kernel/sched/core.c | |
| @@ -1039,6 +1039,46 @@ void resched_curr(struct rq *rq) | |
| trace_sched_wake_idle_without_ipi(cpu); | |
| } | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| + | |
| +static int tsk_is_polling(struct task_struct *p) | |
| +{ | |
| +#ifdef TIF_POLLING_NRFLAG | |
| + return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); | |
| +#else | |
| + return 0; | |
| +#endif | |
| +} | |
| + | |
| +void resched_curr_lazy(struct rq *rq) | |
| +{ | |
| + struct task_struct *curr = rq->curr; | |
| + int cpu; | |
| + | |
| + if (!sched_feat(PREEMPT_LAZY)) { | |
| + resched_curr(rq); | |
| + return; | |
| + } | |
| + | |
| + if (test_tsk_need_resched(curr)) | |
| + return; | |
| + | |
| + if (test_tsk_need_resched_lazy(curr)) | |
| + return; | |
| + | |
| + set_tsk_need_resched_lazy(curr); | |
| + | |
| + cpu = cpu_of(rq); | |
| + if (cpu == smp_processor_id()) | |
| + return; | |
| + | |
| + /* NEED_RESCHED_LAZY must be visible before we test polling */ | |
| + smp_mb(); | |
| + if (!tsk_is_polling(curr)) | |
| + smp_send_reschedule(cpu); | |
| +} | |
| +#endif | |
| + | |
| void resched_cpu(int cpu) | |
| { | |
| struct rq *rq = cpu_rq(cpu); | |
| @@ -2220,6 +2260,7 @@ void migrate_disable(void) | |
| preempt_disable(); | |
| this_rq()->nr_pinned++; | |
| p->migration_disabled = 1; | |
| + preempt_lazy_disable(); | |
| preempt_enable(); | |
| } | |
| EXPORT_SYMBOL_GPL(migrate_disable); | |
| @@ -2251,6 +2292,7 @@ void migrate_enable(void) | |
| barrier(); | |
| p->migration_disabled = 0; | |
| this_rq()->nr_pinned--; | |
| + preempt_lazy_enable(); | |
| preempt_enable(); | |
| } | |
| EXPORT_SYMBOL_GPL(migrate_enable); | |
| @@ -3277,6 +3319,76 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, | |
| } | |
| #endif /* CONFIG_NUMA_BALANCING */ | |
| +#ifdef CONFIG_PREEMPT_RT | |
| + | |
| +/* | |
| + * Consider: | |
| + * | |
| + * set_special_state(X); | |
| + * | |
| + * do_things() | |
| + * // Somewhere in there is an rtlock that can be contended: | |
| + * current_save_and_set_rtlock_wait_state(); | |
| + * [...] | |
| + * schedule_rtlock(); (A) | |
| + * [...] | |
| + * current_restore_rtlock_saved_state(); | |
| + * | |
| + * schedule(); (B) | |
| + * | |
| + * If p->saved_state is anything else than TASK_RUNNING, then p blocked on an | |
| + * rtlock (A) *before* voluntarily calling into schedule() (B) after setting its | |
| + * state to X. For things like ptrace (X=TASK_TRACED), the task could have more | |
| + * work to do upon acquiring the lock in do_things() before whoever called | |
| + * wait_task_inactive() should return. IOW, we have to wait for: | |
| + * | |
| + * p.saved_state = TASK_RUNNING | |
| + * p.__state = X | |
| + * | |
| + * which implies the task isn't blocked on an RT lock and got to schedule() (B). | |
| + * | |
| + * Also see comments in ttwu_state_match(). | |
| + */ | |
| + | |
| +static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state) | |
| +{ | |
| + unsigned long flags; | |
| + bool mismatch; | |
| + | |
| + raw_spin_lock_irqsave(&p->pi_lock, flags); | |
| + if (READ_ONCE(p->__state) & match_state) | |
| + mismatch = false; | |
| + else if (READ_ONCE(p->saved_state) & match_state) | |
| + mismatch = false; | |
| + else | |
| + mismatch = true; | |
| + | |
| + raw_spin_unlock_irqrestore(&p->pi_lock, flags); | |
| + return mismatch; | |
| +} | |
| +static __always_inline bool state_match(struct task_struct *p, unsigned int match_state, | |
| + bool *wait) | |
| +{ | |
| + if (READ_ONCE(p->__state) & match_state) | |
| + return true; | |
| + if (READ_ONCE(p->saved_state) & match_state) { | |
| + *wait = true; | |
| + return true; | |
| + } | |
| + return false; | |
| +} | |
| +#else | |
| +static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state) | |
| +{ | |
| + return !(READ_ONCE(p->__state) & match_state); | |
| +} | |
| +static __always_inline bool state_match(struct task_struct *p, unsigned int match_state, | |
| + bool *wait) | |
| +{ | |
| + return (READ_ONCE(p->__state) & match_state); | |
| +} | |
| +#endif | |
| + | |
| /* | |
| * wait_task_inactive - wait for a thread to unschedule. | |
| * | |
| @@ -3295,7 +3407,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, | |
| */ | |
| unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) | |
| { | |
| - int running, queued; | |
| + bool running, wait; | |
| struct rq_flags rf; | |
| unsigned long ncsw; | |
| struct rq *rq; | |
| @@ -3321,7 +3433,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state | |
| * is actually now running somewhere else! | |
| */ | |
| while (task_on_cpu(rq, p)) { | |
| - if (!(READ_ONCE(p->__state) & match_state)) | |
| + if (state_mismatch(p, match_state)) | |
| return 0; | |
| cpu_relax(); | |
| } | |
| @@ -3334,9 +3446,10 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state | |
| rq = task_rq_lock(p, &rf); | |
| trace_sched_wait_task(p); | |
| running = task_on_cpu(rq, p); | |
| - queued = task_on_rq_queued(p); | |
| + wait = task_on_rq_queued(p); | |
| ncsw = 0; | |
| - if (READ_ONCE(p->__state) & match_state) | |
| + | |
| + if (state_match(p, match_state, &wait)) | |
| ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ | |
| task_rq_unlock(rq, p, &rf); | |
| @@ -3366,7 +3479,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state | |
| * running right now), it's preempted, and we should | |
| * yield - it could be a while. | |
| */ | |
| - if (unlikely(queued)) { | |
| + if (unlikely(wait)) { | |
| ktime_t to = NSEC_PER_SEC / HZ; | |
| set_current_state(TASK_UNINTERRUPTIBLE); | |
| @@ -4647,6 +4760,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) | |
| p->on_cpu = 0; | |
| #endif | |
| init_task_preempt_count(p); | |
| +#ifdef CONFIG_HAVE_PREEMPT_LAZY | |
| + task_thread_info(p)->preempt_lazy_count = 0; | |
| +#endif | |
| #ifdef CONFIG_SMP | |
| plist_node_init(&p->pushable_tasks, MAX_PRIO); | |
| RB_CLEAR_NODE(&p->pushable_dl_tasks); | |
| @@ -6520,6 +6636,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) | |
| next = pick_next_task(rq, prev, &rf); | |
| clear_tsk_need_resched(prev); | |
| + clear_tsk_need_resched_lazy(prev); | |
| clear_preempt_need_resched(); | |
| #ifdef CONFIG_SCHED_DEBUG | |
| rq->last_seen_need_resched_ns = 0; | |
| @@ -6735,6 +6852,30 @@ static void __sched notrace preempt_schedule_common(void) | |
| } while (need_resched()); | |
| } | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| +/* | |
| + * If TIF_NEED_RESCHED is set then we allow being scheduled away since it | |
| + * was set by an RT task. Otherwise we try to avoid being scheduled out as | |
| + * long as the preempt_lazy_count counter is > 0. | |
| + */ | |
| +static __always_inline int preemptible_lazy(void) | |
| +{ | |
| + if (test_thread_flag(TIF_NEED_RESCHED)) | |
| + return 1; | |
| + if (current_thread_info()->preempt_lazy_count) | |
| + return 0; | |
| + return 1; | |
| +} | |
| + | |
| +#else | |
| + | |
| +static inline int preemptible_lazy(void) | |
| +{ | |
| + return 1; | |
| +} | |
| + | |
| +#endif | |
| + | |
| #ifdef CONFIG_PREEMPTION | |
| /* | |
| * This is the entry point to schedule() from in-kernel preemption | |
| @@ -6748,6 +6889,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) | |
| */ | |
| if (likely(!preemptible())) | |
| return; | |
| + if (!preemptible_lazy()) | |
| + return; | |
| preempt_schedule_common(); | |
| } | |
| NOKPROBE_SYMBOL(preempt_schedule); | |
| @@ -6795,6 +6938,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) | |
| if (likely(!preemptible())) | |
| return; | |
| + if (!preemptible_lazy()) | |
| + return; | |
| + | |
| do { | |
| /* | |
| * Because the function tracer can trace preempt_count_sub() | |
| @@ -9060,7 +9206,9 @@ void __init init_idle(struct task_struct *idle, int cpu) | |
| /* Set the preempt count _outside_ the spinlocks! */ | |
| init_idle_preempt_count(idle, cpu); | |
| - | |
| +#ifdef CONFIG_HAVE_PREEMPT_LAZY | |
| + task_thread_info(idle)->preempt_lazy_count = 0; | |
| +#endif | |
| /* | |
| * The idle tasks have their own, simple scheduling class: | |
| */ | |
| diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c | |
| index 1e12f731a033..50024a984c9b 100644 | |
| --- a/kernel/sched/fair.c | |
| +++ b/kernel/sched/fair.c | |
| @@ -4913,7 +4913,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | |
| ideal_runtime = sched_slice(cfs_rq, curr); | |
| delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; | |
| if (delta_exec > ideal_runtime) { | |
| - resched_curr(rq_of(cfs_rq)); | |
| + resched_curr_lazy(rq_of(cfs_rq)); | |
| /* | |
| * The current task ran long enough, ensure it doesn't get | |
| * re-elected due to buddy favours. | |
| @@ -4937,7 +4937,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | |
| return; | |
| if (delta > ideal_runtime) | |
| - resched_curr(rq_of(cfs_rq)); | |
| + resched_curr_lazy(rq_of(cfs_rq)); | |
| } | |
| static void | |
| @@ -5083,7 +5083,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) | |
| * validating it and just reschedule. | |
| */ | |
| if (queued) { | |
| - resched_curr(rq_of(cfs_rq)); | |
| + resched_curr_lazy(rq_of(cfs_rq)); | |
| return; | |
| } | |
| /* | |
| @@ -5232,7 +5232,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) | |
| * hierarchy can be throttled | |
| */ | |
| if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) | |
| - resched_curr(rq_of(cfs_rq)); | |
| + resched_curr_lazy(rq_of(cfs_rq)); | |
| } | |
| static __always_inline | |
| @@ -5983,7 +5983,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) | |
| if (delta < 0) { | |
| if (task_current(rq, p)) | |
| - resched_curr(rq); | |
| + resched_curr_lazy(rq); | |
| return; | |
| } | |
| hrtick_start(rq, delta); | |
| @@ -7737,7 +7737,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |
| return; | |
| preempt: | |
| - resched_curr(rq); | |
| + resched_curr_lazy(rq); | |
| /* | |
| * Only set the backward buddy when the current task is still | |
| * on the rq. This can happen when a wakeup gets interleaved | |
| @@ -11891,7 +11891,7 @@ static void task_fork_fair(struct task_struct *p) | |
| * 'current' within the tree based on its new key value. | |
| */ | |
| swap(curr->vruntime, se->vruntime); | |
| - resched_curr(rq); | |
| + resched_curr_lazy(rq); | |
| } | |
| se->vruntime -= cfs_rq->min_vruntime; | |
| @@ -11918,7 +11918,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) | |
| */ | |
| if (task_current(rq, p)) { | |
| if (p->prio > oldprio) | |
| - resched_curr(rq); | |
| + resched_curr_lazy(rq); | |
| } else | |
| check_preempt_curr(rq, p, 0); | |
| } | |
| diff --git a/kernel/sched/features.h b/kernel/sched/features.h | |
| index ee7f23c76bd3..e13090e33f3c 100644 | |
| --- a/kernel/sched/features.h | |
| +++ b/kernel/sched/features.h | |
| @@ -48,6 +48,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true) | |
| #ifdef CONFIG_PREEMPT_RT | |
| SCHED_FEAT(TTWU_QUEUE, false) | |
| +# ifdef CONFIG_PREEMPT_LAZY | |
| +SCHED_FEAT(PREEMPT_LAZY, true) | |
| +# endif | |
| #else | |
| /* | |
| diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h | |
| index f0c3d0d4a0dd..7a79f27deab1 100644 | |
| --- a/kernel/sched/sched.h | |
| +++ b/kernel/sched/sched.h | |
| @@ -2351,6 +2351,15 @@ extern void reweight_task(struct task_struct *p, const struct load_weight *lw); | |
| extern void resched_curr(struct rq *rq); | |
| extern void resched_cpu(int cpu); | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| +extern void resched_curr_lazy(struct rq *rq); | |
| +#else | |
| +static inline void resched_curr_lazy(struct rq *rq) | |
| +{ | |
| + resched_curr(rq); | |
| +} | |
| +#endif | |
| + | |
| extern struct rt_bandwidth def_rt_bandwidth; | |
| extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); | |
| extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); | |
| diff --git a/kernel/signal.c b/kernel/signal.c | |
| index 4bebd2443cc3..2e2eafc8375e 100644 | |
| --- a/kernel/signal.c | |
| +++ b/kernel/signal.c | |
| @@ -2302,13 +2302,13 @@ static int ptrace_stop(int exit_code, int why, unsigned long message, | |
| /* | |
| * Don't want to allow preemption here, because | |
| * sys_ptrace() needs this task to be inactive. | |
| - * | |
| - * XXX: implement read_unlock_no_resched(). | |
| */ | |
| - preempt_disable(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + preempt_disable(); | |
| read_unlock(&tasklist_lock); | |
| cgroup_enter_frozen(); | |
| - preempt_enable_no_resched(); | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) | |
| + preempt_enable_no_resched(); | |
| schedule(); | |
| cgroup_leave_frozen(true); | |
| diff --git a/kernel/softirq.c b/kernel/softirq.c | |
| index a47396161843..e1a6e53a8ceb 100644 | |
| --- a/kernel/softirq.c | |
| +++ b/kernel/softirq.c | |
| @@ -80,21 +80,6 @@ static void wakeup_softirqd(void) | |
| wake_up_process(tsk); | |
| } | |
| -/* | |
| - * If ksoftirqd is scheduled, we do not want to process pending softirqs | |
| - * right now. Let ksoftirqd handle this at its own rate, to get fairness, | |
| - * unless we're doing some of the synchronous softirqs. | |
| - */ | |
| -#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ)) | |
| -static bool ksoftirqd_running(unsigned long pending) | |
| -{ | |
| - struct task_struct *tsk = __this_cpu_read(ksoftirqd); | |
| - | |
| - if (pending & SOFTIRQ_NOW_MASK) | |
| - return false; | |
| - return tsk && task_is_running(tsk) && !__kthread_should_park(tsk); | |
| -} | |
| - | |
| #ifdef CONFIG_TRACE_IRQFLAGS | |
| DEFINE_PER_CPU(int, hardirqs_enabled); | |
| DEFINE_PER_CPU(int, hardirq_context); | |
| @@ -236,7 +221,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) | |
| goto out; | |
| pending = local_softirq_pending(); | |
| - if (!pending || ksoftirqd_running(pending)) | |
| + if (!pending) | |
| goto out; | |
| /* | |
| @@ -432,9 +417,6 @@ static inline bool should_wake_ksoftirqd(void) | |
| static inline void invoke_softirq(void) | |
| { | |
| - if (ksoftirqd_running(local_softirq_pending())) | |
| - return; | |
| - | |
| if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) { | |
| #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK | |
| /* | |
| @@ -468,7 +450,7 @@ asmlinkage __visible void do_softirq(void) | |
| pending = local_softirq_pending(); | |
| - if (pending && !ksoftirqd_running(pending)) | |
| + if (pending) | |
| do_softirq_own_stack(); | |
| local_irq_restore(flags); | |
| @@ -641,6 +623,24 @@ static inline void tick_irq_exit(void) | |
| #endif | |
| } | |
| +#ifdef CONFIG_PREEMPT_RT | |
| +DEFINE_PER_CPU(struct task_struct *, timersd); | |
| +DEFINE_PER_CPU(unsigned long, pending_timer_softirq); | |
| + | |
| +static void wake_timersd(void) | |
| +{ | |
| + struct task_struct *tsk = __this_cpu_read(timersd); | |
| + | |
| + if (tsk) | |
| + wake_up_process(tsk); | |
| +} | |
| + | |
| +#else | |
| + | |
| +static inline void wake_timersd(void) { } | |
| + | |
| +#endif | |
| + | |
| static inline void __irq_exit_rcu(void) | |
| { | |
| #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED | |
| @@ -653,6 +653,10 @@ static inline void __irq_exit_rcu(void) | |
| if (!in_interrupt() && local_softirq_pending()) | |
| invoke_softirq(); | |
| + if (IS_ENABLED(CONFIG_PREEMPT_RT) && local_pending_timers() && | |
| + !(in_nmi() | in_hardirq())) | |
| + wake_timersd(); | |
| + | |
| tick_irq_exit(); | |
| } | |
| @@ -980,12 +984,70 @@ static struct smp_hotplug_thread softirq_threads = { | |
| .thread_comm = "ksoftirqd/%u", | |
| }; | |
| +#ifdef CONFIG_PREEMPT_RT | |
| +static void timersd_setup(unsigned int cpu) | |
| +{ | |
| + sched_set_fifo_low(current); | |
| +} | |
| + | |
| +static int timersd_should_run(unsigned int cpu) | |
| +{ | |
| + return local_pending_timers(); | |
| +} | |
| + | |
| +static void run_timersd(unsigned int cpu) | |
| +{ | |
| + unsigned int timer_si; | |
| + | |
| + ksoftirqd_run_begin(); | |
| + | |
| + timer_si = local_pending_timers(); | |
| + __this_cpu_write(pending_timer_softirq, 0); | |
| + or_softirq_pending(timer_si); | |
| + | |
| + __do_softirq(); | |
| + | |
| + ksoftirqd_run_end(); | |
| +} | |
| + | |
| +static void raise_ktimers_thread(unsigned int nr) | |
| +{ | |
| + trace_softirq_raise(nr); | |
| + __this_cpu_or(pending_timer_softirq, 1 << nr); | |
| +} | |
| + | |
| +void raise_hrtimer_softirq(void) | |
| +{ | |
| + raise_ktimers_thread(HRTIMER_SOFTIRQ); | |
| +} | |
| + | |
| +void raise_timer_softirq(void) | |
| +{ | |
| + unsigned long flags; | |
| + | |
| + local_irq_save(flags); | |
| + raise_ktimers_thread(TIMER_SOFTIRQ); | |
| + wake_timersd(); | |
| + local_irq_restore(flags); | |
| +} | |
| + | |
| +static struct smp_hotplug_thread timer_threads = { | |
| + .store = &timersd, | |
| + .setup = timersd_setup, | |
| + .thread_should_run = timersd_should_run, | |
| + .thread_fn = run_timersd, | |
| + .thread_comm = "ktimers/%u", | |
| +}; | |
| +#endif | |
| + | |
| static __init int spawn_ksoftirqd(void) | |
| { | |
| cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL, | |
| takeover_tasklets); | |
| BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); | |
| - | |
| +#ifdef CONFIG_PREEMPT_RT | |
| + BUG_ON(smpboot_register_percpu_thread(&timer_threads)); | |
| +#endif | |
| return 0; | |
| } | |
| early_initcall(spawn_ksoftirqd); | |
| diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c | |
| index f62cc13b5f14..f116e72eb6df 100644 | |
| --- a/kernel/time/hrtimer.c | |
| +++ b/kernel/time/hrtimer.c | |
| @@ -1809,7 +1809,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |
| if (!ktime_before(now, cpu_base->softirq_expires_next)) { | |
| cpu_base->softirq_expires_next = KTIME_MAX; | |
| cpu_base->softirq_activated = 1; | |
| - raise_softirq_irqoff(HRTIMER_SOFTIRQ); | |
| + raise_hrtimer_softirq(); | |
| } | |
| __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); | |
| @@ -1922,7 +1922,7 @@ void hrtimer_run_queues(void) | |
| if (!ktime_before(now, cpu_base->softirq_expires_next)) { | |
| cpu_base->softirq_expires_next = KTIME_MAX; | |
| cpu_base->softirq_activated = 1; | |
| - raise_softirq_irqoff(HRTIMER_SOFTIRQ); | |
| + raise_hrtimer_softirq(); | |
| } | |
| __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); | |
| diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c | |
| index 8cfdc6b978d7..48fd460a5d0e 100644 | |
| --- a/kernel/time/tick-sched.c | |
| +++ b/kernel/time/tick-sched.c | |
| @@ -800,7 +800,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) | |
| static inline bool local_timer_softirq_pending(void) | |
| { | |
| - return local_softirq_pending() & BIT(TIMER_SOFTIRQ); | |
| + return local_pending_timers() & BIT(TIMER_SOFTIRQ); | |
| } | |
| static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) | |
| diff --git a/kernel/time/timer.c b/kernel/time/timer.c | |
| index e09852be4e63..591cac6f03ec 100644 | |
| --- a/kernel/time/timer.c | |
| +++ b/kernel/time/timer.c | |
| @@ -1852,7 +1852,7 @@ static void run_local_timers(void) | |
| if (time_before(jiffies, base->next_expiry)) | |
| return; | |
| } | |
| - raise_softirq(TIMER_SOFTIRQ); | |
| + raise_timer_softirq(); | |
| } | |
| /* | |
| diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c | |
| index c9b52e920b8f..c2ef2443686f 100644 | |
| --- a/kernel/trace/trace.c | |
| +++ b/kernel/trace/trace.c | |
| @@ -2644,11 +2644,19 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status) | |
| if (softirq_count() >> (SOFTIRQ_SHIFT + 1)) | |
| trace_flags |= TRACE_FLAG_BH_OFF; | |
| - if (tif_need_resched()) | |
| + if (tif_need_resched_now()) | |
| trace_flags |= TRACE_FLAG_NEED_RESCHED; | |
| +#ifdef CONFIG_PREEMPT_LAZY | |
| + /* Run out of bits. Share the LAZY and PREEMPT_RESCHED */ | |
| + if (need_resched_lazy()) | |
| + trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY; | |
| +#else | |
| if (test_preempt_need_resched()) | |
| trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; | |
| - return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) | | |
| +#endif | |
| + | |
| + return (trace_flags << 24) | (min_t(unsigned int, pc & 0xff, 0xf)) | | |
| + (preempt_lazy_count() & 0xff) << 16 | | |
| (min_t(unsigned int, migration_disable_value(), 0xf)) << 4; | |
| } | |
| @@ -4242,15 +4250,17 @@ unsigned long trace_total_entries(struct trace_array *tr) | |
| static void print_lat_help_header(struct seq_file *m) | |
| { | |
| - seq_puts(m, "# _------=> CPU# \n" | |
| - "# / _-----=> irqs-off/BH-disabled\n" | |
| - "# | / _----=> need-resched \n" | |
| - "# || / _---=> hardirq/softirq \n" | |
| - "# ||| / _--=> preempt-depth \n" | |
| - "# |||| / _-=> migrate-disable \n" | |
| - "# ||||| / delay \n" | |
| - "# cmd pid |||||| time | caller \n" | |
| - "# \\ / |||||| \\ | / \n"); | |
| + seq_puts(m, "# _--------=> CPU# \n" | |
| + "# / _-------=> irqs-off/BH-disabled\n" | |
| + "# | / _------=> need-resched \n" | |
| + "# || / _-----=> need-resched-lazy\n" | |
| + "# ||| / _----=> hardirq/softirq \n" | |
| + "# |||| / _---=> preempt-depth \n" | |
| + "# ||||| / _--=> preempt-lazy-depth\n" | |
| + "# |||||| / _-=> migrate-disable \n" | |
| + "# ||||||| / delay \n" | |
| + "# cmd pid |||||||| time | caller \n" | |
| + "# \\ / |||||||| \\ | / \n"); | |
| } | |
| static void print_event_info(struct array_buffer *buf, struct seq_file *m) | |
| @@ -4284,14 +4294,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file | |
| print_event_info(buf, m); | |
| - seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space); | |
| - seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); | |
| - seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); | |
| - seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); | |
| - seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); | |
| - seq_printf(m, "# %.*s|||| / delay\n", prec, space); | |
| - seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID "); | |
| - seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | "); | |
| + seq_printf(m, "# %.*s _-------=> irqs-off/BH-disabled\n", prec, space); | |
| + seq_printf(m, "# %.*s / _------=> need-resched\n", prec, space); | |
| + seq_printf(m, "# %.*s| / _-----=> need-resched-lazy\n", prec, space); | |
| + seq_printf(m, "# %.*s|| / _----=> hardirq/softirq\n", prec, space); | |
| + seq_printf(m, "# %.*s||| / _---=> preempt-depth\n", prec, space); | |
| + seq_printf(m, "# %.*s|||| / _--=> preempt-lazy-depth\n", prec, space); | |
| + seq_printf(m, "# %.*s||||| / _-=> migrate-disable\n", prec, space); | |
| + seq_printf(m, "# %.*s|||||| / delay\n", prec, space); | |
| + seq_printf(m, "# TASK-PID %.*s CPU# ||||||| TIMESTAMP FUNCTION\n", prec, " TGID "); | |
| + seq_printf(m, "# | | %.*s | ||||||| | |\n", prec, " | "); | |
| } | |
| void | |
| diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c | |
| index 24859d964505..8d9a21d40f5a 100644 | |
| --- a/kernel/trace/trace_events.c | |
| +++ b/kernel/trace/trace_events.c | |
| @@ -208,6 +208,7 @@ static int trace_define_common_fields(void) | |
| /* Holds both preempt_count and migrate_disable */ | |
| __common_field(unsigned char, preempt_count); | |
| __common_field(int, pid); | |
| + __common_field(unsigned char, preempt_lazy_count); | |
| return ret; | |
| } | |
| diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c | |
| index bf1965b18099..133f15d3b886 100644 | |
| --- a/kernel/trace/trace_output.c | |
| +++ b/kernel/trace/trace_output.c | |
| @@ -442,6 +442,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | |
| { | |
| char hardsoft_irq; | |
| char need_resched; | |
| + char need_resched_lazy; | |
| char irqs_off; | |
| int hardirq; | |
| int softirq; | |
| @@ -462,20 +463,27 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | |
| switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | | |
| TRACE_FLAG_PREEMPT_RESCHED)) { | |
| +#ifndef CONFIG_PREEMPT_LAZY | |
| case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED: | |
| need_resched = 'N'; | |
| break; | |
| +#endif | |
| case TRACE_FLAG_NEED_RESCHED: | |
| need_resched = 'n'; | |
| break; | |
| +#ifndef CONFIG_PREEMPT_LAZY | |
| case TRACE_FLAG_PREEMPT_RESCHED: | |
| need_resched = 'p'; | |
| break; | |
| +#endif | |
| default: | |
| need_resched = '.'; | |
| break; | |
| } | |
| + need_resched_lazy = | |
| + (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; | |
| + | |
| hardsoft_irq = | |
| (nmi && hardirq) ? 'Z' : | |
| nmi ? 'z' : | |
| @@ -484,14 +492,20 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | |
| softirq ? 's' : | |
| '.' ; | |
| - trace_seq_printf(s, "%c%c%c", | |
| - irqs_off, need_resched, hardsoft_irq); | |
| + trace_seq_printf(s, "%c%c%c%c", | |
| + irqs_off, need_resched, need_resched_lazy, | |
| + hardsoft_irq); | |
| if (entry->preempt_count & 0xf) | |
| trace_seq_printf(s, "%x", entry->preempt_count & 0xf); | |
| else | |
| trace_seq_putc(s, '.'); | |
| + if (entry->preempt_lazy_count) | |
| + trace_seq_printf(s, "%x", entry->preempt_lazy_count); | |
| + else | |
| + trace_seq_putc(s, '.'); | |
| + | |
| if (entry->preempt_count & 0xf0) | |
| trace_seq_printf(s, "%x", entry->preempt_count >> 4); | |
| else | |
| diff --git a/kernel/watchdog.c b/kernel/watchdog.c | |
| index 45693fb3e08d..f366008298ac 100644 | |
| --- a/kernel/watchdog.c | |
| +++ b/kernel/watchdog.c | |
| @@ -431,6 +431,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | |
| /* Start period for the next softlockup warning. */ | |
| update_report_ts(); | |
| + printk_prefer_direct_enter(); | |
| + | |
| pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", | |
| smp_processor_id(), duration, | |
| current->comm, task_pid_nr(current)); | |
| @@ -449,6 +451,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | |
| add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); | |
| if (softlockup_panic) | |
| panic("softlockup: hung tasks"); | |
| + | |
| + printk_prefer_direct_exit(); | |
| } | |
| return HRTIMER_RESTART; | |
| diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c | |
| index 8ba4b269ab89..0cbe90634654 100644 | |
| --- a/kernel/watchdog_hld.c | |
| +++ b/kernel/watchdog_hld.c | |
| @@ -139,6 +139,8 @@ static void watchdog_overflow_callback(struct perf_event *event, | |
| if (__this_cpu_read(hard_watchdog_warn) == true) | |
| return; | |
| + printk_prefer_direct_enter(); | |
| + | |
| pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", | |
| this_cpu); | |
| print_modules(); | |
| @@ -159,6 +161,8 @@ static void watchdog_overflow_callback(struct perf_event *event, | |
| if (hardlockup_panic) | |
| nmi_panic(regs, "Hard LOCKUP"); | |
| + printk_prefer_direct_exit(); | |
| + | |
| __this_cpu_write(hard_watchdog_warn, true); | |
| return; | |
| } | |
| diff --git a/lib/debugobjects.c b/lib/debugobjects.c | |
| index 5dfa582dbadd..d42f2839a222 100644 | |
| --- a/lib/debugobjects.c | |
| +++ b/lib/debugobjects.c | |
| @@ -600,10 +600,21 @@ static void debug_objects_fill_pool(void) | |
| { | |
| /* | |
| * On RT enabled kernels the pool refill must happen in preemptible | |
| - * context: | |
| + * context -- for !RT kernels we rely on the fact that spinlock_t and | |
| + * raw_spinlock_t are basically the same type and this lock-type | |
| + * inversion works just fine. | |
| */ | |
| - if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) | |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) { | |
| + /* | |
| + * Annotate away the spinlock_t inside raw_spinlock_t warning | |
| + * by temporarily raising the wait-type to WAIT_SLEEP, matching | |
| + * the preemptible() condition above. | |
| + */ | |
| + static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP); | |
| + lock_map_acquire_try(&fill_pool_map); | |
| fill_pool(); | |
| + lock_map_release(&fill_pool_map); | |
| + } | |
| } | |
| static void | |
| diff --git a/localversion-rt b/localversion-rt | |
| new file mode 100644 | |
| index 000000000000..41f7b993a830 | |
| --- /dev/null | |
| +++ b/localversion-rt | |
| @@ -0,0 +1 @@ | |
| +-rt43 | |
| diff --git a/mm/page_alloc.c b/mm/page_alloc.c | |
| index a905b850d31c..f99dc2f63ba4 100644 | |
| --- a/mm/page_alloc.c | |
| +++ b/mm/page_alloc.c | |
| @@ -6601,19 +6601,17 @@ static void __build_all_zonelists(void *data) | |
| unsigned long flags; | |
| /* | |
| - * Explicitly disable this CPU's interrupts before taking seqlock | |
| - * to prevent any IRQ handler from calling into the page allocator | |
| - * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock. | |
| + * The zonelist_update_seq must be acquired with irqsave because the | |
| + * reader can be invoked from IRQ with GFP_ATOMIC. | |
| */ | |
| - local_irq_save(flags); | |
| + write_seqlock_irqsave(&zonelist_update_seq, flags); | |
| /* | |
| - * Explicitly disable this CPU's synchronous printk() before taking | |
| - * seqlock to prevent any printk() from trying to hold port->lock, for | |
| + * Also disable synchronous printk() to prevent any printk() from | |
| + * trying to hold port->lock, for | |
| * tty_insert_flip_string_and_push_buffer() on other CPU might be | |
| * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. | |
| */ | |
| printk_deferred_enter(); | |
| - write_seqlock(&zonelist_update_seq); | |
| #ifdef CONFIG_NUMA | |
| memset(node_load, 0, sizeof(node_load)); | |
| @@ -6650,9 +6648,8 @@ static void __build_all_zonelists(void *data) | |
| #endif | |
| } | |
| - write_sequnlock(&zonelist_update_seq); | |
| printk_deferred_exit(); | |
| - local_irq_restore(flags); | |
| + write_sequnlock_irqrestore(&zonelist_update_seq, flags); | |
| } | |
| static noinline void __init | |
| diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c | |
| index d3e511e1eba8..0fa52bcc296b 100644 | |
| --- a/net/8021q/vlan_dev.c | |
| +++ b/net/8021q/vlan_dev.c | |
| @@ -712,13 +712,13 @@ static void vlan_dev_get_stats64(struct net_device *dev, | |
| p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&p->syncp); | |
| + start = u64_stats_fetch_begin(&p->syncp); | |
| rxpackets = u64_stats_read(&p->rx_packets); | |
| rxbytes = u64_stats_read(&p->rx_bytes); | |
| rxmulticast = u64_stats_read(&p->rx_multicast); | |
| txpackets = u64_stats_read(&p->tx_packets); | |
| txbytes = u64_stats_read(&p->tx_bytes); | |
| - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&p->syncp, start)); | |
| stats->rx_packets += rxpackets; | |
| stats->rx_bytes += rxbytes; | |
| diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c | |
| index 3cd2b648408d..454cd7a21c31 100644 | |
| --- a/net/bridge/br_multicast.c | |
| +++ b/net/bridge/br_multicast.c | |
| @@ -4907,9 +4907,9 @@ void br_multicast_get_stats(const struct net_bridge *br, | |
| unsigned int start; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
| + start = u64_stats_fetch_begin(&cpu_stats->syncp); | |
| memcpy(&temp, &cpu_stats->mstats, sizeof(temp)); | |
| - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); | |
| mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries); | |
| mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries); | |
| diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c | |
| index 9ffd40b8270c..bc75fa1e4666 100644 | |
| --- a/net/bridge/br_vlan.c | |
| +++ b/net/bridge/br_vlan.c | |
| @@ -1389,12 +1389,12 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v, | |
| cpu_stats = per_cpu_ptr(v->stats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
| + start = u64_stats_fetch_begin(&cpu_stats->syncp); | |
| rxpackets = u64_stats_read(&cpu_stats->rx_packets); | |
| rxbytes = u64_stats_read(&cpu_stats->rx_bytes); | |
| txbytes = u64_stats_read(&cpu_stats->tx_bytes); | |
| txpackets = u64_stats_read(&cpu_stats->tx_packets); | |
| - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); | |
| u64_stats_add(&stats->rx_packets, rxpackets); | |
| u64_stats_add(&stats->rx_bytes, rxbytes); | |
| diff --git a/net/core/dev.c b/net/core/dev.c | |
| index 20d8b9195ef6..522cb8b1b36c 100644 | |
| --- a/net/core/dev.c | |
| +++ b/net/core/dev.c | |
| @@ -4621,15 +4621,6 @@ static void rps_trigger_softirq(void *data) | |
| #endif /* CONFIG_RPS */ | |
| -/* Called from hardirq (IPI) context */ | |
| -static void trigger_rx_softirq(void *data) | |
| -{ | |
| - struct softnet_data *sd = data; | |
| - | |
| - __raise_softirq_irqoff(NET_RX_SOFTIRQ); | |
| - smp_store_release(&sd->defer_ipi_scheduled, 0); | |
| -} | |
| - | |
| /* | |
| * Check if this softnet_data structure is another cpu one | |
| * If yes, queue it to our IPI list and return 1 | |
| @@ -6690,6 +6681,30 @@ static void skb_defer_free_flush(struct softnet_data *sd) | |
| } | |
| } | |
| +#ifndef CONFIG_PREEMPT_RT | |
| +/* Called from hardirq (IPI) context */ | |
| +static void trigger_rx_softirq(void *data) | |
| +{ | |
| + struct softnet_data *sd = data; | |
| + | |
| + __raise_softirq_irqoff(NET_RX_SOFTIRQ); | |
| + smp_store_release(&sd->defer_ipi_scheduled, 0); | |
| +} | |
| + | |
| +#else | |
| + | |
| +static void trigger_rx_softirq(struct work_struct *defer_work) | |
| +{ | |
| + struct softnet_data *sd; | |
| + | |
| + sd = container_of(defer_work, struct softnet_data, defer_work); | |
| + smp_store_release(&sd->defer_ipi_scheduled, 0); | |
| + local_bh_disable(); | |
| + skb_defer_free_flush(sd); | |
| + local_bh_enable(); | |
| +} | |
| +#endif | |
| + | |
| static __latent_entropy void net_rx_action(struct softirq_action *h) | |
| { | |
| struct softnet_data *sd = this_cpu_ptr(&softnet_data); | |
| @@ -10512,12 +10527,12 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, | |
| stats = per_cpu_ptr(netstats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->syncp); | |
| + start = u64_stats_fetch_begin(&stats->syncp); | |
| rx_packets = u64_stats_read(&stats->rx_packets); | |
| rx_bytes = u64_stats_read(&stats->rx_bytes); | |
| tx_packets = u64_stats_read(&stats->tx_packets); | |
| tx_bytes = u64_stats_read(&stats->tx_bytes); | |
| - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&stats->syncp, start)); | |
| s->rx_packets += rx_packets; | |
| s->rx_bytes += rx_bytes; | |
| @@ -11451,7 +11466,11 @@ static int __init net_dev_init(void) | |
| INIT_CSD(&sd->csd, rps_trigger_softirq, sd); | |
| sd->cpu = i; | |
| #endif | |
| +#ifndef CONFIG_PREEMPT_RT | |
| INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd); | |
| +#else | |
| + INIT_WORK(&sd->defer_work, trigger_rx_softirq); | |
| +#endif | |
| spin_lock_init(&sd->defer_lock); | |
| init_gro_hash(&sd->backlog); | |
| diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c | |
| index 522657b597d9..6445b93f60ad 100644 | |
| --- a/net/core/drop_monitor.c | |
| +++ b/net/core/drop_monitor.c | |
| @@ -1432,9 +1432,9 @@ static void net_dm_stats_read(struct net_dm_stats *stats) | |
| u64 dropped; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
| + start = u64_stats_fetch_begin(&cpu_stats->syncp); | |
| dropped = u64_stats_read(&cpu_stats->dropped); | |
| - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); | |
| u64_stats_add(&stats->dropped, dropped); | |
| } | |
| @@ -1476,9 +1476,9 @@ static void net_dm_hw_stats_read(struct net_dm_stats *stats) | |
| u64 dropped; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
| + start = u64_stats_fetch_begin(&cpu_stats->syncp); | |
| dropped = u64_stats_read(&cpu_stats->dropped); | |
| - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); | |
| u64_stats_add(&stats->dropped, dropped); | |
| } | |
| diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c | |
| index c8d137ef5980..b71ccaec0991 100644 | |
| --- a/net/core/gen_stats.c | |
| +++ b/net/core/gen_stats.c | |
| @@ -135,10 +135,10 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats, | |
| u64 bytes, packets; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&bcpu->syncp); | |
| + start = u64_stats_fetch_begin(&bcpu->syncp); | |
| bytes = u64_stats_read(&bcpu->bytes); | |
| packets = u64_stats_read(&bcpu->packets); | |
| - } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&bcpu->syncp, start)); | |
| t_bytes += bytes; | |
| t_packets += packets; | |
| @@ -162,10 +162,10 @@ void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats, | |
| } | |
| do { | |
| if (running) | |
| - start = u64_stats_fetch_begin_irq(&b->syncp); | |
| + start = u64_stats_fetch_begin(&b->syncp); | |
| bytes = u64_stats_read(&b->bytes); | |
| packets = u64_stats_read(&b->packets); | |
| - } while (running && u64_stats_fetch_retry_irq(&b->syncp, start)); | |
| + } while (running && u64_stats_fetch_retry(&b->syncp, start)); | |
| _bstats_update(bstats, bytes, packets); | |
| } | |
| @@ -187,10 +187,10 @@ static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets, | |
| u64 bytes, packets; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&bcpu->syncp); | |
| + start = u64_stats_fetch_begin(&bcpu->syncp); | |
| bytes = u64_stats_read(&bcpu->bytes); | |
| packets = u64_stats_read(&bcpu->packets); | |
| - } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&bcpu->syncp, start)); | |
| t_bytes += bytes; | |
| t_packets += packets; | |
| @@ -201,10 +201,10 @@ static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets, | |
| } | |
| do { | |
| if (running) | |
| - start = u64_stats_fetch_begin_irq(&b->syncp); | |
| + start = u64_stats_fetch_begin(&b->syncp); | |
| *ret_bytes = u64_stats_read(&b->bytes); | |
| *ret_packets = u64_stats_read(&b->packets); | |
| - } while (running && u64_stats_fetch_retry_irq(&b->syncp, start)); | |
| + } while (running && u64_stats_fetch_retry(&b->syncp, start)); | |
| } | |
| static int | |
| diff --git a/net/core/skbuff.c b/net/core/skbuff.c | |
| index 768b8d65a5ba..ec9119e84c54 100644 | |
| --- a/net/core/skbuff.c | |
| +++ b/net/core/skbuff.c | |
| @@ -6696,6 +6696,11 @@ nodefer: __kfree_skb(skb); | |
| /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU | |
| * if we are unlucky enough (this seems very unlikely). | |
| */ | |
| - if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) | |
| + if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) { | |
| +#ifndef CONFIG_PREEMPT_RT | |
| smp_call_function_single_async(cpu, &sd->defer_csd); | |
| +#else | |
| + schedule_work_on(cpu, &sd->defer_work); | |
| +#endif | |
| + } | |
| } | |
| diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c | |
| index 032c7af065cd..94e8cc3de330 100644 | |
| --- a/net/devlink/leftover.c | |
| +++ b/net/devlink/leftover.c | |
| @@ -8307,10 +8307,10 @@ static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats, | |
| cpu_stats = per_cpu_ptr(trap_stats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
| + start = u64_stats_fetch_begin(&cpu_stats->syncp); | |
| rx_packets = u64_stats_read(&cpu_stats->rx_packets); | |
| rx_bytes = u64_stats_read(&cpu_stats->rx_bytes); | |
| - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); | |
| u64_stats_add(&stats->rx_packets, rx_packets); | |
| u64_stats_add(&stats->rx_bytes, rx_bytes); | |
| diff --git a/net/dsa/slave.c b/net/dsa/slave.c | |
| index 5fe075bf479e..28ee63ec1d1d 100644 | |
| --- a/net/dsa/slave.c | |
| +++ b/net/dsa/slave.c | |
| @@ -976,12 +976,12 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev, | |
| s = per_cpu_ptr(dev->tstats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&s->syncp); | |
| + start = u64_stats_fetch_begin(&s->syncp); | |
| tx_packets = u64_stats_read(&s->tx_packets); | |
| tx_bytes = u64_stats_read(&s->tx_bytes); | |
| rx_packets = u64_stats_read(&s->rx_packets); | |
| rx_bytes = u64_stats_read(&s->rx_bytes); | |
| - } while (u64_stats_fetch_retry_irq(&s->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&s->syncp, start)); | |
| data[0] += tx_packets; | |
| data[1] += tx_bytes; | |
| data[2] += rx_packets; | |
| diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c | |
| index cc013be9b02c..8b0e3d50a383 100644 | |
| --- a/net/ipv4/af_inet.c | |
| +++ b/net/ipv4/af_inet.c | |
| @@ -1736,9 +1736,9 @@ u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt, | |
| bhptr = per_cpu_ptr(mib, cpu); | |
| syncp = (struct u64_stats_sync *)(bhptr + syncp_offset); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(syncp); | |
| + start = u64_stats_fetch_begin(syncp); | |
| v = *(((u64 *)bhptr) + offt); | |
| - } while (u64_stats_fetch_retry_irq(syncp, start)); | |
| + } while (u64_stats_fetch_retry(syncp, start)); | |
| return v; | |
| } | |
| diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c | |
| index 33cb0381b574..0ab95b936025 100644 | |
| --- a/net/ipv6/seg6_local.c | |
| +++ b/net/ipv6/seg6_local.c | |
| @@ -1644,13 +1644,13 @@ static int put_nla_counters(struct sk_buff *skb, struct seg6_local_lwt *slwt) | |
| pcounters = per_cpu_ptr(slwt->pcpu_counters, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&pcounters->syncp); | |
| + start = u64_stats_fetch_begin(&pcounters->syncp); | |
| packets = u64_stats_read(&pcounters->packets); | |
| bytes = u64_stats_read(&pcounters->bytes); | |
| errors = u64_stats_read(&pcounters->errors); | |
| - } while (u64_stats_fetch_retry_irq(&pcounters->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&pcounters->syncp, start)); | |
| counters.packets += packets; | |
| counters.bytes += bytes; | |
| diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c | |
| index dd1864f6549f..33ab891f7d32 100644 | |
| --- a/net/mac80211/sta_info.c | |
| +++ b/net/mac80211/sta_info.c | |
| @@ -2419,9 +2419,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats, | |
| u64 value; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rxstats->syncp); | |
| + start = u64_stats_fetch_begin(&rxstats->syncp); | |
| value = rxstats->msdu[tid]; | |
| - } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rxstats->syncp, start)); | |
| return value; | |
| } | |
| @@ -2487,9 +2487,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) | |
| u64 value; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&rxstats->syncp); | |
| + start = u64_stats_fetch_begin(&rxstats->syncp); | |
| value = rxstats->bytes; | |
| - } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&rxstats->syncp, start)); | |
| return value; | |
| } | |
| diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c | |
| index f1f43894efb8..dc5165d3eec4 100644 | |
| --- a/net/mpls/af_mpls.c | |
| +++ b/net/mpls/af_mpls.c | |
| @@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_dev *mdev, | |
| p = per_cpu_ptr(mdev->stats, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&p->syncp); | |
| + start = u64_stats_fetch_begin(&p->syncp); | |
| local = p->stats; | |
| - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&p->syncp, start)); | |
| stats->rx_packets += local.rx_packets; | |
| stats->rx_bytes += local.rx_bytes; | |
| diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c | |
| index 17a1b731a76b..2be696513629 100644 | |
| --- a/net/netfilter/ipvs/ip_vs_ctl.c | |
| +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |
| @@ -2299,13 +2299,13 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) | |
| u64 conns, inpkts, outpkts, inbytes, outbytes; | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&u->syncp); | |
| + start = u64_stats_fetch_begin(&u->syncp); | |
| conns = u64_stats_read(&u->cnt.conns); | |
| inpkts = u64_stats_read(&u->cnt.inpkts); | |
| outpkts = u64_stats_read(&u->cnt.outpkts); | |
| inbytes = u64_stats_read(&u->cnt.inbytes); | |
| outbytes = u64_stats_read(&u->cnt.outbytes); | |
| - } while (u64_stats_fetch_retry_irq(&u->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&u->syncp, start)); | |
| seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n", | |
| i, (u64)conns, (u64)inpkts, | |
| diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c | |
| index 25a9bce8cd3a..d7bfa99a70db 100644 | |
| --- a/net/netfilter/nf_tables_api.c | |
| +++ b/net/netfilter/nf_tables_api.c | |
| @@ -1714,10 +1714,10 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats) | |
| for_each_possible_cpu(cpu) { | |
| cpu_stats = per_cpu_ptr(stats, cpu); | |
| do { | |
| - seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
| + seq = u64_stats_fetch_begin(&cpu_stats->syncp); | |
| pkts = cpu_stats->pkts; | |
| bytes = cpu_stats->bytes; | |
| - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq)); | |
| + } while (u64_stats_fetch_retry(&cpu_stats->syncp, seq)); | |
| total.pkts += pkts; | |
| total.bytes += bytes; | |
| } | |
| diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c | |
| index 3c7b24535409..0953f531f984 100644 | |
| --- a/net/openvswitch/datapath.c | |
| +++ b/net/openvswitch/datapath.c | |
| @@ -716,9 +716,9 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats, | |
| percpu_stats = per_cpu_ptr(dp->stats_percpu, i); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&percpu_stats->syncp); | |
| + start = u64_stats_fetch_begin(&percpu_stats->syncp); | |
| local_stats = *percpu_stats; | |
| - } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&percpu_stats->syncp, start)); | |
| stats->n_hit += local_stats.n_hit; | |
| stats->n_missed += local_stats.n_missed; | |
| diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c | |
| index d4a2db0b2299..0a0e4c283f02 100644 | |
| --- a/net/openvswitch/flow_table.c | |
| +++ b/net/openvswitch/flow_table.c | |
| @@ -205,9 +205,9 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma) | |
| stats = per_cpu_ptr(ma->masks_usage_stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->syncp); | |
| + start = u64_stats_fetch_begin(&stats->syncp); | |
| counter = stats->usage_cntrs[i]; | |
| - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); | |
| + } while (u64_stats_fetch_retry(&stats->syncp, start)); | |
| ma->masks_usage_zero_cntr[i] += counter; | |
| } | |
| @@ -1136,10 +1136,9 @@ void ovs_flow_masks_rebalance(struct flow_table *table) | |
| stats = per_cpu_ptr(ma->masks_usage_stats, cpu); | |
| do { | |
| - start = u64_stats_fetch_begin_irq(&stats->syncp); | |
| + start = u64_stats_fetch_begin(&stats->syncp); | |
| counter = stats->usage_cntrs[i]; | |
| - } while (u64_stats_fetch_retry_irq(&stats->syncp, | |
| - start)); | |
| + } while (u64_stats_fetch_retry(&stats->syncp, start)); | |
| masks_and_count[i].counter += counter; | |
| } |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment