author     Will Deacon <will@kernel.org>    2024-11-26 16:10:31 +0000
committer  Will Deacon <will@kernel.org>    2024-11-29 10:22:20 +0000
commit     33bd7b0e18e78a5a8a902b374bbe4e7a9b451734
tree       7719c9c9102b2598f1968cb17660217914e0a220
parent     f9be1c2c78d6bce194daa959f462fa7e9f2f1b5d
cpu/hotplug: Fixes and optimisations to generic hotplug logic
WIP:
- Test this out
- If it makes sense, split up into separate changes
Signed-off-by: Will Deacon <will@kernel.org>
 arch/x86/kernel/smpboot.c  |  2 +-
 include/linux/cpuhotplug.h |  2 +-
 kernel/cpu.c               | 24 +++++++++++++++---------
 3 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0cdebfc01d2e7b..b3b7e0450c82ee 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1001,7 +1001,7 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
 	pr_info("CPU %u is now offline\n", cpu);
 }
 
-void arch_cpuhp_sync_state_poll(void)
+void arch_cpuhp_sync_state_poll(atomic_t *st, int state)
 {
 	if (smp_ops.poll_sync_state)
 		smp_ops.poll_sync_state();
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 9a5413a30e4173..b28e553eb0a41c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -511,7 +511,7 @@ static inline void cpuhp_online_idle(enum cpuhp_state state) { }
 
 struct task_struct;
 void cpuhp_ap_sync_alive(void);
-void arch_cpuhp_sync_state_poll(void);
+void arch_cpuhp_sync_state_poll(atomic_t *st, int old);
 void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu, bool is_alive);
 int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle);
 bool arch_cpuhp_init_parallel_bringup(void);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c0145f9e79c9a5..3ec5f4ba56e3b2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -303,7 +303,13 @@ static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
 	(void)atomic_xchg(st, state);
 }
 
-void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }
+void __weak arch_cpuhp_sync_state_poll(atomic_t *st, int old)
+{
+	if (old < SYNC_STATE_ALIVE)
+		cpu_relax();
+	else
+		atomic_cond_read_relaxed(st, VAL != old);
+}
 
 static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
 				      enum cpuhp_sync_state next_state)
@@ -328,7 +334,7 @@ static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state st
 			return false;
 		} else if (now - start < NSEC_PER_MSEC) {
 			/* Poll for one millisecond */
-			arch_cpuhp_sync_state_poll();
+			arch_cpuhp_sync_state_poll(st, sync);
 		} else {
 			usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
 		}
@@ -395,8 +401,7 @@ void cpuhp_ap_sync_alive(void)
 	cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);
 
 	/* Wait for the control CPU to release it. */
-	while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
-		cpu_relax();
+	atomic_cond_read_acquire(st, VAL == SYNC_STATE_SHOULD_ONLINE);
 }
 
 static bool cpuhp_can_boot_ap(unsigned int cpu)
@@ -408,22 +413,22 @@ again:
 	switch (sync) {
 	case SYNC_STATE_DEAD:
 		/* CPU is properly dead */
+		atomic_set(st, SYNC_STATE_KICKED);
 		break;
 	case SYNC_STATE_KICKED:
 		/* CPU did not come up in previous attempt */
 		break;
 	case SYNC_STATE_ALIVE:
 		/* CPU is stuck cpuhp_ap_sync_alive(). */
+		if (!atomic_try_cmpxchg_relaxed(st, &sync, SYNC_STATE_KICKED))
+			goto again;
 		break;
 	default:
 		/* CPU failed to report online or dead and is in limbo state. */
 		return false;
 	}
 
-	/* Prepare for booting */
-	if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
-		goto again;
-
+	/* Continue with booting */
 	return true;
 }
 
@@ -1785,7 +1790,8 @@ static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int n
 	for_each_cpu(cpu, mask) {
 		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 
-		if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
+		if (!cpu_online(cpu) && cpu_up(cpu, target) &&
+		    can_rollback_cpu(st)) {
			/*
			 * If this failed then cpu_up() might have only
			 * rolled back to CPUHP_BP_KICK_AP for the final
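
Editor's note: the change to the weak arch_cpuhp_sync_state_poll() is the core of the optimisation. Previously the generic fallback issued a single cpu_relax() per call and relied on the caller's one-millisecond loop in cpuhp_wait_for_sync_state() to re-read the state word; with the new (st, old) arguments it can wait in place once the AP has reported alive, and cpuhp_ap_sync_alive() likewise replaces its open-coded spin with atomic_cond_read_acquire(). A minimal userspace analogue of the new fallback, using C11 atomics in place of the kernel's atomic_t and an assumed STATE_ALIVE value standing in for SYNC_STATE_ALIVE (a sketch of the idea, not kernel code):

#include <stdatomic.h>

#define STATE_ALIVE 3                   /* assumed enum position, illustration only */

static inline void cpu_relax(void)      /* stand-in for the kernel's spin hint */
{
}

static void sync_state_poll(atomic_int *st, int old)
{
        if (old < STATE_ALIVE) {
                /* AP not alive yet: pause once, the caller re-reads the word */
                cpu_relax();
        } else {
                /*
                 * AP is alive: wait here until the word moves off 'old',
                 * mirroring atomic_cond_read_relaxed(st, VAL != old).
                 */
                while (atomic_load_explicit(st, memory_order_relaxed) == old)
                        cpu_relax();
        }
}

On arm64 the atomic_cond_read_*() helpers are built on smp_cond_load_*(), which can wait with WFE rather than burning cycles in a cpu_relax() loop; presumably that is the win being evaluated here.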
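
The cpuhp_can_boot_ap() rework similarly moves the atomic write out of the common path: SYNC_STATE_DEAD takes a plain atomic_set() since a properly dead CPU has no concurrent writer, SYNC_STATE_KICKED needs no write at all, and only SYNC_STATE_ALIVE keeps a cmpxchg (now relaxed) because the AP may still update the word. A compact userspace model of the reworked control flow, again with C11 atomics and illustrative state names (hypothetical, for exercising the retry path only):

#include <stdatomic.h>
#include <stdbool.h>

enum { STATE_DEAD, STATE_KICKED, STATE_ALIVE };  /* illustrative values */

static bool can_boot_ap(atomic_int *st)
{
        int sync = atomic_load(st);
again:
        switch (sync) {
        case STATE_DEAD:
                /* properly dead: no racing writer, a plain store suffices */
                atomic_store(st, STATE_KICKED);
                break;
        case STATE_KICKED:
                /* previous attempt failed early: word is already KICKED */
                break;
        case STATE_ALIVE:
                /*
                 * The AP may still write the word: claim it with a cmpxchg;
                 * on failure 'sync' holds the fresh value, so re-dispatch.
                 */
                if (!atomic_compare_exchange_strong(st, &sync, STATE_KICKED))
                        goto again;
                break;
        default:
                /* limbo: CPU reported neither online nor dead */
                return false;
        }
        return true;
}

Whether the relaxed cmpxchg is strong enough here depends on the ordering already provided by the rest of the bringup handshake; that is presumably part of what the WIP note above wants tested before the patch is split up.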