// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * coupled.c - helper functions to enter the same idle state on multiple cpus
 *
 * Copyright (c) 2011 Google, Inc.
 *
 * Author: Colin Cross <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cpuidle.h"

/**
 * DOC: Coupled cpuidle states
 *
 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
 * cpus cannot be independently powered down, either due to
 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
 * will corrupt the gic state unless the other cpu runs a
 * workaround).  Each cpu has a power state that it can enter without
 * coordinating with the other cpu (usually Wait For Interrupt, or
 * WFI), and one or more "coupled" power states that affect blocks
 * shared between the cpus (L2 cache, interrupt controller, and
 * sometimes the whole SoC).  Entering a coupled power state must
 * be tightly controlled on both cpus.
 *
 * This file implements a solution, where each cpu will wait in the
 * WFI state until all cpus are ready to enter a coupled state, at
 * which point the coupled state function will be called on all
 * cpus at approximately the same time.
 *
 * Once all cpus are ready to enter idle, they are woken by an smp
 * cross call.  At this point, there is a chance that one of the
 * cpus will find work to do, and choose not to enter idle.  A
 * final pass is needed to guarantee that all cpus will call the
 * power state enter function at the same time.  During this pass,
 * each cpu will increment the ready counter, and continue once the
 * ready counter matches the number of online coupled cpus.  If any
 * cpu exits idle, the other cpus will decrement their counter and
 * retry.
 *
 * requested_state stores the deepest coupled idle state each cpu
 * is ready for.  It is assumed that the states are indexed from
 * shallowest (highest power, lowest exit latency) to deepest
 * (lowest power, highest exit latency).  The requested_state
 * variable is not locked.  It is only written from the cpu that
 * it stores (or by the on/offlining cpu if that cpu is offline),
 * and only read after all the cpus ready for the coupled idle
 * state are no longer updating it.
 *
 * Two counts are used.  online_count tracks the number of cpus in
 * the coupled set that are currently online.  The atomic
 * ready_waiting_counts packs two counters: the waiting count tracks
 * the number of cpus that are in the waiting loop, in the ready loop,
 * or in the coupled idle state, and the ready count tracks the number
 * of cpus that are in the ready loop or in the coupled idle state.
 *
 * To use coupled cpuidle states, a cpuidle driver must:
 *
 *    Set struct cpuidle_device.coupled_cpus to the mask of all
 *    coupled cpus, usually the same as cpu_possible_mask if all cpus
 *    are part of the same cluster.  The coupled_cpus mask must be
 *    set in the struct cpuidle_device for each cpu.
 *
 *    Set struct cpuidle_driver.safe_state_index to the index of a
 *    state that is not a coupled state.  This is usually WFI.
 *
 *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
 *    state that affects multiple cpus.
 *
 *    Provide a struct cpuidle_state.enter function for each state
 *    that affects multiple cpus.  This function is guaranteed to be
 *    called on all cpus at approximately the same time.  The driver
 *    should ensure that the cpus all abort together if any cpu tries
 *    to abort once the function is called.  The function should return
 *    with interrupts still disabled.
 */
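
/*
 * Example: a minimal sketch of the driver-side setup described above, for a
 * hypothetical platform.  The names platform_coupled_driver,
 * platform_wfi_enter and platform_coupled_enter are placeholders and do not
 * exist in the tree; only the cpuidle fields and flags shown are real.
 *
 *	static struct cpuidle_driver platform_coupled_driver = {
 *		.name			= "platform_coupled",
 *		.owner			= THIS_MODULE,
 *		.safe_state_index	= 0,
 *		.states[0] = {
 *			.enter		= platform_wfi_enter,
 *			.exit_latency	= 1,
 *			.name		= "WFI",
 *			.desc		= "per-cpu WFI",
 *		},
 *		.states[1] = {
 *			.enter		= platform_coupled_enter,
 *			.exit_latency	= 5000,
 *			.flags		= CPUIDLE_FLAG_COUPLED,
 *			.name		= "C2",
 *			.desc		= "coupled cluster power down",
 *		},
 *		.state_count = 2,
 *	};
 *
 * Each cpu's struct cpuidle_device additionally needs coupled_cpus set before
 * cpuidle_register_device() is called, e.g.:
 *
 *	dev->cpu = cpu;
 *	cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
 *
 * platform_coupled_enter() will then be called on all coupled cpus at
 * approximately the same time and should return with interrupts disabled.
 */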

/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set
 * @ready_waiting_counts: combined count of cpus in ready or waiting loops
 * @abort_barrier: synchronisation point for abort cases
 * @online_count: count of cpus that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};
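
/*
 * ready_waiting_counts packs two counters into a single atomic_t: the low
 * WAITING_BITS bits hold the waiting count and the bits above them hold the
 * ready count.  For example, with four online coupled cpus all in the waiting
 * loop and two of them also in the ready loop, the value is
 * (2 << WAITING_BITS) | 4 == 0x00020004.  Keeping both counts in one word
 * lets a cpu read a consistent snapshot of both with a single atomic_read().
 */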

#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)
#define READY_MASK (~WAITING_MASK)

#define CPUIDLE_COUPLED_NOT_IDLE	(-1)

static DEFINE_PER_CPU(call_single_data_t, cpuidle_coupled_poke_cb);

/*
 * The cpuidle_coupled_poke_pending mask is used to avoid calling
 * __smp_call_function_single with the per cpu call_single_data_t struct
 * already in use.  This prevents a deadlock where two cpus are waiting for
 * each other's call_single_data_t struct to become available.
 */
static cpumask_t cpuidle_coupled_poke_pending;

/*
 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
 * once to minimize entering the ready loop with a poke pending, which would
 * require aborting and retrying.
 */
static cpumask_t cpuidle_coupled_poked;

/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a: atomic variable to hold the barrier
 *
 * No caller to this function will return from this function until all online
 * cpus in the same coupled group have called this function.  Once any caller
 * has returned from this function, the barrier is immediately available for
 * reuse.
 *
 * The atomic variable must be initialized to 0 before any cpu calls
 * this function, and will be reset to 0 before any cpu returns from this
 * function.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < n)
		cpu_relax();

	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}
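
/*
 * Example: a sketch of how a coupled state enter callback might use the
 * barrier above to sequence a cluster power down, assuming a hypothetical
 * platform where only cpu 0 may turn off the shared logic.  The platform_*
 * helpers are placeholders, not existing kernel functions.
 *
 *	static atomic_t platform_abort_barrier;
 *
 *	static int platform_coupled_enter(struct cpuidle_device *dev,
 *					  struct cpuidle_driver *drv, int index)
 *	{
 *		platform_cpu_save_context(dev->cpu);
 *
 *		cpuidle_coupled_parallel_barrier(dev, &platform_abort_barrier);
 *
 *		if (dev->cpu == 0)
 *			platform_cluster_power_down();
 *		else
 *			platform_cpu_power_down(dev->cpu);
 *
 *		platform_cpu_restore_context(dev->cpu);
 *
 *		return index;
 *	}
 *
 * The barrier guarantees that every coupled cpu has saved its context before
 * any cpu touches the shared power controller, and interrupts stay disabled
 * across the whole callback.
 */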

/**
 * cpuidle_state_is_coupled - check if a state is part of a coupled set
 * @drv: struct cpuidle_driver for the platform
 * @state: index of the target state in drv->states
 *
 * Returns true if the target state is coupled with cpus besides this one
 */
bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
{
	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}

/**
 * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
 * @drv: struct cpuidle_driver for the platform
 *
 * Returns 0 for valid state values, a negative error code otherwise:
 *  * -EINVAL if any coupled state (safe_state_index) is wrongly set.
 */
int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
{
	int i;

	for (i = drv->state_count - 1; i >= 0; i--) {
		if (cpuidle_state_is_coupled(drv, i) &&
		    (drv->safe_state_index == i ||
		     drv->safe_state_index < 0 ||
		     drv->safe_state_index >= drv->state_count))
			return -EINVAL;
	}

	return 0;
}

/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus.  Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static
inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
		-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}

/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == 0;
}

/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == coupled->online_count;
}

/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == coupled->online_count;
}

/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == 0;
}

/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
		struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count.  Matches the write barrier in
	 * cpuidle_coupled_set_waiting().
	 */
	smp_rmb();

	for_each_cpu(i, &coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}

static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;
	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}

/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters its waiting idle
 * state.
 *
 * If cpuidle_coupled_poke_pending is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		smp_call_function_single_async(cpu, csd);
}

/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @this_cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
		struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu(cpu, &coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the number of waiting cpus.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}

/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
		struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared its requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}

/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops.  Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu: this cpu
 *
 * Turns on interrupts and spins until any outstanding poke interrupts have
 * been processed and the poke bit has been cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after this function returns to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if no poke was pending, 1 if a poke was cleared.
 */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		return 0;

	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return 1;
}

static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	return cpumask_first_and_and(cpu_online_mask, &coupled->coupled_cpus,
				     &cpuidle_coupled_poke_pending) < nr_cpu_ids;
}

/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state.  This is a two
 * stage process.  In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the driver's safe state), and wait for
 * all the other cpus to call this function.  Once all coupled cpus are idle,
 * the second stage will start.  Each coupled cpu will spin until all cpus have
 * guaranteed that they will enter the target state.
 *
 * This function must be called with interrupts disabled.  It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	while (coupled->prevent) {
		cpuidle_coupled_clear_pokes(dev->cpu);
		if (need_resched()) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	/* Read barrier ensures online_count is read after prevent is cleared */
	smp_rmb();

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/*
	 * If this is the last cpu to enter the waiting state, poke
	 * all the other cpus out of their waiting state so they can
	 * enter a deeper state.  This can race with one of the cpus
	 * exiting the waiting state due to an interrupt and
	 * decrementing waiting_count, see comment below.
	 */
	if (w == coupled->online_count) {
		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
		cpuidle_coupled_poke_others(dev->cpu, coupled);
	}

retry:
	/*
	 * Wait for all coupled cpus to be idle, using the deepest state
	 * allowed for a single cpu.  If this was not the poking cpu, wait
	 * for at least one poke before leaving to avoid a race where
	 * two cpus could arrive at the waiting loop at the same time,
	 * but the first of the two to arrive could skip the loop without
	 * processing the pokes from the last to arrive.
	 */
	while (!cpuidle_coupled_cpus_waiting(coupled) ||
	    !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
		if (cpuidle_coupled_clear_pokes(dev->cpu))
			continue;

		if (need_resched()) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		if (coupled->prevent) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	cpuidle_coupled_clear_pokes(dev->cpu);
	if (need_resched()) {
		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
		goto out;
	}

	/*
	 * Make sure final poke status for this cpu is visible before setting
	 * cpu as ready.
	 */
	smp_wmb();

	/*
	 * All coupled cpus are probably idle.  There is a small chance that
	 * one of the other cpus just became active.  Increment the ready count,
	 * and spin until all coupled cpus have incremented the counter.  Once a
	 * cpu has incremented the ready counter, it cannot abort idle and must
	 * spin until either all cpus have incremented the ready counter, or
	 * another cpu leaves idle and decrements the waiting counter.
	 */

	cpuidle_coupled_set_ready(coupled);
	while (!cpuidle_coupled_cpus_ready(coupled)) {
		/* Check if any other cpus bailed out of idle. */
		if (!cpuidle_coupled_cpus_waiting(coupled))
			if (!cpuidle_coupled_set_not_ready(coupled))
				goto retry;

		cpu_relax();
	}

	/*
	 * Make sure read of all cpus ready is done before reading pending pokes
	 */
	smp_rmb();

	/*
	 * There is a small chance that a cpu left and reentered idle after this
	 * cpu saw that all cpus were waiting.  The cpu that reentered idle will
	 * have sent this cpu a poke, which will still be pending after the
	 * ready loop.  The pending interrupt may be lost by the interrupt
	 * controller when entering the deep idle state.  It's not possible to
	 * clear a pending interrupt without turning interrupts on and handling
	 * it, and it's too late to turn on interrupts here, so reset the
	 * coupled idle state of all cpus and retry.
	 */
	if (cpuidle_coupled_any_pokes_pending(coupled)) {
		cpuidle_coupled_set_done(dev->cpu, coupled);
		/* Wait for all cpus to see the pending pokes */
		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
		goto reset;
	}

	/* all cpus have acked the coupled state */
	next_state = cpuidle_coupled_get_state(dev, coupled);

	entered_state = cpuidle_enter_state(dev, drv, next_state);

	cpuidle_coupled_set_done(dev->cpu, coupled);

out:
	/*
	 * Normal cpuidle states are expected to return with irqs enabled.
	 * That leads to an inefficiency where a cpu receiving an interrupt
	 * that brings it out of idle will process that interrupt before
	 * exiting the idle enter function and decrementing ready_count.  All
	 * other cpus will need to spin waiting for the cpu that is processing
	 * the interrupt.  If the driver returns with interrupts disabled,
	 * all other cpus will loop back into the safe idle state instead of
	 * spinning, saving power.
	 *
	 * Calling local_irq_enable here allows coupled states to return with
	 * interrupts disabled, but won't cause problems for drivers that
	 * exit with interrupts enabled.
	 */
	local_irq_enable();

	/*
	 * Wait until all coupled cpus have exited idle.  There is no risk that
	 * a cpu exits and re-enters the ready state because this cpu has
	 * already decremented its waiting_count.
	 */
	while (!cpuidle_coupled_no_cpus_ready(coupled))
		cpu_relax();

	return entered_state;
}

static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
	coupled->online_count = cpumask_weight_and(cpu_online_mask,
						   &coupled->coupled_cpus);
}

/**
 * cpuidle_coupled_register_device - register a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_register_device to handle coupled idle init.  Finds the
 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
 * exists yet.
 */
int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
	int cpu;
	struct cpuidle_device *other_dev;
	call_single_data_t *csd;
	struct cpuidle_coupled *coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return 0;

	for_each_cpu(cpu, &dev->coupled_cpus) {
		other_dev = per_cpu(cpuidle_devices, cpu);
		if (other_dev && other_dev->coupled) {
			coupled = other_dev->coupled;
			goto have_coupled;
		}
	}

	/* No existing coupled info found, create a new one */
	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
	if (!coupled)
		return -ENOMEM;

	coupled->coupled_cpus = dev->coupled_cpus;

have_coupled:
	dev->coupled = coupled;
	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
		coupled->prevent++;

	cpuidle_coupled_update_online_cpus(coupled);

	coupled->refcnt++;

	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
	INIT_CSD(csd, cpuidle_coupled_handle_poke, (void *)(unsigned long)dev->cpu);

	return 0;
}

/**
 * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
 * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
 * this was the last cpu in the set.
 */
void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_coupled *coupled = dev->coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return;

	if (!--coupled->refcnt)
		kfree(coupled);
	dev->coupled = NULL;
}

/**
 * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/* Force all cpus out of the waiting loop. */
	coupled->prevent++;
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
	while (!cpuidle_coupled_no_cpus_waiting(coupled))
		cpu_relax();
}

/**
 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/*
	 * Write barrier ensures readers see the new online_count when they
	 * see prevent == 0.
	 */
	smp_wmb();
	coupled->prevent--;
	/* Force cpus out of the prevent loop. */
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
}

static int coupled_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev;

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (dev && dev->coupled) {
		cpuidle_coupled_update_online_cpus(dev->coupled);
		cpuidle_coupled_allow_idle(dev->coupled);
	}

	mutex_unlock(&cpuidle_lock);
	return 0;
}

static int coupled_cpu_up_prepare(unsigned int cpu)
{
	struct cpuidle_device *dev;

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (dev && dev->coupled)
		cpuidle_coupled_prevent_idle(dev->coupled);

	mutex_unlock(&cpuidle_lock);
	return 0;
}

static int __init cpuidle_coupled_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE,
					"cpuidle/coupled:prepare",
					coupled_cpu_up_prepare,
					coupled_cpu_online);
	if (ret)
		return ret;
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"cpuidle/coupled:online",
					coupled_cpu_online,
					coupled_cpu_up_prepare);
	if (ret < 0)
		cpuhp_remove_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE);
	return ret;
}
core_initcall(cpuidle_coupled_init);