Path: drivers/hwspinlock/hwspinlock_core.c
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>

#include "hwspinlock_internal.h"

/* radix tree tags */
#define HWSPINLOCK_UNUSED        (0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which makes the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is now reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this spinlock,
 * as the radix-tree API requires that users provide all synchronisation.
 */
static DEFINE_SPINLOCK(hwspinlock_tree_lock);

/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption (and possibly
 * interrupts) is disabled, so the caller must not sleep, and is advised to
 * release the hwspinlock as soon as possible. This is required in order to
 * minimize remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        int ret;

        BUG_ON(!hwlock);
        BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

        /*
         * This spin_lock{_irq, _irqsave} serves three purposes:
         *
         * 1. Disable preemption, in order to minimize the period of time
         *    in which the hwspinlock is taken. This is important in order
         *    to minimize the possible polling on the hardware interconnect
         *    by a remote user of this lock.
         * 2. Make the hwspinlock SMP-safe (so we can take it from
         *    additional contexts on the local host).
         * 3. Ensure that in_atomic/might_sleep checks catch potential
         *    problems with hwspinlock usage (e.g. scheduler checks like
         *    'scheduling while atomic' etc.)
         */
        if (mode == HWLOCK_IRQSTATE)
                ret = spin_trylock_irqsave(&hwlock->lock, *flags);
        else if (mode == HWLOCK_IRQ)
                ret = spin_trylock_irq(&hwlock->lock);
        else
                ret = spin_trylock(&hwlock->lock);

        /* is lock already taken by another context on the local cpu ? */
        if (!ret)
                return -EBUSY;

        /* try to take the hwspinlock device */
        ret = hwlock->ops->trylock(hwlock);

        /* if hwlock is already taken, undo spin_trylock_* and exit */
        if (!ret) {
                if (mode == HWLOCK_IRQSTATE)
                        spin_unlock_irqrestore(&hwlock->lock, *flags);
                else if (mode == HWLOCK_IRQ)
                        spin_unlock_irq(&hwlock->lock);
                else
                        spin_unlock(&hwlock->lock);

                return -EBUSY;
        }

        /*
         * We can be sure the other core's memory operations
         * are observable to us only _after_ we successfully take
         * the hwspinlock, and we must make sure that subsequent memory
         * operations (both reads and writes) will not be reordered before
         * we actually took the hwspinlock.
         *
         * Note: the implicit memory barrier of the spinlock above is too
         * early, so we need this additional explicit memory barrier.
         */
        mb();

        return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
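
/*
 * Example (sketch): how a caller might use the trylock API through the
 * hwspin_trylock_irqsave() wrapper from <linux/hwspinlock.h>. The caller
 * function, the lock pointer and the shared resource below are hypothetical.
 */
#if 0   /* illustrative sketch, not compiled */
static int example_poke_shared_resource(struct hwspinlock *hwlock)
{
        unsigned long flags;
        int ret;

        /* fails immediately with -EBUSY if another user holds the lock */
        ret = hwspin_trylock_irqsave(hwlock, &flags);
        if (ret)
                return ret;

        /* ... briefly touch the shared resource here ... */

        hwspin_unlock_irqrestore(hwlock, &flags);
        return 0;
}
#endif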

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * (and possibly local interrupts, too), so the caller must not sleep,
 * and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                                        int mode, unsigned long *flags)
{
        int ret;
        unsigned long expire;

        expire = msecs_to_jiffies(to) + jiffies;

        for (;;) {
                /* Try to take the hwspinlock */
                ret = __hwspin_trylock(hwlock, mode, flags);
                if (ret != -EBUSY)
                        break;

                /*
                 * The lock is already taken, let's check if the user wants
                 * us to try again
                 */
                if (time_is_before_eq_jiffies(expire))
                        return -ETIMEDOUT;

                /*
                 * Allow platform-specific relax handlers to prevent
                 * hogging the interconnect (no sleeping, though)
                 */
                if (hwlock->ops->relax)
                        hwlock->ops->relax(hwlock);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether he wants their previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        BUG_ON(!hwlock);
        BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

        /*
         * We must make sure that memory operations (both reads and writes),
         * done before unlocking the hwspinlock, will not be reordered
         * after the lock is released.
         *
         * That's the purpose of this explicit memory barrier.
         *
         * Note: the memory barrier induced by the spin_unlock below is too
         * late; the other core is going to access memory soon after it will
         * take the hwspinlock, and by then we want to be sure our memory
         * operations are already observable.
         */
        mb();

        hwlock->ops->unlock(hwlock);

        /* Undo the spin_trylock{_irq, _irqsave} called while locking */
        if (mode == HWLOCK_IRQSTATE)
                spin_unlock_irqrestore(&hwlock->lock, *flags);
        else if (mode == HWLOCK_IRQ)
                spin_unlock_irq(&hwlock->lock);
        else
                spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
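
/*
 * Example (sketch): taking a lock with a timeout via the
 * hwspin_lock_timeout() wrapper from <linux/hwspinlock.h>, busy-waiting for
 * at most 10 msecs. The caller function and the shared state are hypothetical.
 */
#if 0   /* illustrative sketch, not compiled */
static int example_update_shared_state(struct hwspinlock *hwlock)
{
        int ret;

        /* returns -ETIMEDOUT if the lock is still contended after 10 msecs */
        ret = hwspin_lock_timeout(hwlock, 10);
        if (ret)
                return ret;

        /* ... briefly update the shared state here ... */

        hwspin_unlock(hwlock);
        return 0;
}
#endif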

/**
 * hwspin_lock_register() - register a new hw spinlock
 * @hwlock: hwspinlock to register.
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock instance.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context.
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock *hwlock)
{
        struct hwspinlock *tmp;
        int ret;

        if (!hwlock || !hwlock->ops ||
                !hwlock->ops->trylock || !hwlock->ops->unlock) {
                pr_err("invalid parameters\n");
                return -EINVAL;
        }

        spin_lock_init(&hwlock->lock);

        spin_lock(&hwspinlock_tree_lock);

        ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
        if (ret)
                goto out;

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
                                                HWSPINLOCK_UNUSED);

        /* self-sanity check which should never fail */
        WARN_ON(tmp != hwlock);

out:
        spin_unlock(&hwspinlock_tree_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);

/**
 * hwspin_lock_unregister() - unregister an hw spinlock
 * @id: index of the specific hwspinlock to unregister
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context.
 *
 * Returns the address of hwspinlock @id on success, or NULL on failure
 */
struct hwspinlock *hwspin_lock_unregister(unsigned int id)
{
        struct hwspinlock *hwlock = NULL;
        int ret;

        spin_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is not in use (tag is set) */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_err("hwspinlock %d still in use (or not present)\n", id);
                goto out;
        }

        hwlock = radix_tree_delete(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_err("failed to delete hwspinlock %d\n", id);
                goto out;
        }

out:
        spin_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
        struct hwspinlock *tmp;
        int ret;

        /* prevent underlying implementation from being removed */
        if (!try_module_get(hwlock->owner)) {
                dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
                return -EINVAL;
        }

        /* notify PM core that power is now needed */
        ret = pm_runtime_get_sync(hwlock->dev);
        if (ret < 0) {
                dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
                return ret;
        }

        /* mark hwspinlock as used, should not fail */
        tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
                                                HWSPINLOCK_UNUSED);

        /* self-sanity check that should never fail */
        WARN_ON(tmp != hwlock);

        return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        return hwlock->id;
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
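
/*
 * Example (sketch): how a platform-specific implementation might register a
 * lock instance. The example_* helpers and the lock-register semantics are
 * hypothetical; the struct hwspinlock fields used here (dev, owner, id, ops)
 * follow the usage in this file and are defined in hwspinlock_internal.h.
 * kzalloc() would additionally require <linux/slab.h>.
 */
#if 0   /* illustrative sketch, not compiled */
static int example_hwspinlock_trylock(struct hwspinlock *lock)
{
        /* hypothetical: reading 0 from the lock register means we own it */
        return example_hw_read_lock_reg(lock->id) == 0;
}

static void example_hwspinlock_unlock(struct hwspinlock *lock)
{
        /* hypothetical: writing 0 releases the lock */
        example_hw_write_lock_reg(lock->id, 0);
}

static const struct hwspinlock_ops example_hwspinlock_ops = {
        .trylock        = example_hwspinlock_trylock,
        .unlock         = example_hwspinlock_unlock,
        /* .relax is optional and may be left unset */
};

static int example_register_one(struct device *dev, int id)
{
        struct hwspinlock *hwlock;

        hwlock = kzalloc(sizeof(*hwlock), GFP_KERNEL);
        if (!hwlock)
                return -ENOMEM;

        hwlock->dev = dev;
        hwlock->owner = THIS_MODULE;
        hwlock->id = id;
        hwlock->ops = &example_hwspinlock_ops;

        return hwspin_lock_register(hwlock);
}
#endif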

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
        struct hwspinlock *hwlock;
        int ret;

        spin_lock(&hwspinlock_tree_lock);

        /* look for an unused lock */
        ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
                                                0, 1, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("a free hwspinlock is not available\n");
                hwlock = NULL;
                goto out;
        }

        /* sanity check that should never fail */
        WARN_ON(ret > 1);

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        spin_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        struct hwspinlock *hwlock;
        int ret;

        spin_lock(&hwspinlock_tree_lock);

        /* make sure this hwspinlock exists */
        hwlock = radix_tree_lookup(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_warn("hwspinlock %u does not exist\n", id);
                goto out;
        }

        /* sanity check (this shouldn't happen) */
        WARN_ON(hwlock->id != id);

        /* make sure this hwspinlock is unused */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("hwspinlock %u is already in use\n", id);
                hwlock = NULL;
                goto out;
        }

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        spin_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
        struct hwspinlock *tmp;
        int ret;

        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        spin_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is used */
        ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
                                                HWSPINLOCK_UNUSED);
        if (ret == 1) {
                dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
                dump_stack();
                ret = -EINVAL;
                goto out;
        }

        /* notify the underlying device that power is not needed */
        ret = pm_runtime_put(hwlock->dev);
        if (ret < 0)
                goto out;

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
                                                HWSPINLOCK_UNUSED);

        /* sanity check (this shouldn't happen) */
        WARN_ON(tmp != hwlock);

        module_put(hwlock->owner);

out:
        spin_unlock(&hwspinlock_tree_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
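
/*
 * Example (sketch): the dynamic allocation flow described above: request an
 * unused lock, publish its id to the remote core, and free it on failure.
 * The caller function and the IPC call used to publish the id are
 * hypothetical.
 */
#if 0   /* illustrative sketch, not compiled */
static struct hwspinlock *example_setup_shared_lock(void)
{
        struct hwspinlock *hwlock;
        int id;

        hwlock = hwspin_lock_request();
        if (!hwlock)
                return NULL;

        /* tell the remote core which lock id to use (hypothetical IPC) */
        id = hwspin_lock_get_id(hwlock);
        if (example_announce_lock_id(id)) {
                hwspin_lock_free(hwlock);
                return NULL;
        }

        return hwlock;
}
#endif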
v2");546MODULE_DESCRIPTION("Hardware spinlock interface");547MODULE_AUTHOR("Ohad Ben-Cohen <[email protected]>");548549550