/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <arch/spr_def.h>

#include "spinlock_common.h"

void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int iterations = 0;
	int delta;

	while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
		delay_backoff(iterations++);

	/* Increment the next ticket number, implicitly releasing tns lock. */
	lock->next_ticket = my_ticket + TICKET_QUANTUM;

	/* Wait until it's our turn. */
	while ((delta = my_ticket - lock->current_ticket) != 0)
		relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
}
EXPORT_SYMBOL(arch_spin_lock);

int arch_spin_trylock(arch_spinlock_t *lock)
{
	/*
	 * Grab a ticket; no need to retry if it's busy, we'll just
	 * treat that the same as "locked", since someone else
	 * will lock it momentarily anyway.
	 */
	int my_ticket = __insn_tns((void *)&lock->next_ticket);

	if (my_ticket == lock->current_ticket) {
		/* Not currently locked, so lock it by keeping this ticket. */
		lock->next_ticket = my_ticket + TICKET_QUANTUM;
		/* Success! */
		return 1;
	}

	if (!(my_ticket & 1)) {
		/* Release next_ticket. */
		lock->next_ticket = my_ticket;
	}

	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock);

void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u32 iterations = 0;
	while (arch_spin_is_locked(lock))
		delay_backoff(iterations++);
}
EXPORT_SYMBOL(arch_spin_unlock_wait);

/*
 * The low byte is always reserved to be the marker for a "tns" operation
 * since the low bit is set to "1" by a tns.  The next seven bits are
 * zeroes.  The next byte holds the "next" writer value, i.e. the ticket
 * available for the next task that wants to write.  The third byte holds
 * the current writer value, i.e. the writer who holds the current ticket.
 * If current == next == 0, there are no interested writers.
 */
#define WR_NEXT_SHIFT	_WR_NEXT_SHIFT
#define WR_CURR_SHIFT	_WR_CURR_SHIFT
#define WR_WIDTH	_WR_WIDTH
#define WR_MASK		((1 << WR_WIDTH) - 1)

/*
 * The last eight bits hold the active reader count.  This has to be
 * zero before a writer can start to write.
 */
#define RD_COUNT_SHIFT	_RD_COUNT_SHIFT
#define RD_COUNT_WIDTH	_RD_COUNT_WIDTH
#define RD_COUNT_MASK	((1 << RD_COUNT_WIDTH) - 1)
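
/*
 * Pictorially, the 32-bit lock word described above looks roughly like
 * this (a sketch only; the actual shift and width values come from the
 * _WR_* and _RD_COUNT_* constants used in the #defines just above):
 *
 *   31            24 23            16 15             8 7             0
 *  +----------------+----------------+----------------+--------------+
 *  |  reader count  | current writer |  next writer   | 0000000  tns |
 *  |  (RD_COUNT_*)  |  (WR_CURR_*)   |  (WR_NEXT_*)   |       marker |
 *  +----------------+----------------+----------------+--------------+
 */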

/*
 * We can get the read lock if everything but the reader bits (which
 * are in the high part of the word) is zero, i.e. no active or
 * waiting writers, no tns.
 *
 * We guard the tns/store-back with an interrupt critical section to
 * preserve the semantic that the same read lock can be acquired in an
 * interrupt context.
 */
inline int arch_read_trylock(arch_rwlock_t *rwlock)
{
	u32 val;
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
	val = __insn_tns((int *)&rwlock->lock);
	if (likely((val << _RD_COUNT_WIDTH) == 0)) {
		val += 1 << RD_COUNT_SHIFT;
		rwlock->lock = val;
		__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
		BUG_ON(val == 0);  /* we don't expect wraparound */
		return 1;
	}
	if ((val & 1) == 0)
		rwlock->lock = val;
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
	return 0;
}
EXPORT_SYMBOL(arch_read_trylock);
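
/*
 * To make the rationale above concrete (an illustrative scenario, not
 * a code path in this file): without the interrupt critical section,
 * an interrupt taken between our tns and the store-back would leave
 * the low "tns" bit set in the lock word.  If the handler then called
 * arch_read_lock() on the same lock, its trylock attempts would keep
 * failing, and the store-back it is effectively waiting for could not
 * happen until the handler returned: a self-deadlock.  Deferring
 * interrupts across that short window preserves the "readers may
 * re-enter from interrupt context" semantic.
 */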

/*
 * Spin doing arch_read_trylock() until we acquire the lock.
 * ISSUE: This approach can permanently starve readers.  A reader who sees
 * a writer could instead take a ticket lock (just like a writer would),
 * and atomically enter read mode (with 1 reader) when it gets the ticket.
 * This way both readers and writers would always make forward progress
 * in a finite time.
 */
void arch_read_lock(arch_rwlock_t *rwlock)
{
	u32 iterations = 0;
	while (unlikely(!arch_read_trylock(rwlock)))
		delay_backoff(iterations++);
}
EXPORT_SYMBOL(arch_read_lock);

void arch_read_unlock(arch_rwlock_t *rwlock)
{
	u32 val, iterations = 0;

	mb();  /* guarantee anything modified under the lock is visible */
	for (;;) {
		__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
		val = __insn_tns((int *)&rwlock->lock);
		if (likely((val & 1) == 0)) {
			rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
			__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
			break;
		}
		__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
		delay_backoff(iterations++);
	}
}
EXPORT_SYMBOL(arch_read_unlock);

/*
 * We don't need an interrupt critical section here (unlike for
 * arch_read_lock) since we should never use a bare write lock where
 * it could be interrupted by code that could try to re-acquire it.
 */
void arch_write_lock(arch_rwlock_t *rwlock)
{
	/*
	 * The trailing underscore on this variable (and curr_ below)
	 * reminds us that the high bits are garbage; we mask them out
	 * when we compare them.
	 */
	u32 my_ticket_;
	u32 iterations = 0;
	u32 val = __insn_tns((int *)&rwlock->lock);

	if (likely(val == 0)) {
		rwlock->lock = 1 << _WR_NEXT_SHIFT;
		return;
	}

	/*
	 * Wait until there are no readers, then bump up the next
	 * field and capture the ticket value.
	 */
	for (;;) {
		if (!(val & 1)) {
			if ((val >> RD_COUNT_SHIFT) == 0)
				break;
			rwlock->lock = val;
		}
		delay_backoff(iterations++);
		val = __insn_tns((int *)&rwlock->lock);
	}

	/* Take out the next ticket and extract my ticket value. */
	rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
	my_ticket_ = val >> WR_NEXT_SHIFT;

	/* Wait until the "current" field matches our ticket. */
	for (;;) {
		u32 curr_ = val >> WR_CURR_SHIFT;
		u32 delta = ((my_ticket_ - curr_) & WR_MASK);
		if (likely(delta == 0))
			break;

		/* Delay based on how many lock-holders are still out there. */
		relax((256 / CYCLES_PER_RELAX_LOOP) * delta);

		/*
		 * Get a non-tns value to check; we don't need to tns
		 * it ourselves.  Since we're not tns'ing, we retry
		 * more rapidly to get a valid value.
		 */
		while ((val = rwlock->lock) & 1)
			relax(4);
	}
}
EXPORT_SYMBOL(arch_write_lock);

int arch_write_trylock(arch_rwlock_t *rwlock)
{
	u32 val = __insn_tns((int *)&rwlock->lock);

	/*
	 * If a tns is in progress, or there's a waiting or active locker,
	 * or active readers, we can't take the lock, so give up.
	 */
	if (unlikely(val != 0)) {
		if (!(val & 1))
			rwlock->lock = val;
		return 0;
	}

	/* Set the "next" field to mark it locked. */
	rwlock->lock = 1 << _WR_NEXT_SHIFT;
	return 1;
}
EXPORT_SYMBOL(arch_write_trylock);

void arch_write_unlock(arch_rwlock_t *rwlock)
{
	u32 val, eq, mask;

	mb();  /* guarantee anything modified under the lock is visible */
	val = __insn_tns((int *)&rwlock->lock);
	if (likely(val == (1 << _WR_NEXT_SHIFT))) {
		rwlock->lock = 0;
		return;
	}
	while (unlikely(val & 1)) {
		/* Limited backoff since we are the highest-priority task. */
		relax(4);
		val = __insn_tns((int *)&rwlock->lock);
	}

	/*
	 * Advance the current-writer byte.  If it now matches the
	 * next-writer byte, no other writers are waiting, so collapse
	 * the word back to the idle (all-zero) state that the fast
	 * paths test for; otherwise store back the bumped value.
	 */
	mask = 1 << WR_CURR_SHIFT;
	val = __insn_addb(val, mask);
	eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT));
	val = __insn_mz(eq & mask, val);
	rwlock->lock = val;
}
EXPORT_SYMBOL(arch_write_unlock);
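
/*
 * For reference, kernel code normally reaches these arch_* routines
 * through the generic locking API rather than calling them directly.
 * A typical caller looks roughly like this (my_lock and my_rwlock are
 * illustrative names, not part of this file):
 *
 *	DEFINE_SPINLOCK(my_lock);
 *	DEFINE_RWLOCK(my_rwlock);
 *
 *	spin_lock(&my_lock);
 *	... exclusive critical section ...
 *	spin_unlock(&my_lock);
 *
 *	read_lock(&my_rwlock);
 *	... shared (reader) critical section ...
 *	read_unlock(&my_rwlock);
 */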