Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/oops/markOop.hpp
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_MARKOOP_HPP
#define SHARE_VM_OOPS_MARKOOP_HPP

#include "oops/oop.hpp"

// The markOop describes the header of an object.
//
// Note that the mark is not a real oop but just a word.
// It is placed in the oop hierarchy for historical reasons.
//
// Bit-format of an object header (most significant first, big endian layout below):
//
//  32 bits:
//  --------
//             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
//             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
//             size:32 ------------------------------------------>| (CMS free block)
//             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
//
//  64 bits:
//  --------
//  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
//  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
//  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
//  size:64 ----------------------------------------------------->| (CMS free block)
//
//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
//
//  - hash contains the identity hash value: largest value is
//    31 bits, see os::random().  Also, 64-bit vm's require
//    a hash value no bigger than 32 bits because they will not
//    properly generate a mask larger than that: see library_call.cpp
//    and c1_CodePatterns_sparc.cpp.
//
//  - the biased lock pattern is used to bias a lock toward a given
//    thread. When this pattern is set in the low three bits, the lock
//    is either biased toward a given thread or "anonymously" biased,
//    indicating that it is possible for it to be biased. When the
//    lock is biased toward a given thread, locking and unlocking can
//    be performed by that thread without using atomic operations.
//    When a lock's bias is revoked, it reverts back to the normal
//    locking scheme described below.
//
//    Note that we are overloading the meaning of the "unlocked" state
//    of the header. Because we steal a bit from the age we can
//    guarantee that the bias pattern will never be seen for a truly
//    unlocked object.
//
//    Note also that the biased state contains the age bits normally
//    contained in the object header. Large increases in scavenge
//    times were seen when these bits were absent and an arbitrary age
//    assigned to all biased objects, because they tended to consume a
//    significant fraction of the eden semispaces and were not
//    promoted promptly, causing an increase in the amount of copying
//    performed. The runtime system aligns all JavaThread* pointers to
//    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
//    to make room for the age bits & the epoch bits (used in support of
//    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
//
//    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
//    [0           | epoch | age | 1 | 01]       lock is anonymously biased
//
//  - the two lock bits are used to describe three states: locked, unlocked, and monitor.
//
//    [ptr             | 00]  locked             ptr points to real header on stack
//    [header      | 0 | 01]  unlocked           regular object header
//    [ptr             | 10]  monitor            inflated lock (header is swapped out)
//    [ptr             | 11]  marked             used by markSweep to mark an object
//                                               not valid at any other time
//
//    We assume that stack/thread pointers have the lowest two bits cleared.
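//
//  Illustrative worked example (editor's sketch, not normative) of the 64-bit
//  layout above, using the shift/mask constants defined in markOopDesc below.
//  The concrete value is made up purely for illustration:
//
//    value = 0x0000001234567829
//
//      lock        = value & 0x3              = 0x1        -> unlocked_value
//      biased_lock = (value >> 2) & 0x1       = 0          -> bias pattern (101) not present
//      age         = (value >> 3) & 0xF       = 5
//      hash        = (value >> 8) & 0x7FFFFFFF = 0x12345678
//
//  i.e. a normal (neutral) object with GC age 5 and an identity hash already
//  installed.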

class BasicLock;
class ObjectMonitor;
class JavaThread;

class markOopDesc: public oopDesc {
 private:
  // Conversion
  uintptr_t value() const { return (uintptr_t) this; }

 public:
  // Constants
  enum { age_bits                 = 4,
         lock_bits                = 2,
         biased_lock_bits         = 1,
         max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
         hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
         cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
         epoch_bits               = 2
  };

  // The biased locking code currently requires that the age bits be
  // contiguous to the lock bits.
  enum { lock_shift               = 0,
         biased_lock_shift        = lock_bits,
         age_shift                = lock_bits + biased_lock_bits,
         cms_shift                = age_shift + age_bits,
         hash_shift               = cms_shift + cms_bits,
         epoch_shift              = hash_shift
  };

  enum { lock_mask                = right_n_bits(lock_bits),
         lock_mask_in_place       = lock_mask << lock_shift,
         biased_lock_mask         = right_n_bits(lock_bits + biased_lock_bits),
         biased_lock_mask_in_place= biased_lock_mask << lock_shift,
         biased_lock_bit_in_place = 1 << biased_lock_shift,
         age_mask                 = right_n_bits(age_bits),
         age_mask_in_place        = age_mask << age_shift,
         epoch_mask               = right_n_bits(epoch_bits),
         epoch_mask_in_place      = epoch_mask << epoch_shift,
         cms_mask                 = right_n_bits(cms_bits),
         cms_mask_in_place        = cms_mask << cms_shift
  };

  const static uintptr_t hash_mask = right_n_bits(hash_bits);
  const static uintptr_t hash_mask_in_place  = hash_mask << hash_shift;

  // Alignment of JavaThread pointers encoded in object header required by biased locking
  enum { biased_lock_alignment    = 2 << (epoch_shift + epoch_bits)
  };

  enum { locked_value             = 0,
         unlocked_value           = 1,
         monitor_value            = 2,
         marked_value             = 3,
         biased_lock_pattern      = 5
  };

  enum { no_hash                  = 0 };  // no hash value assigned

  enum { no_hash_in_place         = (address_word)no_hash << hash_shift,
         no_lock_in_place         = unlocked_value
  };

  enum { max_age                  = age_mask };

  enum { max_bias_epoch           = epoch_mask };
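
  // For orientation only (editor's note, not used by the code): on an LP64 VM
  // the enums above work out to lock_shift = 0, biased_lock_shift = 2,
  // age_shift = 3, cms_shift = 7 and hash_shift = epoch_shift = 8, giving
  //
  //   lock_mask_in_place        = 0x0000000000000003
  //   biased_lock_mask_in_place = 0x0000000000000007
  //   age_mask_in_place         = 0x0000000000000078
  //   epoch_mask_in_place       = 0x0000000000000300
  //   hash_mask_in_place        = 0x0000007fffffff00
  //
  // and biased_lock_pattern = 0x5 (binary 101: the biased_lock bit plus lock
  // bits 01).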

  // Biased Locking accessors.
  // These must be checked by all code which calls into the
  // ObjectSynchronizer and other code. The biasing is not understood
  // by the lower-level CAS-based locking code, although the runtime
  // fixes up biased locks to be compatible with it when a bias is
  // revoked.
  bool has_bias_pattern() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
  }
  JavaThread* biased_locker() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
  }
  // Indicates that the mark has the bias bit set but that it has not
  // yet been biased toward a particular thread
  bool is_biased_anonymously() const {
    return (has_bias_pattern() && (biased_locker() == NULL));
  }
  // Indicates epoch in which this bias was acquired. If the epoch
  // changes due to too many bias revocations occurring, the biases
  // from the previous epochs are all considered invalid.
  int bias_epoch() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
  }
  markOop set_bias_epoch(int epoch) {
    assert(has_bias_pattern(), "should not call this otherwise");
    assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
    return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
  }
  markOop incr_bias_epoch() {
    return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
  }
  // Prototype mark for initialization
  static markOop biased_locking_prototype() {
    return markOop( biased_lock_pattern );
  }

  // lock accessors (note that these assume lock_shift == 0)
  bool is_locked()   const {
    return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
  }
  bool is_unlocked() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
  }
  bool is_marked()   const {
    return (mask_bits(value(), lock_mask_in_place) == marked_value);
  }
  bool is_neutral()  const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }

  // Special temporary state of the markOop while being inflated.
  // Code that looks at mark outside a lock needs to take this into account.
  bool is_being_inflated() const { return (value() == 0); }

  // Distinguished markword value - used when inflating over
  // an existing stacklock.  0 indicates the markword is "BUSY".
  // Lockword mutators that use a LD...CAS idiom should always
  // check for and avoid overwriting a 0 value installed by some
  // other thread.  (They should spin or block instead.  The 0 value
  // is transient and *should* be short-lived).
  static markOop INFLATING() { return (markOop) 0; }    // inflate-in-progress
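
  // Editor's sketch (not part of this class) of the LD...CAS idiom the
  // comment above warns about, assuming the usual oopDesc::mark()/mark_addr()
  // accessors and the Atomic::cmpxchg_ptr primitive used elsewhere in the
  // runtime:
  //
  //   markOop mark = obj->mark();
  //   if (mark == markOopDesc::INFLATING()) {
  //     // 0 means another thread is busy inflating this lock:
  //     // do NOT CAS over it -- spin/back off and re-read the mark.
  //     continue;
  //   }
  //   markOop new_mark = ...;   // derived from 'mark'
  //   if (Atomic::cmpxchg_ptr(new_mark, obj->mark_addr(), mark) != mark) {
  //     // lost a race (possibly against an inflater); retry from the load.
  //   }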

  // Should this header be preserved during GC?
  inline bool must_be_preserved(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias(oop obj_containing_mark) const;

  // Should this header (including its age bits) be preserved in the
  // case of a promotion failure during scavenge?
  // Note that we special case this situation. We want to avoid
  // calling BiasedLocking::preserve_marks()/restore_marks() (which
  // decrease the number of mark words that need to be preserved
  // during GC) during each scavenge. During scavenges in which there
  // is no promotion failure, we actually don't need to call the above
  // routines at all, since we don't mutate and re-initialize the
  // marks of promoted objects using init_mark(). However, during
  // scavenges which result in promotion failure, we do re-initialize
  // the mark words of objects, meaning that we should have called
  // these mark word preservation routines. Currently there's no good
  // place in which to call them in any of the scavengers (although
  // guarded by appropriate locks we could make one), but the
  // observation is that promotion failures are quite rare and
  // reducing the number of mark words preserved during them isn't a
  // high priority.
  inline bool must_be_preserved_for_promotion_failure(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const;

  // Should this header be preserved during a scavenge where CMS is
  // the old generation?
  // (This is basically the same body as must_be_preserved_for_promotion_failure(),
  // but takes the Klass* as argument instead)
  inline bool must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const;
  inline bool must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const;

  // WARNING: The following routines are used EXCLUSIVELY by
  // synchronization functions. They are not really gc safe.
  // They must get updated if markOop layout gets changed.
  markOop set_unlocked() const {
    return markOop(value() | unlocked_value);
  }
  bool has_locker() const {
    return ((value() & lock_mask_in_place) == locked_value);
  }
  BasicLock* locker() const {
    assert(has_locker(), "check");
    return (BasicLock*) value();
  }
  bool has_monitor() const {
    return ((value() & monitor_value) != 0);
  }
  ObjectMonitor* monitor() const {
    assert(has_monitor(), "check");
    // Use xor instead of &~ to provide one extra tag-bit check.
    return (ObjectMonitor*) (value() ^ monitor_value);
  }
  bool has_displaced_mark_helper() const {
    return ((value() & unlocked_value) == 0);
  }
  markOop displaced_mark_helper() const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    return *(markOop*)ptr;
  }
  void set_displaced_mark_helper(markOop m) const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    *(markOop*)ptr = m;
  }
  markOop copy_set_hash(intptr_t hash) const {
    intptr_t tmp = value() & (~hash_mask_in_place);
    tmp |= ((hash & hash_mask) << hash_shift);
    return (markOop)tmp;
  }
  // it is only used to be stored into BasicLock as the
  // indicator that the lock is using a heavyweight monitor
  static markOop unused_mark() {
    return (markOop) marked_value;
  }
  // the following two functions create the markOop to be
  // stored into the object header; they encode monitor info
  static markOop encode(BasicLock* lock) {
    return (markOop) lock;
  }
  static markOop encode(ObjectMonitor* monitor) {
    intptr_t tmp = (intptr_t) monitor;
    return (markOop) (tmp | monitor_value);
  }
  static markOop encode(JavaThread* thread, uint age, int bias_epoch) {
    intptr_t tmp = (intptr_t) thread;
    assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
    assert(age <= max_age, "age too large");
    assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
    return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
  }

  // used to encode pointers during GC
  markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }
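
  // Illustration only (editor's note), with a made-up but suitably aligned
  // address, of how the monitor encoding above round-trips:
  //
  //   ObjectMonitor* m  = (ObjectMonitor*) 0x00007f1234567800;
  //   markOop mark      = markOopDesc::encode(m);   // 0x00007f1234567802 (| monitor_value)
  //   mark->has_monitor();                          // true: bit 1 is set
  //   mark->monitor();                              // 0x00007f1234567800 again
  //
  // Decoding with xor rather than &~ means that if the low bits were ever
  // something other than exactly '10', the recovered pointer would be
  // visibly misaligned instead of being silently "repaired".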

  // age operations
  markOop set_marked()   { return markOop((value() & ~lock_mask_in_place) | marked_value); }
  markOop set_unmarked() { return markOop((value() & ~lock_mask_in_place) | unlocked_value); }

  uint    age()           const { return mask_bits(value() >> age_shift, age_mask); }
  markOop set_age(uint v) const {
    assert((v & ~age_mask) == 0, "shouldn't overflow age field");
    return markOop((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
  }
  markOop incr_age()      const { return age() == max_age ? markOop(this) : set_age(age() + 1); }

  // hash operations
  intptr_t hash() const {
    return mask_bits(value() >> hash_shift, hash_mask);
  }

  bool has_no_hash() const {
    return hash() == no_hash;
  }

  // Prototype mark for initialization
  static markOop prototype() {
    return markOop( no_hash_in_place | no_lock_in_place );
  }

  // Helper function for restoration of unmarked mark oops during GC
  static inline markOop prototype_for_object(oop obj);

  // Debugging
  void print_on(outputStream* st) const;

  // Prepare address of oop for placement into mark
  inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }

  // Recover address of oop from encoded form used in mark
  inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }

  // These markOops indicate cms free chunk blocks and not objects.
  // In 64 bit, the markOop is set to distinguish them from oops.
  // These are defined in 32 bit mode for vmStructs.
  const static uintptr_t cms_free_chunk_pattern  = 0x1;

  // Constants for the size field.
  enum { size_shift = cms_shift + cms_bits,
         size_bits  = 35    // need for compressed oops 32G
  };
  // These values are too big for Win64
  const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
                                     NOT_LP64(0);
  const static uintptr_t size_mask_in_place =
                                     (address_word)size_mask << size_shift;

#ifdef _LP64
  static markOop cms_free_prototype() {
    return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
                   ((cms_free_chunk_pattern & cms_mask) << cms_shift));
  }
  uintptr_t cms_encoding() const {
    return mask_bits(value() >> cms_shift, cms_mask);
  }
  bool is_cms_free_chunk() const {
    return is_neutral() &&
           (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern;
  }

  size_t get_size() const       { return (size_t)(value() >> size_shift); }
  static markOop set_size_and_free(size_t size) {
    assert((size & ~size_mask) == 0, "shouldn't overflow size field");
    return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
                   (((intptr_t)size & size_mask) << size_shift));
  }
#endif // _LP64
};

#endif // SHARE_VM_OOPS_MARKOOP_HPP
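
// Editor's footnote: a worked example (illustration only, 64-bit VM) of the
// CMS free-block encoding defined above. With cms_shift = 7 and
// size_shift = 8:
//
//   markOop m = markOopDesc::set_size_and_free(512);
//     prototype()          == 0x1      (no hash, unlocked)
//     cms_free_prototype() == 0x81     (prototype with the cms_free bit, bit 7, set)
//     m                    == 0x20081  (512 << 8, merged with cms_free_prototype())
//
//   m->is_cms_free_chunk()   // true: neutral header with the cms_free bit set
//   m->get_size()            // 512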