Path: blob/master/src/hotspot/share/asm/assembler.cpp
40951 views
/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"


// Implementation of AbstractAssembler
//
// The AbstractAssembler is generating code into a CodeBuffer. To make code generation faster,
// the assembler keeps a copy of the code buffers boundaries & modifies them when
// emitting bytes rather than using the code buffers accessor functions all the time.
// The code buffer is updated via set_code_end(...) after emitting a whole instruction.

// Construct an assembler that emits into the instruction section of 'code'.
// A NULL 'code' yields a dead assembler; an unallocated instruction section
// means the code cache is exhausted, which is a fatal out-of-memory condition.
AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  if (code == NULL)  return;
  CodeSection* cs = code->insts();
  cs->clear_mark();   // new assembler kills old mark
  if (cs->start() == NULL) {
    // No space was reserved in the code cache for this buffer's instructions.
    vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "CodeCache: no room for %s", code->name());
  }
  _code_section = cs;
  _oop_recorder = code->oop_recorder();
  DEBUG_ONLY( _short_branch_delta = 0; )
}

// Redirect subsequent emission into section 'cs', which must belong to the
// same CodeBuffer and already be allocated.
void AbstractAssembler::set_code_section(CodeSection* cs) {
  assert(cs->outer() == code_section()->outer(), "sanity");
  assert(cs->is_allocated(), "need to pre-allocate this section");
  cs->clear_mark();  // new assembly into this section kills old mark
  _code_section = cs;
}

// Inform CodeBuffer that incoming code and relocation will be for stubs
// Returns the current pc of the stub section, or NULL if the section could
// not be expanded to hold 'required_space' bytes (only possible before the
// buffer is backed by a blob).
address AbstractAssembler::start_a_stub(int required_space) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->stubs();
  assert(_code_section == cb->insts(), "not in insts?");
  if (cs->maybe_expand_to_ensure_remaining(required_space)
      && cb->blob() == NULL) {
    return NULL;
  }
  set_code_section(cs);
  return pc();
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
  assert(_code_section == code()->stubs(), "not in stubs?");
  set_code_section(code()->insts());
}

// Inform CodeBuffer that incoming code and relocation will be for stubs
// Switches emission to the constants section, first zero-padding its end up
// to 'required_align' and ensuring 'required_space' bytes are available.
// Returns the (aligned) start address for the new constant, or NULL on
// failure to expand an unbacked buffer.
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->consts();
  assert(_code_section == cb->insts() || _code_section == cb->stubs(), "not in insts/stubs?");
  address end = cs->end();
  // Bytes needed to round 'end' up to the next 'required_align' boundary
  // (required_align is assumed to be a power of two).
  int pad = -(intptr_t)end & (required_align-1);
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
    if (cb->blob() == NULL)  return NULL;
    end = cs->end();  // refresh pointer
  }
  if (pad > 0) {
    // Emit zero filler bytes so the constant starts aligned.
    while (--pad >= 0) { *end++ = 0; }
    cs->set_end(end);
  }
  set_code_section(cs);
  return end;
}

// Inform CodeBuffer that incoming code and relocation will be code
// in section cs (insts or stubs).
void AbstractAssembler::end_a_const(CodeSection* cs) {
  assert(_code_section == code()->consts(), "not in consts?");
  set_code_section(cs);
}

// Flush the instruction cache for the bytes emitted so far, so the CPU will
// fetch the newly generated code rather than stale cache contents.
void AbstractAssembler::flush() {
  ICache::invalidate_range(addr_at(0), offset());
}

// Bind label L to the current emission position and patch all branches that
// were emitted against it while it was still unbound.
void AbstractAssembler::bind(Label& L) {
  if (L.is_bound()) {
    // Assembler can bind a label more than once to the same place.
    guarantee(L.loc() == locator(), "attempt to redefine label");
    return;
  }
  L.bind_loc(locator());
  L.patch_instructions((MacroAssembler*)this);
}

void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
  // Each code entry causes one stack bang n pages down the stack where n
  // is configurable by StackShadowPages. The setting depends on the maximum
  // depth of VM call stack or native before going back into java code,
  // since only java code can raise a stack overflow exception using the
  // stack banging mechanism. The VM and native code does not detect stack
  // overflow.
  // The code in JavaCalls::call() checks that there is at least n pages
  // available, so all entry code needs to do is bang once for the end of
  // this shadow zone.
  // The entry code may need to bang additional pages if the framesize
  // is greater than a page.

  const int page_size = os::vm_page_size();
  int bang_end = (int)StackOverflow::stack_shadow_zone_size();

  // This is how far the previous frame's stack banging extended.
  const int bang_end_safe = bang_end;

  if (frame_size_in_bytes > page_size) {
    // Large frames may span extra pages beyond the shadow zone; extend the
    // banged range to cover the whole new frame.
    bang_end += frame_size_in_bytes;
  }

  int bang_offset = bang_end_safe;
  while (bang_offset <= bang_end) {
    // Need at least one stack bang at end of shadow zone.
    bang_stack_with_offset(bang_offset);
    bang_offset += page_size;
  }
}

// Record branch location 'branch_loc' as needing a patch once this label is
// bound. The first PatchCacheSize locations go into a fixed-size inline
// cache; any further ones spill into an overflow stack owned by the buffer.
// In debug builds the emitting file/line are kept for diagnostics.
void Label::add_patch_at(CodeBuffer* cb, int branch_loc, const char* file, int line) {
  assert(_loc == -1, "Label is unbound");
  // Don't add patch locations during scratch emit.
  if (cb->insts()->scratch_emit()) { return; }
  if (_patch_index < PatchCacheSize) {
    _patches[_patch_index] = branch_loc;
#ifdef ASSERT
    _lines[_patch_index] = line;
    _files[_patch_index] = file;
#endif
  } else {
    if (_patch_overflow == NULL) {
      _patch_overflow = cb->create_patch_overflow();
    }
    _patch_overflow->push(branch_loc);
  }
  ++_patch_index;
}

// After the label has been bound, rewrite every recorded branch (or constant
// word) so it refers to the label's final address. Drains the overflow stack
// first (entries beyond PatchCacheSize), then the inline cache, leaving the
// label with no pending patches.
void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    int line = 0;
    const char* file = NULL;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
#ifdef ASSERT
      line = _lines[_patch_index];
      file = _files[_patch_index];
#endif
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }

    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target, file, line);
  }
}

// Attach a human-readable comment to the current offset; only recorded while
// emitting into the main instruction section.
void AbstractAssembler::block_comment(const char* comment) {
  if (sect() == CodeBuffer::SECT_INSTS) {
    code_section()->outer()->block_comment(offset(), comment);
  }
}

// Intern 'str' in the CodeBuffer when emitting insts or stubs; returns NULL
// for other sections (callers must tolerate that).
const char* AbstractAssembler::code_string(const char* str) {
  if (sect() == CodeBuffer::SECT_INSTS || sect() == CodeBuffer::SECT_STUBS) {
    return code_section()->outer()->code_string(str);
  }
  return NULL;
}

// Returns true if a memory access at 'address' may be used as an implicit
// null check, i.e. a SEGV there can be safely interpreted as a null
// dereference: either the first page (offsets off a NULL oop) or, with
// compressed oops, the first page above the heap base (offsets off a
// narrow-oop-decoded NULL).
bool MacroAssembler::uses_implicit_null_check(void* address) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
  uintptr_t addr = reinterpret_cast<uintptr_t>(address);
  uintptr_t page_size = (uintptr_t)os::vm_page_size();
#ifdef _LP64
  if (UseCompressedOops && CompressedOops::base() != NULL) {
    // A SEGV can legitimately happen in C2 code at address
    // (heap_base + offset) if Matcher::narrow_oop_use_complex_address
    // is configured to allow narrow oops field loads to be implicitly
    // null checked
    uintptr_t start = (uintptr_t)CompressedOops::base();
    uintptr_t end = start + page_size;
    if (addr >= start && addr < end) {
      return true;
    }
  }
#endif
  return addr < page_size;
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // The offset -1 is used (hardcoded) in a number of places in C1 and MacroAssembler
  // to indicate an unknown offset. For example, TemplateTable::pop_and_check_object(Register r)
  // calls MacroAssembler::null_check(Register reg, int offset = -1) which gets here
  // with -1. Another example is GraphBuilder::access_field(...) which uses -1 as placeholder
  // for offsets to be patched in later. The -1 there means the offset is not yet known
  // and may lie outside of the zero-trapping page, and thus we need to ensure we're forcing
  // an explicit null check for -1.

  // Check if offset is outside of [0, os::vm_page_size()]
  return offset < 0 || offset >= os::vm_page_size();
}