// Source: hotspot/src/cpu/x86/vm/c1_LinearScan_x86.hpp (aarch64-shenandoah-jdk8u272-b10)
/*1* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#ifndef CPU_X86_VM_C1_LINEARSCAN_X86_HPP25#define CPU_X86_VM_C1_LINEARSCAN_X86_HPP2627inline bool LinearScan::is_processed_reg_num(int reg_num) {28#ifndef _LP6429// rsp and rbp (numbers 6 ancd 7) are ignored30assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below");31assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");32assert(reg_num >= 0, "invalid reg_num");33#else34// rsp and rbp, r10, r15 (numbers [12,15]) are ignored35// r12 (number 11) is conditional on compressed oops.36assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below");37assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");38assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");39assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below");40assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");41assert(reg_num >= 0, "invalid reg_num");42#endif // 
_LP6443return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;44}4546inline int LinearScan::num_physical_regs(BasicType type) {47// Intel requires two cpu registers for long,48// but requires only one fpu register for double49if (LP64_ONLY(false &&) type == T_LONG) {50return 2;51}52return 1;53}545556inline bool LinearScan::requires_adjacent_regs(BasicType type) {57return false;58}5960inline bool LinearScan::is_caller_save(int assigned_reg) {61assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers");62return true; // no callee-saved registers on Intel6364}656667inline void LinearScan::pd_add_temps(LIR_Op* op) {68switch (op->code()) {69case lir_tan:70case lir_sin:71case lir_cos: {72// The slow path for these functions may need to save and73// restore all live registers but we don't want to save and74// restore everything all the time, so mark the xmms as being75// killed. If the slow path were explicit or we could propagate76// live register masks down to the assembly we could do better77// but we don't have any easy way to do that right now. 
We78// could also consider not killing all xmm registers if we79// assume that slow paths are uncommon but it's not clear that80// would be a good idea.81if (UseSSE > 0) {82#ifndef PRODUCT83if (TraceLinearScanLevel >= 2) {84tty->print_cr("killing XMMs for trig");85}86#endif87int op_id = op->id();88for (int xmm = 0; xmm < FrameMap::nof_caller_save_xmm_regs; xmm++) {89LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(xmm);90add_temp(reg_num(opr), op_id, noUse, T_ILLEGAL);91}92}93break;94}95}96}979899// Implementation of LinearScanWalker100101inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {102if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {103assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");104_first_reg = pd_first_byte_reg;105_last_reg = FrameMap::last_byte_reg();106return true;107} else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {108_first_reg = pd_first_xmm_reg;109_last_reg = pd_last_xmm_reg;110return true;111}112113return false;114}115116117class FpuStackAllocator VALUE_OBJ_CLASS_SPEC {118private:119Compilation* _compilation;120LinearScan* _allocator;121122LIR_OpVisitState visitor;123124LIR_List* _lir;125int _pos;126FpuStackSim _sim;127FpuStackSim _temp_sim;128129bool _debug_information_computed;130131LinearScan* allocator() { return _allocator; }132Compilation* compilation() const { return _compilation; }133134// unified bailout support135void bailout(const char* msg) const { compilation()->bailout(msg); }136bool bailed_out() const { return compilation()->bailed_out(); }137138int pos() { return _pos; }139void set_pos(int pos) { _pos = pos; }140LIR_Op* cur_op() { return lir()->instructions_list()->at(pos()); }141LIR_List* lir() { return _lir; }142void set_lir(LIR_List* lir) { _lir = lir; }143FpuStackSim* sim() { return &_sim; }144FpuStackSim* temp_sim() { return &_temp_sim; }145146int fpu_num(LIR_Opr opr);147int tos_offset(LIR_Opr 
opr);148LIR_Opr to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset = false);149150// Helper functions for handling operations151void insert_op(LIR_Op* op);152void insert_exchange(int offset);153void insert_exchange(LIR_Opr opr);154void insert_free(int offset);155void insert_free_if_dead(LIR_Opr opr);156void insert_free_if_dead(LIR_Opr opr, LIR_Opr ignore);157void insert_copy(LIR_Opr from, LIR_Opr to);158void do_rename(LIR_Opr from, LIR_Opr to);159void do_push(LIR_Opr opr);160void pop_if_last_use(LIR_Op* op, LIR_Opr opr);161void pop_always(LIR_Op* op, LIR_Opr opr);162void clear_fpu_stack(LIR_Opr preserve);163void handle_op1(LIR_Op1* op1);164void handle_op2(LIR_Op2* op2);165void handle_opCall(LIR_OpCall* opCall);166void compute_debug_information(LIR_Op* op);167void allocate_exception_handler(XHandler* xhandler);168void allocate_block(BlockBegin* block);169170#ifndef PRODUCT171void check_invalid_lir_op(LIR_Op* op);172#endif173174// Helper functions for merging of fpu stacks175void merge_insert_add(LIR_List* instrs, FpuStackSim* cur_sim, int reg);176void merge_insert_xchg(LIR_List* instrs, FpuStackSim* cur_sim, int slot);177void merge_insert_pop(LIR_List* instrs, FpuStackSim* cur_sim);178bool merge_rename(FpuStackSim* cur_sim, FpuStackSim* sux_sim, int start_slot, int change_slot);179void merge_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, FpuStackSim* sux_sim);180void merge_cleanup_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, BitMap& live_fpu_regs);181bool merge_fpu_stack_with_successors(BlockBegin* block);182183public:184LIR_Opr to_fpu_stack(LIR_Opr opr); // used by LinearScan for creation of debug information185186FpuStackAllocator(Compilation* compilation, LinearScan* allocator);187void allocate();188};189190#endif // CPU_X86_VM_C1_LINEARSCAN_X86_HPP191192193