// Path: hotspot/src/share/vm/c1/c1_LinearScan.hpp (aarch64-shenandoah-jdk8u272-b10)
/*1* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#ifndef SHARE_VM_C1_C1_LINEARSCAN_HPP25#define SHARE_VM_C1_C1_LINEARSCAN_HPP2627#include "c1/c1_FpuStackSim.hpp"28#include "c1/c1_FrameMap.hpp"29#include "c1/c1_IR.hpp"30#include "c1/c1_Instruction.hpp"31#include "c1/c1_LIR.hpp"32#include "c1/c1_LIRGenerator.hpp"3334class DebugInfoCache;35class FpuStackAllocator;36class IRScopeDebugInfo;37class Interval;38class IntervalWalker;39class LIRGenerator;40class LinearScan;41class MoveResolver;42class Range;4344define_array(IntervalArray, Interval*)45define_stack(IntervalList, IntervalArray)4647define_array(IntervalsArray, IntervalList*)48define_stack(IntervalsList, IntervalsArray)4950define_array(OopMapArray, OopMap*)51define_stack(OopMapList, OopMapArray)5253define_array(ScopeValueArray, ScopeValue*)5455define_array(LIR_OpListArray, LIR_OpList*);56define_stack(LIR_OpListStack, LIR_OpListArray);575859enum IntervalUseKind {60// priority of use kinds must be ascending61noUse = 
0,62loopEndMarker = 1,63shouldHaveRegister = 2,64mustHaveRegister = 3,6566firstValidKind = 1,67lastValidKind = 368};69define_array(UseKindArray, IntervalUseKind)70define_stack(UseKindStack, UseKindArray)717273enum IntervalKind {74fixedKind = 0, // interval pre-colored by LIR_Generator75anyKind = 1, // no register/memory allocated by LIR_Generator76nofKinds,77firstKind = fixedKind78};798081// during linear scan an interval is in one of four states in82enum IntervalState {83unhandledState = 0, // unhandled state (not processed yet)84activeState = 1, // life and is in a physical register85inactiveState = 2, // in a life time hole and is in a physical register86handledState = 3, // spilled or not life again87invalidState = -188};899091enum IntervalSpillState {92noDefinitionFound, // starting state of calculation: no definition found yet93oneDefinitionFound, // one definition has already been found.94// Note: two consecutive definitions are treated as one (e.g. consecutive move and add because of two-operand LIR form)95// the position of this definition is stored in _definition_pos96oneMoveInserted, // one spill move has already been inserted.97storeAtDefinition, // the interval should be stored immediately after its definition because otherwise98// there would be multiple redundant stores99startInMemory, // the interval starts in memory (e.g. method parameter), so a store is never necessary100noOptimization // the interval has more then one definition (e.g. 
resulting from phi moves), so stores to memory are not optimized101};102103104#define for_each_interval_kind(kind) \105for (IntervalKind kind = firstKind; kind < nofKinds; kind = (IntervalKind)(kind + 1))106107#define for_each_visitor_mode(mode) \108for (LIR_OpVisitState::OprMode mode = LIR_OpVisitState::firstMode; mode < LIR_OpVisitState::numModes; mode = (LIR_OpVisitState::OprMode)(mode + 1))109110111class LinearScan : public CompilationResourceObj {112// declare classes used by LinearScan as friends because they113// need a wide variety of functions declared here114//115// Only the small interface to the rest of the compiler is public116friend class Interval;117friend class IntervalWalker;118friend class LinearScanWalker;119friend class FpuStackAllocator;120friend class MoveResolver;121friend class LinearScanStatistic;122friend class LinearScanTimers;123friend class RegisterVerifier;124125public:126enum {127any_reg = -1,128nof_cpu_regs = pd_nof_cpu_regs_linearscan,129nof_fpu_regs = pd_nof_fpu_regs_linearscan,130nof_xmm_regs = pd_nof_xmm_regs_linearscan,131nof_regs = nof_cpu_regs + nof_fpu_regs + nof_xmm_regs132};133134private:135Compilation* _compilation;136IR* _ir;137LIRGenerator* _gen;138FrameMap* _frame_map;139140BlockList _cached_blocks; // cached list with all blocks in linear-scan order (only correct if original list keeps unchanged)141int _num_virtual_regs; // number of virtual registers (without new registers introduced because of splitting intervals)142bool _has_fpu_registers; // true if this method uses any floating point registers (and so fpu stack allocation is necessary)143int _num_calls; // total number of calls in this method144int _max_spills; // number of stack slots used for intervals allocated to memory145int _unused_spill_slot; // unused spill slot for a single-word value because of alignment of a double-word value146147IntervalList _intervals; // mapping from register number to interval148IntervalList* _new_intervals_from_allocation; // list 
with all intervals created during allocation when an existing interval is split149IntervalArray* _sorted_intervals; // intervals sorted by Interval::from()150bool _needs_full_resort; // set to true if an Interval::from() is changed and _sorted_intervals must be resorted151152LIR_OpArray _lir_ops; // mapping from LIR_Op id to LIR_Op node153BlockBeginArray _block_of_op; // mapping from LIR_Op id to the BlockBegin containing this instruction154BitMap _has_info; // bit set for each LIR_Op id that has a CodeEmitInfo155BitMap _has_call; // bit set for each LIR_Op id that destroys all caller save registers156BitMap2D _interval_in_loop; // bit set for each virtual register that is contained in each loop157158// cached debug info to prevent multiple creation of same object159// TODO: cached scope values for registers could be static160ScopeValueArray _scope_value_cache;161162static ConstantOopWriteValue* _oop_null_scope_value;163static ConstantIntValue* _int_m1_scope_value;164static ConstantIntValue* _int_0_scope_value;165static ConstantIntValue* _int_1_scope_value;166static ConstantIntValue* _int_2_scope_value;167168// accessors169IR* ir() const { return _ir; }170Compilation* compilation() const { return _compilation; }171LIRGenerator* gen() const { return _gen; }172FrameMap* frame_map() const { return _frame_map; }173174// unified bailout support175void bailout(const char* msg) const { compilation()->bailout(msg); }176bool bailed_out() const { return compilation()->bailed_out(); }177178// access to block list (sorted in linear scan order)179int block_count() const { assert(_cached_blocks.length() == ir()->linear_scan_order()->length(), "invalid cached block list"); return _cached_blocks.length(); }180BlockBegin* block_at(int idx) const { assert(_cached_blocks.at(idx) == ir()->linear_scan_order()->at(idx), "invalid cached block list"); return _cached_blocks.at(idx); }181182int num_virtual_regs() const { return _num_virtual_regs; }183// size of live_in and live_out sets of 
BasicBlocks (BitMap needs rounded size for iteration)184int live_set_size() const { return round_to(_num_virtual_regs, BitsPerWord); }185bool has_fpu_registers() const { return _has_fpu_registers; }186int num_loops() const { return ir()->num_loops(); }187bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }188189// handling of fpu stack allocation (platform dependent, needed for debug information generation)190#ifdef X86191FpuStackAllocator* _fpu_stack_allocator;192bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); }193#else194bool use_fpu_stack_allocation() const { return false; }195#endif196197198// access to interval list199int interval_count() const { return _intervals.length(); }200Interval* interval_at(int reg_num) const { return _intervals.at(reg_num); }201202IntervalList* new_intervals_from_allocation() const { return _new_intervals_from_allocation; }203204// access to LIR_Ops and Blocks indexed by op_id205int max_lir_op_id() const { assert(_lir_ops.length() > 0, "no operations"); return (_lir_ops.length() - 1) << 1; }206LIR_Op* lir_op_with_id(int op_id) const { assert(op_id >= 0 && op_id <= max_lir_op_id() && op_id % 2 == 0, "op_id out of range or not even"); return _lir_ops.at(op_id >> 1); }207BlockBegin* block_of_op_with_id(int op_id) const { assert(_block_of_op.length() > 0 && op_id >= 0 && op_id <= max_lir_op_id() + 1, "op_id out of range"); return _block_of_op.at(op_id >> 1); }208209bool is_block_begin(int op_id) { return op_id == 0 || block_of_op_with_id(op_id) != block_of_op_with_id(op_id - 1); }210bool covers_block_begin(int op_id_1, int op_id_2) { return block_of_op_with_id(op_id_1) != block_of_op_with_id(op_id_2); }211212bool has_call(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_call.at(op_id >> 1); }213bool has_info(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_info.at(op_id >> 1); }214215216// functions for converting 
LIR-Operands to register numbers217static bool is_valid_reg_num(int reg_num) { return reg_num >= 0; }218static int reg_num(LIR_Opr opr);219static int reg_numHi(LIR_Opr opr);220221// functions for classification of intervals222static bool is_precolored_interval(const Interval* i);223static bool is_virtual_interval(const Interval* i);224225static bool is_precolored_cpu_interval(const Interval* i);226static bool is_virtual_cpu_interval(const Interval* i);227static bool is_precolored_fpu_interval(const Interval* i);228static bool is_virtual_fpu_interval(const Interval* i);229230static bool is_in_fpu_register(const Interval* i);231static bool is_oop_interval(const Interval* i);232233234// General helper functions235int allocate_spill_slot(bool double_word);236void assign_spill_slot(Interval* it);237void propagate_spill_slots();238239Interval* create_interval(int reg_num);240void append_interval(Interval* it);241void copy_register_flags(Interval* from, Interval* to);242243// platform dependent functions244static bool is_processed_reg_num(int reg_num);245static int num_physical_regs(BasicType type);246static bool requires_adjacent_regs(BasicType type);247static bool is_caller_save(int assigned_reg);248249// spill move optimization: eliminate moves from register to stack if250// stack slot is known to be correct251void change_spill_definition_pos(Interval* interval, int def_pos);252void change_spill_state(Interval* interval, int spill_pos);253static bool must_store_at_definition(const Interval* i);254void eliminate_spill_moves();255256// Phase 1: number all instructions in all blocks257void number_instructions();258259// Phase 2: compute local live sets separately for each block260// (sets live_gen and live_kill for each block)261//262// helper methods used by compute_local_live_sets()263void set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill);264265void compute_local_live_sets();266267// Phase 3: perform a backward dataflow analysis to compute 
global live sets268// (sets live_in and live_out for each block)269void compute_global_live_sets();270271272// Phase 4: build intervals273// (fills the list _intervals)274//275// helper methods used by build_intervals()276void add_use (Value value, int from, int to, IntervalUseKind use_kind);277278void add_def (LIR_Opr opr, int def_pos, IntervalUseKind use_kind);279void add_use (LIR_Opr opr, int from, int to, IntervalUseKind use_kind);280void add_temp(LIR_Opr opr, int temp_pos, IntervalUseKind use_kind);281282void add_def (int reg_num, int def_pos, IntervalUseKind use_kind, BasicType type);283void add_use (int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type);284void add_temp(int reg_num, int temp_pos, IntervalUseKind use_kind, BasicType type);285286// Add platform dependent kills for particular LIR ops. Can be used287// to add platform dependent behaviour for some operations.288void pd_add_temps(LIR_Op* op);289290IntervalUseKind use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr);291IntervalUseKind use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr);292void handle_method_arguments(LIR_Op* op);293void handle_doubleword_moves(LIR_Op* op);294void add_register_hints(LIR_Op* op);295296void build_intervals();297298299// Phase 5: actual register allocation300// (Uses LinearScanWalker)301//302// helper functions for building a sorted list of intervals303NOT_PRODUCT(bool is_sorted(IntervalArray* intervals);)304static int interval_cmp(Interval** a, Interval** b);305void add_to_list(Interval** first, Interval** prev, Interval* interval);306void create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i));307308void sort_intervals_before_allocation();309void sort_intervals_after_allocation();310void allocate_registers();311312313// Phase 6: resolve data flow314// (insert moves at edges between blocks if intervals have been split)315//316// helper functions for 
resolve_data_flow()317Interval* split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode);318Interval* interval_at_block_begin(BlockBegin* block, int reg_num);319Interval* interval_at_block_end(BlockBegin* block, int reg_num);320Interval* interval_at_op_id(int reg_num, int op_id);321void resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);322void resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);323void resolve_data_flow();324325void resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver);326void resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver);327void resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver);328void resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver);329void resolve_exception_handlers();330331// Phase 7: assign register numbers back to LIR332// (includes computation of debug information and oop maps)333//334// helper functions for assign_reg_num()335VMReg vm_reg_for_interval(Interval* interval);336VMReg vm_reg_for_operand(LIR_Opr opr);337338static LIR_Opr operand_for_interval(Interval* interval);339static LIR_Opr calc_operand_for_interval(const Interval* interval);340LIR_Opr canonical_spill_opr(Interval* interval);341342LIR_Opr color_lir_opr(LIR_Opr opr, int id, LIR_OpVisitState::OprMode);343344// methods used for oop map computation345IntervalWalker* init_compute_oop_maps();346OopMap* compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site);347void compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op);348349// methods used for debug information computation350void init_compute_debug_info();351352MonitorValue* location_for_monitor_index(int monitor_index);353LocationValue* location_for_name(int name, Location::Type 
loc_type);354void set_oop(OopMap* map, VMReg name) {355if (map->legal_vm_reg_name(name)) {356map->set_oop(name);357} else {358bailout("illegal oopMap register name");359}360}361362int append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);363int append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);364int append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values);365366IRScopeDebugInfo* compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state);367void compute_debug_info(CodeEmitInfo* info, int op_id);368369void assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw);370void assign_reg_num();371372373// Phase 8: fpu stack allocation374// (Used only on x86 when fpu operands are present)375void allocate_fpu_stack();376377378// helper functions for printing state379#ifndef PRODUCT380static void print_bitmap(BitMap& bitmap);381void print_intervals(const char* label);382void print_lir(int level, const char* label, bool hir_valid = true);383#endif384385#ifdef ASSERT386// verification functions for allocation387// (check that all intervals have a correct register and that no registers are overwritten)388void verify();389void verify_intervals();390void verify_no_oops_in_fixed_intervals();391void verify_constants();392void verify_registers();393#endif394395public:396// creation397LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map);398399// main entry function: perform linear scan register allocation400void do_linear_scan();401402// accessors used by Compilation403int max_spills() const { return _max_spills; }404int num_calls() const { assert(_num_calls >= 0, "not set"); return _num_calls; }405406// entry functions for printing407#ifndef PRODUCT408static void print_statistics();409static void print_timers(double total);410#endif411};412413414// Helper class for ordering moves that are inserted at the same position in the 
LIR415// When moves between registers are inserted, it is important that the moves are416// ordered such that no register is overwritten. So moves from register to stack417// are processed prior to moves from stack to register. When moves have circular418// dependencies, a temporary stack slot is used to break the circle.419// The same logic is used in the LinearScanWalker and in LinearScan during resolve_data_flow420// and therefore factored out in a separate class421class MoveResolver: public StackObj {422private:423LinearScan* _allocator;424425LIR_List* _insert_list;426int _insert_idx;427LIR_InsertionBuffer _insertion_buffer; // buffer where moves are inserted428429IntervalList _mapping_from;430LIR_OprList _mapping_from_opr;431IntervalList _mapping_to;432bool _multiple_reads_allowed;433int _register_blocked[LinearScan::nof_regs];434435int register_blocked(int reg) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); return _register_blocked[reg]; }436void set_register_blocked(int reg, int direction) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); assert(direction == 1 || direction == -1, "out of bounds"); _register_blocked[reg] += direction; }437438void block_registers(Interval* it);439void unblock_registers(Interval* it);440bool save_to_process_move(Interval* from, Interval* to);441442void create_insertion_buffer(LIR_List* list);443void append_insertion_buffer();444void insert_move(Interval* from_interval, Interval* to_interval);445void insert_move(LIR_Opr from_opr, Interval* to_interval);446447DEBUG_ONLY(void verify_before_resolve();)448void resolve_mappings();449public:450MoveResolver(LinearScan* allocator);451452DEBUG_ONLY(void check_empty();)453void set_multiple_reads_allowed() { _multiple_reads_allowed = true; }454void set_insert_position(LIR_List* insert_list, int insert_idx);455void move_insert_position(LIR_List* insert_list, int insert_idx);456void add_mapping(Interval* from, Interval* to);457void add_mapping(LIR_Opr 
from, Interval* to);458void resolve_and_append_moves();459460LinearScan* allocator() { return _allocator; }461bool has_mappings() { return _mapping_from.length() > 0; }462};463464465class Range : public CompilationResourceObj {466friend class Interval;467468private:469static Range* _end; // sentinel (from == to == max_jint)470471int _from; // from (inclusive)472int _to; // to (exclusive)473Range* _next; // linear list of Ranges474475// used only by class Interval, so hide them476bool intersects(Range* r) const { return intersects_at(r) != -1; }477int intersects_at(Range* r) const;478479public:480Range(int from, int to, Range* next);481482static void initialize(Arena* arena);483static Range* end() { return _end; }484485int from() const { return _from; }486int to() const { return _to; }487Range* next() const { return _next; }488void set_from(int from) { _from = from; }489void set_to(int to) { _to = to; }490void set_next(Range* next) { _next = next; }491492// for testing493void print(outputStream* out = tty) const PRODUCT_RETURN;494};495496497// Interval is an ordered list of disjoint ranges.498499// For pre-colored double word LIR_Oprs, one interval is created for500// the low word register and one is created for the hi word register.501// On Intel for FPU double registers only one interval is created. At502// all times assigned_reg contains the reg. number of the physical503// register.504505// For LIR_Opr in virtual registers a single interval can represent506// single and double word values. When a physical register is507// assigned to the interval, assigned_reg contains the508// phys. reg. number and for double word values assigned_regHi the509// phys. reg. number of the hi word if there is any. For spilled510// intervals assigned_reg contains the stack index. 
assigned_regHi is511// always -1.512513class Interval : public CompilationResourceObj {514private:515static Interval* _end; // sentinel (interval with only range Range::end())516517int _reg_num;518BasicType _type; // valid only for virtual registers519Range* _first; // sorted list of Ranges520intStack _use_pos_and_kinds; // sorted list of use-positions and their according use-kinds521522Range* _current; // interval iteration: the current Range523Interval* _next; // interval iteration: sorted list of Intervals (ends with sentinel)524IntervalState _state; // interval iteration: to which set belongs this interval525526527int _assigned_reg;528int _assigned_regHi;529530int _cached_to; // cached value: to of last range (-1: not cached)531LIR_Opr _cached_opr;532VMReg _cached_vm_reg;533534Interval* _split_parent; // the original interval where this interval is derived from535IntervalList _split_children; // list of all intervals that are split off from this interval (only available for split parents)536Interval* _current_split_child; // the current split child that has been active or inactive last (always stored in split parents)537538int _canonical_spill_slot; // the stack slot where all split parts of this interval are spilled to (always stored in split parents)539bool _insert_move_when_activated; // true if move is inserted between _current_split_child and this interval when interval gets active the first time540IntervalSpillState _spill_state; // for spill move optimization541int _spill_definition_pos; // position where the interval is defined (if defined only once)542Interval* _register_hint; // this interval should be in the same register as the hint interval543544int calc_to();545Interval* new_split_child();546public:547Interval(int reg_num);548549static void initialize(Arena* arena);550static Interval* end() { return _end; }551552// accessors553int reg_num() const { return _reg_num; }554void set_reg_num(int r) { assert(_reg_num == -1, "cannot change reg_num"); 
_reg_num = r; }555BasicType type() const { assert(_reg_num == -1 || _reg_num >= LIR_OprDesc::vreg_base, "cannot access type for fixed interval"); return _type; }556void set_type(BasicType type) { assert(_reg_num < LIR_OprDesc::vreg_base || _type == T_ILLEGAL || _type == type, "overwriting existing type"); _type = type; }557558Range* first() const { return _first; }559int from() const { return _first->from(); }560int to() { if (_cached_to == -1) _cached_to = calc_to(); assert(_cached_to == calc_to(), "invalid cached value"); return _cached_to; }561int num_use_positions() const { return _use_pos_and_kinds.length() / 2; }562563Interval* next() const { return _next; }564Interval** next_addr() { return &_next; }565void set_next(Interval* next) { _next = next; }566567int assigned_reg() const { return _assigned_reg; }568int assigned_regHi() const { return _assigned_regHi; }569void assign_reg(int reg) { _assigned_reg = reg; _assigned_regHi = LinearScan::any_reg; }570void assign_reg(int reg,int regHi) { _assigned_reg = reg; _assigned_regHi = regHi; }571572Interval* register_hint(bool search_split_child = true) const; // calculation needed573void set_register_hint(Interval* i) { _register_hint = i; }574575int state() const { return _state; }576void set_state(IntervalState s) { _state = s; }577578// access to split parent and split children579bool is_split_parent() const { return _split_parent == this; }580bool is_split_child() const { return _split_parent != this; }581Interval* split_parent() const { assert(_split_parent->is_split_parent(), "must be"); return _split_parent; }582Interval* split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode);583Interval* split_child_before_op_id(int op_id);584bool split_child_covers(int op_id, LIR_OpVisitState::OprMode mode);585DEBUG_ONLY(void check_split_children();)586587// information stored in split parent, but available for all children588int canonical_spill_slot() const { return split_parent()->_canonical_spill_slot; }589void 
set_canonical_spill_slot(int slot) { assert(split_parent()->_canonical_spill_slot == -1, "overwriting existing value"); split_parent()->_canonical_spill_slot = slot; }590Interval* current_split_child() const { return split_parent()->_current_split_child; }591void make_current_split_child() { split_parent()->_current_split_child = this; }592593bool insert_move_when_activated() const { return _insert_move_when_activated; }594void set_insert_move_when_activated(bool b) { _insert_move_when_activated = b; }595596// for spill optimization597IntervalSpillState spill_state() const { return split_parent()->_spill_state; }598int spill_definition_pos() const { return split_parent()->_spill_definition_pos; }599void set_spill_state(IntervalSpillState state) { assert(state >= spill_state(), "state cannot decrease"); split_parent()->_spill_state = state; }600void set_spill_definition_pos(int pos) { assert(spill_definition_pos() == -1, "cannot set the position twice"); split_parent()->_spill_definition_pos = pos; }601// returns true if this interval has a shadow copy on the stack that is always correct602bool always_in_memory() const { return split_parent()->_spill_state == storeAtDefinition || split_parent()->_spill_state == startInMemory; }603604// caching of values that take time to compute and are used multiple times605LIR_Opr cached_opr() const { return _cached_opr; }606VMReg cached_vm_reg() const { return _cached_vm_reg; }607void set_cached_opr(LIR_Opr opr) { _cached_opr = opr; }608void set_cached_vm_reg(VMReg reg) { _cached_vm_reg = reg; }609610// access to use positions611int first_usage(IntervalUseKind min_use_kind) const; // id of the first operation requiring this interval in a register612int next_usage(IntervalUseKind min_use_kind, int from) const; // id of next usage seen from the given position613int next_usage_exact(IntervalUseKind exact_use_kind, int from) const;614int previous_usage(IntervalUseKind min_use_kind, int from) const;615616// manipulating 
intervals617void add_use_pos(int pos, IntervalUseKind use_kind);618void add_range(int from, int to);619Interval* split(int split_pos);620Interval* split_from_start(int split_pos);621void remove_first_use_pos() { _use_pos_and_kinds.truncate(_use_pos_and_kinds.length() - 2); }622623// test intersection624bool covers(int op_id, LIR_OpVisitState::OprMode mode) const;625bool has_hole_between(int from, int to);626bool intersects(Interval* i) const { return _first->intersects(i->_first); }627int intersects_at(Interval* i) const { return _first->intersects_at(i->_first); }628629// range iteration630void rewind_range() { _current = _first; }631void next_range() { assert(this != _end, "not allowed on sentinel"); _current = _current->next(); }632int current_from() const { return _current->from(); }633int current_to() const { return _current->to(); }634bool current_at_end() const { return _current == Range::end(); }635bool current_intersects(Interval* it) { return _current->intersects(it->_current); };636int current_intersects_at(Interval* it) { return _current->intersects_at(it->_current); };637638// printing639void print(outputStream* out = tty) const PRODUCT_RETURN;640};641642643class IntervalWalker : public CompilationResourceObj {644protected:645Compilation* _compilation;646LinearScan* _allocator;647648Interval* _unhandled_first[nofKinds]; // sorted list of intervals, not life before the current position649Interval* _active_first [nofKinds]; // sorted list of intervals, life at the current position650Interval* _inactive_first [nofKinds]; // sorted list of intervals, intervals in a life time hole at the current position651652Interval* _current; // the current interval coming from unhandled list653int _current_position; // the current position (intercept point through the intervals)654IntervalKind _current_kind; // and whether it is fixed_kind or any_kind.655656657Compilation* compilation() const { return _compilation; }658LinearScan* allocator() const { return _allocator; 
}

  // unified bailout support
  void bailout(const char* msg) const           { compilation()->bailout(msg); }
  bool bailed_out() const                       { return compilation()->bailed_out(); }

  void check_bounds(IntervalKind kind) { assert(kind >= fixedKind && kind <= anyKind, "invalid interval_kind"); }

  Interval** unhandled_first_addr(IntervalKind kind) { check_bounds(kind); return &_unhandled_first[kind]; }
  Interval** active_first_addr(IntervalKind kind)    { check_bounds(kind); return &_active_first[kind]; }
  Interval** inactive_first_addr(IntervalKind kind)  { check_bounds(kind); return &_inactive_first[kind]; }

  void append_unsorted(Interval** first, Interval* interval);
  void append_sorted(Interval** first, Interval* interval);
  void append_to_unhandled(Interval** list, Interval* interval);

  bool remove_from_list(Interval** list, Interval* i);
  void remove_from_list(Interval* i);

  void next_interval();
  Interval*    current() const      { return _current; }
  IntervalKind current_kind() const { return _current_kind; }

  void walk_to(IntervalState state, int from);

  // activate_current() is called when an unhandled interval becomes active (in current(), current_kind()).
  // Return false if current() should not be moved to the active interval list.
  // It is safe to append current to any interval list but the unhandled list.
  virtual bool activate_current() { return true; }

  // interval_moved() is called whenever an interval moves from one interval list to another.
  // In the implementation of this method it is prohibited to move the interval to any list.
  virtual void interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to);

 public:
  IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);

  Interval* unhandled_first(IntervalKind kind) { check_bounds(kind); return _unhandled_first[kind]; }
  Interval* active_first(IntervalKind kind)    { check_bounds(kind); return _active_first[kind]; }
  Interval* inactive_first(IntervalKind kind)  { check_bounds(kind); return _inactive_first[kind]; }

  // active contains the intervals that are live after the lir_op
  void walk_to(int lir_op_id);
  // active contains the intervals that are live before the lir_op
  void walk_before(int lir_op_id)  { walk_to(lir_op_id-1); }
  // walk through all intervals
  void walk()                      { walk_to(max_jint); }

  int current_position()           { return _current_position; }
};


// The actual linear scan register allocator
class LinearScanWalker : public IntervalWalker {
  enum {
    any_reg = LinearScan::any_reg
  };

 private:
  int              _first_reg;       // the reg. number of the first phys. register
  int              _last_reg;        // the reg. number of the last phys. register
  int              _num_phys_regs;   // required by current interval
  bool             _adjacent_regs;   // have lo/hi words of phys. regs be adjacent

  int              _use_pos[LinearScan::nof_regs];
  int              _block_pos[LinearScan::nof_regs];
  IntervalList*    _spill_intervals[LinearScan::nof_regs];

  MoveResolver     _move_resolver;   // for ordering spill moves

  // accessors mapped to same functions in class LinearScan
  int         block_count() const     { return allocator()->block_count(); }
  BlockBegin* block_at(int idx) const { return allocator()->block_at(idx); }
  BlockBegin* block_of_op_with_id(int op_id) const { return allocator()->block_of_op_with_id(op_id); }

  void init_use_lists(bool only_process_use_pos);
  void exclude_from_use(int reg);
  void exclude_from_use(Interval* i);
  void set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos);
  void set_use_pos(Interval* i, int use_pos, bool only_process_use_pos);
  void set_block_pos(int reg, Interval* i, int block_pos);
  void set_block_pos(Interval* i, int block_pos);

  void free_exclude_active_fixed();
  void free_exclude_active_any();
  void free_collect_inactive_fixed(Interval* cur);
  void free_collect_inactive_any(Interval* cur);
  void free_collect_unhandled(IntervalKind kind, Interval* cur);
  void spill_exclude_active_fixed();
  void spill_block_unhandled_fixed(Interval* cur);
  void spill_block_inactive_fixed(Interval* cur);
  void spill_collect_active_any();
  void spill_collect_inactive_any(Interval* cur);

  void insert_move(int op_id, Interval* src_it, Interval* dst_it);
  int  find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos);
  int  find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization);
  void split_before_usage(Interval* it, int min_split_pos, int max_split_pos);
  void split_for_spilling(Interval* it);
  void split_stack_interval(Interval* it);
  void split_when_partial_register_available(Interval* it, int register_available_until);
  void split_and_spill_interval(Interval* it);

  int  find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
  int  find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
  bool alloc_free_reg(Interval* cur);

  int  find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
  int  find_locked_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
  void split_and_spill_intersecting_intervals(int reg, int regHi);
  void alloc_locked_reg(Interval* cur);

  bool no_allocation_possible(Interval* cur);
  void update_phys_reg_range(bool requires_cpu_register);
  void init_vars_for_alloc(Interval* cur);
  bool pd_init_regs_for_alloc(Interval* cur);

  void combine_spilled_intervals(Interval* cur);
  bool is_move(LIR_Op* op, Interval* from, Interval* to);

  bool activate_current();

 public:
  LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);

  // must be called when all intervals are allocated
  void finish_allocation()  { _move_resolver.resolve_and_append_moves(); }
};



/*
  When a block has more than one predecessor, and all predecessors end with
  the same sequence of move-instructions, then these moves can be placed once
  at the beginning of the block instead of multiple times in the predecessors.

  Similarly, when a block has more than one successor, then equal sequences of
  moves at the beginning of the successors can be placed once at the end of
  the block. But because the moves must be inserted before all branch
  instructions, this works only when there is exactly one conditional branch
  at the end of the block (because the moves must be inserted before all
  branches, but after all compares).

  This optimization affects all kinds of moves (reg->reg, reg->stack and
  stack->reg). Because this optimization works best when a block contains only
  a few moves, it has a huge impact on the number of blocks that are totally
  empty.
*/
class EdgeMoveOptimizer : public StackObj {
 private:
  // the class maintains a list with all lir-instruction-list of the
  // successors (predecessors) and the current index into the lir-lists
  LIR_OpListStack _edge_instructions;
  intStack        _edge_instructions_idx;

  void init_instructions();
  void append_instructions(LIR_OpList* instructions, int instructions_idx);
  LIR_Op* instruction_at(int edge);
  void remove_cur_instruction(int edge, bool decrement_index);

  bool operations_different(LIR_Op* op1, LIR_Op* op2);

  void optimize_moves_at_block_end(BlockBegin* cur);
  void optimize_moves_at_block_begin(BlockBegin* cur);

  EdgeMoveOptimizer();

 public:
  static void optimize(BlockList* code);
};



// Control-flow cleanup pass run after register allocation: reorders short
// loops, deletes empty blocks, and removes unnecessary jumps and jumps to
// return blocks (see the private helpers below).
class ControlFlowOptimizer : public StackObj {
 private:
  BlockList _original_preds;

  enum {
    ShortLoopSize = 5
  };
  void reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx);
  void reorder_short_loops(BlockList* code);

  bool can_delete_block(BlockBegin* cur);
  void substitute_branch_target(BlockBegin* cur, BlockBegin* target_from, BlockBegin* target_to);
  void delete_empty_blocks(BlockList* code);

  void delete_unnecessary_jumps(BlockList* code);
  void delete_jumps_to_return(BlockList* code);

  DEBUG_ONLY(void verify(BlockList* code);)

  ControlFlowOptimizer();
 public:
  static void optimize(BlockList* code);
};


#ifndef PRODUCT

// Helper class for collecting statistics of LinearScan
class LinearScanStatistic : public StackObj {
 public:
  enum Counter {
    // general counters
    counter_method,
    counter_fpu_method,
    counter_loop_method,
    counter_exception_method,
    counter_loop,
    counter_block,
    counter_loop_block,
    counter_exception_block,
    counter_interval,
    counter_fixed_interval,
    counter_range,
    counter_fixed_range,
    counter_use_pos,
    counter_fixed_use_pos,
    counter_spill_slots,
    blank_line_1,

    // counter for classes of lir instructions
    counter_instruction,
    counter_label,
    counter_entry,
    counter_return,
    counter_call,
    counter_move,
    counter_cmp,
    counter_cond_branch,
    counter_uncond_branch,
    counter_stub_branch,
    counter_alu,
    counter_alloc,
    counter_sync,
    counter_throw,
    counter_unwind,
    counter_typecheck,
    counter_fpu_stack,
    counter_misc_inst,
    counter_other_inst,
    blank_line_2,

    // counter for different types of moves
    counter_move_total,
    counter_move_reg_reg,
    counter_move_reg_stack,
    counter_move_stack_reg,
    counter_move_stack_stack,
    counter_move_reg_mem,
    counter_move_mem_reg,
    counter_move_const_any,

    number_of_counters,
    invalid_counter = -1
  };

 private:
  int _counters_sum[number_of_counters];
  int _counters_max[number_of_counters];

  void inc_counter(Counter idx, int value = 1) { _counters_sum[idx] += value; }

  const char* counter_name(int counter_idx);
  Counter base_counter(int counter_idx);

  void sum_up(LinearScanStatistic &method_statistic);
  void collect(LinearScan* allocator);

 public:
  LinearScanStatistic();
  void print(const char* title);
  static void compute(LinearScan* allocator, LinearScanStatistic &global_statistic);
};


// Helper class for collecting compilation time of LinearScan
class LinearScanTimers : public StackObj {
 public:
  enum Timer {
    timer_do_nothing,
    timer_number_instructions,
    timer_compute_local_live_sets,
    timer_compute_global_live_sets,
    timer_build_intervals,
    timer_sort_intervals_before,
    timer_allocate_registers,
    timer_resolve_data_flow,
    timer_sort_intervals_after,
    timer_eliminate_spill_moves,
    timer_assign_reg_num,
    timer_allocate_fpu_stack,
    timer_optimize_lir,

    number_of_timers
  };

 private:
  elapsedTimer _timers[number_of_timers];
  const char*  timer_name(int idx);

 public:
  LinearScanTimers();

  void begin_method();                     // called for each method when register allocation starts
  void end_method(LinearScan* allocator);  // called for each method when register allocation completed
  void print(double total_time);           // called before termination of VM to print global summary

  elapsedTimer* timer(int idx) { return &(_timers[idx]); }
};


#endif // ifndef PRODUCT


// Pick up platform-dependent implementation details
#ifdef TARGET_ARCH_x86
# include "c1_LinearScan_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch32
# include "c1_LinearScan_aarch32.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c1_LinearScan_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_LinearScan_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_LinearScan_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_LinearScan_ppc.hpp"
#endif


#endif // SHARE_VM_C1_C1_LINEARSCAN_HPP