From 8ec4f3eb747cb48cd563b3cb7df1cc2bed96e010 Mon Sep 17 00:00:00 2001
From: iveresov
Date: Fri, 3 Sep 2010 17:51:07 -0700
Subject: 6953144: Tiered compilation

Summary: Infrastructure for tiered compilation support (interpreter + c1 + c2) for 32 and 64 bit. Simple tiered policy implementation.
Reviewed-by: kvn, never, phh, twisti
---
 src/share/vm/c1/c1_Canonicalizer.cpp      |  15 ++-
 src/share/vm/c1/c1_Canonicalizer.hpp      |  10 +-
 src/share/vm/c1/c1_CodeStubs.hpp          |   6 +-
 src/share/vm/c1/c1_Compilation.cpp        |  13 ++-
 src/share/vm/c1/c1_Compilation.hpp        |  26 +++++
 src/share/vm/c1/c1_Compiler.hpp           |   4 +-
 src/share/vm/c1/c1_GraphBuilder.cpp       | 177 ++++++++++++++++++------
 src/share/vm/c1/c1_GraphBuilder.hpp       |  32 ++----
 src/share/vm/c1/c1_IR.cpp                 |  36 +++---
 src/share/vm/c1/c1_Instruction.cpp        |   8 +-
 src/share/vm/c1/c1_Instruction.hpp        |  75 ++++++++-----
 src/share/vm/c1/c1_InstructionPrinter.cpp |  20 +---
 src/share/vm/c1/c1_InstructionPrinter.hpp |   4 +-
 src/share/vm/c1/c1_LIR.cpp                |  50 +++++----
 src/share/vm/c1/c1_LIR.hpp                |  33 ++++--
 src/share/vm/c1/c1_LIRAssembler.cpp       |  10 ++
 src/share/vm/c1/c1_LIRAssembler.hpp       |   1 +
 src/share/vm/c1/c1_LIRGenerator.cpp       | 173 +++++++++++++++++++----------
 src/share/vm/c1/c1_LIRGenerator.hpp       |  32 ++++--
 src/share/vm/c1/c1_Optimizer.cpp          |   6 +-
 src/share/vm/c1/c1_Runtime1.cpp           |  77 ++++++++-----
 src/share/vm/c1/c1_Runtime1.hpp           |   4 +-
 src/share/vm/c1/c1_ValueMap.hpp           |   4 +-
 src/share/vm/c1/c1_globals.hpp            |  38 ++-----
 24 files changed, 531 insertions(+), 323 deletions(-)

(limited to 'src/share/vm/c1')

diff --git a/src/share/vm/c1/c1_Canonicalizer.cpp b/src/share/vm/c1/c1_Canonicalizer.cpp
index 5607fc00f..56ea0d5fa 100644
--- a/src/share/vm/c1/c1_Canonicalizer.cpp
+++ b/src/share/vm/c1/c1_Canonicalizer.cpp
@@ -652,10 +652,20 @@ void Canonicalizer::do_If(If* x) {
   else if (lss_sux == gtr_sux) { cond = If::neq; tsux = lss_sux; fsux = eql_sux; }
   else if (eql_sux == gtr_sux) { cond = If::geq; tsux = eql_sux; fsux = lss_sux; }
   else                         { ShouldNotReachHere(); }
-  If* canon = new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, cmp->state_before(), x->is_safepoint());
+  If* canon = new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, cmp->state_before(), x->is_safepoint());
   if (cmp->x() == cmp->y()) {
     do_If(canon);
   } else {
+    if (compilation()->profile_branches()) {
+      // TODO: If profiling, leave floating point comparisons unoptimized.
+      // We currently do not support profiling of the unordered case.
+      switch(cmp->op()) {
+        case Bytecodes::_fcmpl: case Bytecodes::_fcmpg:
+        case Bytecodes::_dcmpl: case Bytecodes::_dcmpg:
+          set_canonical(x);
+          return;
+      }
+    }
     set_canonical(canon);
     set_bci(cmp->bci());
   }
@@ -881,4 +891,5 @@ void Canonicalizer::do_UnsafePutObject(UnsafePutObject* x) {}
 void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {}
 void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
 void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
-void Canonicalizer::do_ProfileCounter(ProfileCounter* x) {}
+void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
+
diff --git a/src/share/vm/c1/c1_Canonicalizer.hpp b/src/share/vm/c1/c1_Canonicalizer.hpp
index ae2530c10..a25a4bd23 100644
--- a/src/share/vm/c1/c1_Canonicalizer.hpp
+++ b/src/share/vm/c1/c1_Canonicalizer.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -24,9 +24,11 @@ class Canonicalizer: InstructionVisitor { private: + Compilation *_compilation; Instruction* _canonical; int _bci; + Compilation *compilation() { return _compilation; } void set_canonical(Value x); void set_bci(int bci) { _bci = bci; } void set_constant(jint x) { set_canonical(new Constant(new IntConstant(x))); } @@ -43,7 +45,9 @@ class Canonicalizer: InstructionVisitor { int* scale); public: - Canonicalizer(Value x, int bci) { _canonical = x; _bci = bci; if (CanonicalizeNodes) x->visit(this); } + Canonicalizer(Compilation* c, Value x, int bci) : _compilation(c), _canonical(x), _bci(bci) { + if (CanonicalizeNodes) x->visit(this); + } Value canonical() const { return _canonical; } int bci() const { return _bci; } @@ -92,5 +96,5 @@ class Canonicalizer: InstructionVisitor { virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x); virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); virtual void do_ProfileCall (ProfileCall* x); - virtual void do_ProfileCounter (ProfileCounter* x); + virtual void do_ProfileInvoke (ProfileInvoke* x); }; diff --git a/src/share/vm/c1/c1_CodeStubs.hpp b/src/share/vm/c1/c1_CodeStubs.hpp index 07ed4302d..d8a8ed6bc 100644 --- a/src/share/vm/c1/c1_CodeStubs.hpp +++ b/src/share/vm/c1/c1_CodeStubs.hpp @@ -80,20 +80,21 @@ class CodeStubList: public _CodeStubList { } }; -#ifdef TIERED class CounterOverflowStub: public CodeStub { private: CodeEmitInfo* _info; int _bci; + LIR_Opr _method; public: - CounterOverflowStub(CodeEmitInfo* info, int bci) : _info(info), _bci(bci) { + CounterOverflowStub(CodeEmitInfo* info, int bci, LIR_Opr method) : _info(info), _bci(bci), _method(method) { } virtual void emit_code(LIR_Assembler* e); virtual void visit(LIR_OpVisitState* visitor) { visitor->do_slow_case(_info); + visitor->do_input(_method); } #ifndef PRODUCT @@ -101,7 +102,6 @@ public: #endif // PRODUCT }; -#endif // TIERED class ConversionStub: public CodeStub { private: diff --git a/src/share/vm/c1/c1_Compilation.cpp b/src/share/vm/c1/c1_Compilation.cpp index 38235a3b3..b1dd7e201 100644 --- a/src/share/vm/c1/c1_Compilation.cpp +++ b/src/share/vm/c1/c1_Compilation.cpp @@ -290,9 +290,13 @@ int Compilation::compile_java_method() { CHECK_BAILOUT_(no_frame_size); + if (is_profiling()) { + method()->build_method_data(); + } + { PhaseTraceTime timeit(_t_buildIR); - build_hir(); + build_hir(); } if (BailoutAfterHIR) { BAILOUT_("Bailing out because of -XX:+BailoutAfterHIR", no_frame_size); @@ -447,6 +451,7 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho , _masm(NULL) , _has_exception_handlers(false) , _has_fpu_code(true) // pessimistic assumption +, _would_profile(false) , _has_unsafe_access(false) , _has_method_handle_invokes(false) , _bailout_msg(NULL) @@ -461,12 +466,16 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho #endif // PRODUCT { PhaseTraceTime timeit(_t_compile); - _arena = Thread::current()->resource_area(); _env->set_compiler_data(this); _exception_info_list = new ExceptionInfoList(); _implicit_exception_table.set_size(0); compile_method(); + if (is_profiling() && _would_profile) { + ciMethodData *md = method->method_data(); + assert (md != NULL, "Should have MDO"); + md->set_would_profile(_would_profile); + } } Compilation::~Compilation() { diff --git a/src/share/vm/c1/c1_Compilation.hpp b/src/share/vm/c1/c1_Compilation.hpp index 82c1699ea..a66db0891 100644 --- a/src/share/vm/c1/c1_Compilation.hpp +++ 
b/src/share/vm/c1/c1_Compilation.hpp @@ -69,6 +69,7 @@ class Compilation: public StackObj { bool _has_exception_handlers; bool _has_fpu_code; bool _has_unsafe_access; + bool _would_profile; bool _has_method_handle_invokes; // True if this method has MethodHandle invokes. const char* _bailout_msg; ExceptionInfoList* _exception_info_list; @@ -143,6 +144,7 @@ class Compilation: public StackObj { void set_has_exception_handlers(bool f) { _has_exception_handlers = f; } void set_has_fpu_code(bool f) { _has_fpu_code = f; } void set_has_unsafe_access(bool f) { _has_unsafe_access = f; } + void set_would_profile(bool f) { _would_profile = f; } // Add a set of exception handlers covering the given PC offset void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers); // Statistics gathering @@ -202,6 +204,30 @@ class Compilation: public StackObj { void compile_only_this_scope(outputStream* st, IRScope* scope); void exclude_this_method(); #endif // PRODUCT + + bool is_profiling() { + return env()->comp_level() == CompLevel_full_profile || + env()->comp_level() == CompLevel_limited_profile; + } + bool count_invocations() { return is_profiling(); } + bool count_backedges() { return is_profiling(); } + + // Helpers for generation of profile information + bool profile_branches() { + return env()->comp_level() == CompLevel_full_profile && + C1UpdateMethodData && C1ProfileBranches; + } + bool profile_calls() { + return env()->comp_level() == CompLevel_full_profile && + C1UpdateMethodData && C1ProfileCalls; + } + bool profile_inlined_calls() { + return profile_calls() && C1ProfileInlinedCalls; + } + bool profile_checkcasts() { + return env()->comp_level() == CompLevel_full_profile && + C1UpdateMethodData && C1ProfileCheckcasts; + } }; diff --git a/src/share/vm/c1/c1_Compiler.hpp b/src/share/vm/c1/c1_Compiler.hpp index a8e6eacd7..43eb204f7 100644 --- a/src/share/vm/c1/c1_Compiler.hpp +++ b/src/share/vm/c1/c1_Compiler.hpp @@ -39,9 +39,7 @@ class Compiler: public AbstractCompiler { // Name of this compiler virtual const char* name() { return "C1"; } -#ifdef TIERED - virtual bool is_c1() { return true; }; -#endif // TIERED + virtual bool is_c1() { return true; }; BufferBlob* build_buffer_blob(); diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp index 1e80c4598..3eb3d97f5 100644 --- a/src/share/vm/c1/c1_GraphBuilder.cpp +++ b/src/share/vm/c1/c1_GraphBuilder.cpp @@ -1144,8 +1144,16 @@ void GraphBuilder::increment() { void GraphBuilder::_goto(int from_bci, int to_bci) { - profile_bci(from_bci); - append(new Goto(block_at(to_bci), to_bci <= from_bci)); + Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci); + if (is_profiling()) { + compilation()->set_would_profile(true); + } + if (profile_branches()) { + x->set_profiled_method(method()); + x->set_profiled_bci(bci()); + x->set_should_profile(true); + } + append(x); } @@ -1153,11 +1161,45 @@ void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* sta BlockBegin* tsux = block_at(stream()->get_dest()); BlockBegin* fsux = block_at(stream()->next_bci()); bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci(); - If* if_node = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb))->as_If(); - if (profile_branches() && (if_node != NULL)) { - if_node->set_profiled_method(method()); - if_node->set_profiled_bci(bci()); - if_node->set_should_profile(true); + Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? 
state_before : NULL, is_bb)); + + if (is_profiling()) { + If* if_node = i->as_If(); + if (if_node != NULL) { + // Note that we'd collect profile data in this method if we wanted it. + compilation()->set_would_profile(true); + // At level 2 we need the proper bci to count backedges + if_node->set_profiled_bci(bci()); + if (profile_branches()) { + // Successors can be rotated by the canonicalizer, check for this case. + if_node->set_profiled_method(method()); + if_node->set_should_profile(true); + if (if_node->tsux() == fsux) { + if_node->set_swapped(true); + } + } + return; + } + + // Check if this If was reduced to Goto. + Goto *goto_node = i->as_Goto(); + if (goto_node != NULL) { + compilation()->set_would_profile(true); + if (profile_branches()) { + goto_node->set_profiled_method(method()); + goto_node->set_profiled_bci(bci()); + goto_node->set_should_profile(true); + // Find out which successor is used. + if (goto_node->default_sux() == tsux) { + goto_node->set_direction(Goto::taken); + } else if (goto_node->default_sux() == fsux) { + goto_node->set_direction(Goto::not_taken); + } else { + ShouldNotReachHere(); + } + } + return; + } } } @@ -1698,8 +1740,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) { if (recv != NULL && (code == Bytecodes::_invokespecial || - !is_loaded || target->is_final() || - profile_calls())) { + !is_loaded || target->is_final())) { // invokespecial always needs a NULL check. invokevirtual where // the target is final or where it's not known that whether the // target is final requires a NULL check. Otherwise normal @@ -1709,15 +1750,23 @@ void GraphBuilder::invoke(Bytecodes::Code code) { null_check(recv); } - if (profile_calls()) { - assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set"); - ciKlass* target_klass = NULL; - if (cha_monomorphic_target != NULL) { - target_klass = cha_monomorphic_target->holder(); - } else if (exact_target != NULL) { - target_klass = exact_target->holder(); + if (is_profiling()) { + if (recv != NULL && profile_calls()) { + null_check(recv); + } + // Note that we'd collect profile data in this method if we wanted it. + compilation()->set_would_profile(true); + + if (profile_calls()) { + assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set"); + ciKlass* target_klass = NULL; + if (cha_monomorphic_target != NULL) { + target_klass = cha_monomorphic_target->holder(); + } else if (exact_target != NULL) { + target_klass = exact_target->holder(); + } + profile_call(recv, target_klass); } - profile_call(recv, target_klass); } Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before); @@ -1782,10 +1831,16 @@ void GraphBuilder::check_cast(int klass_index) { CheckCast* c = new CheckCast(klass, apop(), state_before); apush(append_split(c)); c->set_direct_compare(direct_compare(klass)); - if (profile_checkcasts()) { - c->set_profiled_method(method()); - c->set_profiled_bci(bci()); - c->set_should_profile(true); + + if (is_profiling()) { + // Note that we'd collect profile data in this method if we wanted it. 
+ compilation()->set_would_profile(true); + + if (profile_checkcasts()) { + c->set_profiled_method(method()); + c->set_profiled_bci(bci()); + c->set_should_profile(true); + } } } @@ -1868,7 +1923,7 @@ Value GraphBuilder::round_fp(Value fp_value) { Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) { - Canonicalizer canon(instr, bci); + Canonicalizer canon(compilation(), instr, bci); Instruction* i1 = canon.canonical(); if (i1->bci() != -99) { // Canonicalizer returned an instruction which was already @@ -2651,18 +2706,6 @@ BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, Va h->set_depth_first_number(0); Value l = h; - if (profile_branches()) { - // Increment the invocation count on entry to the method. We - // can't use profile_invocation here because append isn't setup to - // work properly at this point. The instruction have to be - // appended to the instruction stream by hand. - Value m = new Constant(new ObjectConstant(compilation()->method())); - h->set_next(m, 0); - Value p = new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1); - m->set_next(p, 0); - l = p; - } - BlockEnd* g = new Goto(entry, false); l->set_next(g, entry->bci()); h->set_end(g); @@ -2688,10 +2731,10 @@ BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, // also necessary when profiling so that there's a single block that // can increment the interpreter_invocation_count. BlockBegin* new_header_block; - if (std_entry->number_of_preds() == 0 && !profile_branches()) { - new_header_block = std_entry; - } else { + if (std_entry->number_of_preds() > 0 || count_invocations() || count_backedges()) { new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state); + } else { + new_header_block = std_entry; } // setup start block (root for the IR graph) @@ -3115,16 +3158,21 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) { Values* args = state()->pop_arguments(callee->arg_size()); ValueStack* locks = lock_stack(); - if (profile_calls()) { + + if (is_profiling()) { // Don't profile in the special case where the root method // is the intrinsic if (callee != method()) { - Value recv = NULL; - if (has_receiver) { - recv = args->at(0); - null_check(recv); + // Note that we'd collect profile data in this method if we wanted it. + compilation()->set_would_profile(true); + if (profile_calls()) { + Value recv = NULL; + if (has_receiver) { + recv = args->at(0); + null_check(recv); + } + profile_call(recv, NULL); } - profile_call(recv, NULL); } } @@ -3296,7 +3344,9 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) { assert(!callee->is_native(), "callee must not be native"); - + if (count_backedges() && callee->has_loops()) { + INLINE_BAILOUT("too complex for tiered"); + } // first perform tests of things it's not possible to inline if (callee->has_exception_handlers() && !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers"); @@ -3365,11 +3415,18 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) { null_check(recv); } - if (profile_inlined_calls()) { - profile_call(recv, holder_known ? callee->holder() : NULL); - } + if (is_profiling()) { + // Note that we'd collect profile data in this method if we wanted it. + // this may be redundant here... 
+ compilation()->set_would_profile(true); - profile_invocation(callee); + if (profile_calls()) { + profile_call(recv, holder_known ? callee->holder() : NULL); + } + if (profile_inlined_calls()) { + profile_invocation(callee, state(), 0); + } + } // Introduce a new callee continuation point - if the callee has // more than one return instruction or the return does not allow @@ -3755,30 +3812,10 @@ void GraphBuilder::print_stats() { } #endif // PRODUCT - void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) { append(new ProfileCall(method(), bci(), recv, known_holder)); } - -void GraphBuilder::profile_invocation(ciMethod* callee) { - if (profile_calls()) { - // increment the interpreter_invocation_count for the inlinee - Value m = append(new Constant(new ObjectConstant(callee))); - append(new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1)); - } -} - - -void GraphBuilder::profile_bci(int bci) { - if (profile_branches()) { - ciMethodData* md = method()->method_data(); - if (md == NULL) { - BAILOUT("out of memory building methodDataOop"); - } - ciProfileData* data = md->bci_to_data(bci); - assert(data != NULL && data->is_JumpData(), "need JumpData for goto"); - Value mdo = append(new Constant(new ObjectConstant(md))); - append(new ProfileCounter(mdo, md->byte_offset_of_slot(data, JumpData::taken_offset()), 1)); - } +void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state, int bci) { + append(new ProfileInvoke(callee, state, bci)); } diff --git a/src/share/vm/c1/c1_GraphBuilder.hpp b/src/share/vm/c1/c1_GraphBuilder.hpp index 4ce9dd2bd..1a6c6f28d 100644 --- a/src/share/vm/c1/c1_GraphBuilder.hpp +++ b/src/share/vm/c1/c1_GraphBuilder.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -342,27 +342,17 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC { NOT_PRODUCT(void print_inline_result(ciMethod* callee, bool res);) - // methodDataOop profiling helpers void profile_call(Value recv, ciKlass* predicted_holder); - void profile_invocation(ciMethod* method); - void profile_bci(int bci); - - // Helpers for generation of profile information - bool profile_branches() { - return _compilation->env()->comp_level() == CompLevel_fast_compile && - Tier1UpdateMethodData && Tier1ProfileBranches; - } - bool profile_calls() { - return _compilation->env()->comp_level() == CompLevel_fast_compile && - Tier1UpdateMethodData && Tier1ProfileCalls; - } - bool profile_inlined_calls() { - return profile_calls() && Tier1ProfileInlinedCalls; - } - bool profile_checkcasts() { - return _compilation->env()->comp_level() == CompLevel_fast_compile && - Tier1UpdateMethodData && Tier1ProfileCheckcasts; - } + void profile_invocation(ciMethod* inlinee, ValueStack* state, int bci); + + // Shortcuts to profiling control. 
+ bool is_profiling() { return _compilation->is_profiling(); } + bool count_invocations() { return _compilation->count_invocations(); } + bool count_backedges() { return _compilation->count_backedges(); } + bool profile_branches() { return _compilation->profile_branches(); } + bool profile_calls() { return _compilation->profile_calls(); } + bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); } + bool profile_checkcasts() { return _compilation->profile_checkcasts(); } public: NOT_PRODUCT(void print_stats();) diff --git a/src/share/vm/c1/c1_IR.cpp b/src/share/vm/c1/c1_IR.cpp index 4df75f40f..cb5e2098e 100644 --- a/src/share/vm/c1/c1_IR.cpp +++ b/src/share/vm/c1/c1_IR.cpp @@ -296,19 +296,21 @@ IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) : void IR::optimize() { Optimizer opt(this); - if (DoCEE) { - opt.eliminate_conditional_expressions(); + if (!compilation()->profile_branches()) { + if (DoCEE) { + opt.eliminate_conditional_expressions(); #ifndef PRODUCT - if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); } - if (PrintIR || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); } + if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); } + if (PrintIR || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); } #endif - } - if (EliminateBlocks) { - opt.eliminate_blocks(); + } + if (EliminateBlocks) { + opt.eliminate_blocks(); #ifndef PRODUCT - if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); } - if (PrintIR || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); } + if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); } + if (PrintIR || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); } #endif + } } if (EliminateNullChecks) { opt.eliminate_null_checks(); @@ -484,6 +486,8 @@ class ComputeLinearScanOrder : public StackObj { BitMap2D _loop_map; // two-dimensional bit set: a bit is set if a block is contained in a loop BlockList _work_list; // temporary list (used in mark_loops and compute_order) + Compilation* _compilation; + // accessors for _visited_blocks and _active_blocks void init_visited() { _active_blocks.clear(); _visited_blocks.clear(); } bool is_visited(BlockBegin* b) const { return _visited_blocks.at(b->block_id()); } @@ -526,8 +530,9 @@ class ComputeLinearScanOrder : public StackObj { NOT_PRODUCT(void print_blocks();) DEBUG_ONLY(void verify();) + Compilation* compilation() const { return _compilation; } public: - ComputeLinearScanOrder(BlockBegin* start_block); + ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block); // accessors for final result BlockList* linear_scan_order() const { return _linear_scan_order; } @@ -535,7 +540,7 @@ class ComputeLinearScanOrder : public StackObj { }; -ComputeLinearScanOrder::ComputeLinearScanOrder(BlockBegin* start_block) : +ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block) : _max_block_id(BlockBegin::number_of_blocks()), _num_blocks(0), _num_loops(0), @@ -547,13 +552,18 @@ ComputeLinearScanOrder::ComputeLinearScanOrder(BlockBegin* start_block) : _loop_end_blocks(8), _work_list(8), _linear_scan_order(NULL), // initialized later with correct size - _loop_map(0, 0) // initialized later with correct size + _loop_map(0, 0), // initialized later with correct size + _compilation(c) { TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order"); init_visited(); count_edges(start_block, NULL); + if 
(compilation()->is_profiling()) { + compilation()->method()->method_data()->set_compilation_stats(_num_loops, _num_blocks); + } + if (_num_loops > 0) { mark_loops(); clear_non_natural_loops(start_block); @@ -1130,7 +1140,7 @@ void ComputeLinearScanOrder::verify() { void IR::compute_code() { assert(is_valid(), "IR must be valid"); - ComputeLinearScanOrder compute_order(start()); + ComputeLinearScanOrder compute_order(compilation(), start()); _num_loops = compute_order.num_loops(); _code = compute_order.linear_scan_order(); } diff --git a/src/share/vm/c1/c1_Instruction.cpp b/src/share/vm/c1/c1_Instruction.cpp index 018047a35..e0728b2f3 100644 --- a/src/share/vm/c1/c1_Instruction.cpp +++ b/src/share/vm/c1/c1_Instruction.cpp @@ -740,9 +740,9 @@ void BlockBegin::block_values_do(ValueVisitor* f) { #ifndef PRODUCT - #define TRACE_PHI(code) if (PrintPhiFunctions) { code; } + #define TRACE_PHI(code) if (PrintPhiFunctions) { code; } #else - #define TRACE_PHI(coce) + #define TRACE_PHI(coce) #endif @@ -1011,3 +1011,7 @@ int Phi::operand_count() const { void Throw::state_values_do(ValueVisitor* f) { BlockEnd::state_values_do(f); } + +void ProfileInvoke::state_values_do(ValueVisitor* f) { + if (state() != NULL) state()->values_do(f); +} diff --git a/src/share/vm/c1/c1_Instruction.hpp b/src/share/vm/c1/c1_Instruction.hpp index 98e9d41bc..8b310c015 100644 --- a/src/share/vm/c1/c1_Instruction.hpp +++ b/src/share/vm/c1/c1_Instruction.hpp @@ -98,7 +98,7 @@ class UnsafePrefetch; class UnsafePrefetchRead; class UnsafePrefetchWrite; class ProfileCall; -class ProfileCounter; +class ProfileInvoke; // A Value is a reference to the instruction creating the value typedef Instruction* Value; @@ -195,7 +195,7 @@ class InstructionVisitor: public StackObj { virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x) = 0; virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0; virtual void do_ProfileCall (ProfileCall* x) = 0; - virtual void do_ProfileCounter (ProfileCounter* x) = 0; + virtual void do_ProfileInvoke (ProfileInvoke* x) = 0; }; @@ -1733,20 +1733,45 @@ BASE(BlockEnd, StateSplit) LEAF(Goto, BlockEnd) + public: + enum Direction { + none, // Just a regular goto + taken, not_taken // Goto produced from If + }; + private: + ciMethod* _profiled_method; + int _profiled_bci; + Direction _direction; public: // creation - Goto(BlockBegin* sux, ValueStack* state_before, bool is_safepoint = false) : BlockEnd(illegalType, state_before, is_safepoint) { + Goto(BlockBegin* sux, ValueStack* state_before, bool is_safepoint = false) + : BlockEnd(illegalType, state_before, is_safepoint) + , _direction(none) + , _profiled_method(NULL) + , _profiled_bci(0) { BlockList* s = new BlockList(1); s->append(sux); set_sux(s); } - Goto(BlockBegin* sux, bool is_safepoint) : BlockEnd(illegalType, NULL, is_safepoint) { + Goto(BlockBegin* sux, bool is_safepoint) : BlockEnd(illegalType, NULL, is_safepoint) + , _direction(none) + , _profiled_method(NULL) + , _profiled_bci(0) { BlockList* s = new BlockList(1); s->append(sux); set_sux(s); } + bool should_profile() const { return check_flag(ProfileMDOFlag); } + ciMethod* profiled_method() const { return _profiled_method; } // set only for profiled branches + int profiled_bci() const { return _profiled_bci; } + Direction direction() const { return _direction; } + + void set_should_profile(bool value) { set_flag(ProfileMDOFlag, value); } + void set_profiled_method(ciMethod* method) { _profiled_method = method; } + void set_profiled_bci(int bci) { _profiled_bci = bci; } + void 
set_direction(Direction d) { _direction = d; } }; @@ -1757,6 +1782,8 @@ LEAF(If, BlockEnd) Value _y; ciMethod* _profiled_method; int _profiled_bci; // Canonicalizer may alter bci of If node + bool _swapped; // Is the order reversed with respect to the original If in the + // bytecode stream? public: // creation // unordered_is_true is valid for float/double compares only @@ -1767,6 +1794,7 @@ LEAF(If, BlockEnd) , _y(y) , _profiled_method(NULL) , _profiled_bci(0) + , _swapped(false) { ASSERT_VALUES set_flag(UnorderedIsTrueFlag, unordered_is_true); @@ -1788,7 +1816,8 @@ LEAF(If, BlockEnd) BlockBegin* usux() const { return sux_for(unordered_is_true()); } bool should_profile() const { return check_flag(ProfileMDOFlag); } ciMethod* profiled_method() const { return _profiled_method; } // set only for profiled branches - int profiled_bci() const { return _profiled_bci; } // set only for profiled branches + int profiled_bci() const { return _profiled_bci; } // set for profiled branches and tiered + bool is_swapped() const { return _swapped; } // manipulation void swap_operands() { @@ -1807,7 +1836,7 @@ LEAF(If, BlockEnd) void set_should_profile(bool value) { set_flag(ProfileMDOFlag, value); } void set_profiled_method(ciMethod* method) { _profiled_method = method; } void set_profiled_bci(int bci) { _profiled_bci = bci; } - + void set_swapped(bool value) { _swapped = value; } // generic virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_x); f->visit(&_y); } }; @@ -2235,7 +2264,6 @@ LEAF(UnsafePrefetchWrite, UnsafePrefetch) } }; - LEAF(ProfileCall, Instruction) private: ciMethod* _method; @@ -2263,35 +2291,32 @@ LEAF(ProfileCall, Instruction) virtual void input_values_do(ValueVisitor* f) { if (_recv != NULL) f->visit(&_recv); } }; +// Use to trip invocation counter of an inlined method -// -// Simple node representing a counter update generally used for updating MDOs -// -LEAF(ProfileCounter, Instruction) +LEAF(ProfileInvoke, Instruction) private: - Value _mdo; - int _offset; - int _increment; + ciMethod* _inlinee; + ValueStack* _state; + int _bci_of_invoke; public: - ProfileCounter(Value mdo, int offset, int increment = 1) + ProfileInvoke(ciMethod* inlinee, ValueStack* state, int bci) : Instruction(voidType) - , _mdo(mdo) - , _offset(offset) - , _increment(increment) + , _inlinee(inlinee) + , _bci_of_invoke(bci) + , _state(state) { - // The ProfileCounter has side-effects and must occur precisely where located + // The ProfileInvoke has side-effects and must occur precisely where located QQQ??? pin(); } - Value mdo() { return _mdo; } - int offset() { return _offset; } - int increment() { return _increment; } - - virtual void input_values_do(ValueVisitor* f) { f->visit(&_mdo); } + ciMethod* inlinee() { return _inlinee; } + ValueStack* state() { return _state; } + int bci_of_invoke() { return _bci_of_invoke; } + virtual void input_values_do(ValueVisitor*) {} + virtual void state_values_do(ValueVisitor*); }; - class BlockPair: public CompilationResourceObj { private: BlockBegin* _from; diff --git a/src/share/vm/c1/c1_InstructionPrinter.cpp b/src/share/vm/c1/c1_InstructionPrinter.cpp index fa473b161..84e7b6fb8 100644 --- a/src/share/vm/c1/c1_InstructionPrinter.cpp +++ b/src/share/vm/c1/c1_InstructionPrinter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -819,7 +819,6 @@ void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { output()->put(')'); } - void InstructionPrinter::do_ProfileCall(ProfileCall* x) { output()->print("profile "); print_value(x->recv()); @@ -831,20 +830,11 @@ void InstructionPrinter::do_ProfileCall(ProfileCall* x) { output()->put(')'); } +void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) { + output()->print("profile_invoke "); + output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8()); + output()->put(')'); -void InstructionPrinter::do_ProfileCounter(ProfileCounter* x) { - - ObjectConstant* oc = x->mdo()->type()->as_ObjectConstant(); - if (oc != NULL && oc->value()->is_method() && - x->offset() == methodOopDesc::interpreter_invocation_counter_offset_in_bytes()) { - print_value(x->mdo()); - output()->print(".interpreter_invocation_count += %d", x->increment()); - } else { - output()->print("counter ["); - print_value(x->mdo()); - output()->print(" + %d] += %d", x->offset(), x->increment()); - } } - #endif // PRODUCT diff --git a/src/share/vm/c1/c1_InstructionPrinter.hpp b/src/share/vm/c1/c1_InstructionPrinter.hpp index 7599abc07..340c16237 100644 --- a/src/share/vm/c1/c1_InstructionPrinter.hpp +++ b/src/share/vm/c1/c1_InstructionPrinter.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -123,6 +123,6 @@ class InstructionPrinter: public InstructionVisitor { virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x); virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); virtual void do_ProfileCall (ProfileCall* x); - virtual void do_ProfileCounter (ProfileCounter* x); + virtual void do_ProfileInvoke (ProfileInvoke* x); }; #endif // PRODUCT diff --git a/src/share/vm/c1/c1_LIR.cpp b/src/share/vm/c1/c1_LIR.cpp index a55ed09fc..fd9c11bd9 100644 --- a/src/share/vm/c1/c1_LIR.cpp +++ b/src/share/vm/c1/c1_LIR.cpp @@ -345,9 +345,8 @@ void LIR_OpBranch::negate_cond() { LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, - CodeStub* stub, - ciMethod* profiled_method, - int profiled_bci) + CodeStub* stub) + : LIR_Op(code, result, NULL) , _object(object) , _array(LIR_OprFact::illegalOpr) @@ -359,8 +358,10 @@ LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, , _stub(stub) , _info_for_patch(info_for_patch) , _info_for_exception(info_for_exception) - , _profiled_method(profiled_method) - , _profiled_bci(profiled_bci) { + , _profiled_method(NULL) + , _profiled_bci(-1) + , _should_profile(false) +{ if (code == lir_checkcast) { assert(info_for_exception != NULL, "checkcast throws exceptions"); } else if (code == lir_instanceof) { @@ -372,7 +373,7 @@ LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, -LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci) +LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr 
tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception) : LIR_Op(code, LIR_OprFact::illegalOpr, NULL) , _object(object) , _array(array) @@ -384,8 +385,10 @@ LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, L , _stub(NULL) , _info_for_patch(NULL) , _info_for_exception(info_for_exception) - , _profiled_method(profiled_method) - , _profiled_bci(profiled_bci) { + , _profiled_method(NULL) + , _profiled_bci(-1) + , _should_profile(false) +{ if (code == lir_store_check) { _stub = new ArrayStoreExceptionStub(info_for_exception); assert(info_for_exception != NULL, "store_check throws exceptions"); @@ -495,6 +498,8 @@ void LIR_OpVisitState::visit(LIR_Op* op) { case lir_monaddr: // input and result always valid, info always invalid case lir_null_check: // input and info always valid, result always invalid case lir_move: // input and result always valid, may have info + case lir_pack64: // input and result always valid + case lir_unpack64: // input and result always valid case lir_prefetchr: // input always valid, result and info always invalid case lir_prefetchw: // input always valid, result and info always invalid { @@ -903,7 +908,6 @@ void LIR_OpVisitState::visit(LIR_Op* op) { assert(opProfileCall->_tmp1->is_valid(), "used"); do_temp(opProfileCall->_tmp1); break; } - default: ShouldNotReachHere(); } @@ -1015,7 +1019,11 @@ void LIR_OpAllocArray::emit_code(LIR_Assembler* masm) { } void LIR_OpTypeCheck::emit_code(LIR_Assembler* masm) { - masm->emit_opTypeCheck(this); + if (code() == lir_checkcast) { + masm->emit_checkcast(this); + } else { + masm->emit_opTypeCheck(this); + } if (stub()) { masm->emit_code_stub(stub()); } @@ -1041,12 +1049,10 @@ void LIR_OpDelay::emit_code(LIR_Assembler* masm) { masm->emit_delay(this); } - void LIR_OpProfileCall::emit_code(LIR_Assembler* masm) { masm->emit_profile_call(this); } - // LIR_List LIR_List::LIR_List(Compilation* compilation, BlockBegin* block) : _operations(8) @@ -1364,19 +1370,23 @@ void LIR_List::checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, ciMethod* profiled_method, int profiled_bci) { - append(new LIR_OpTypeCheck(lir_checkcast, result, object, klass, - tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub, - profiled_method, profiled_bci)); + LIR_OpTypeCheck* c = new LIR_OpTypeCheck(lir_checkcast, result, object, klass, + tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub); + if (profiled_method != NULL) { + c->set_profiled_method(profiled_method); + c->set_profiled_bci(profiled_bci); + c->set_should_profile(true); + } + append(c); } - void LIR_List::instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch) { - append(new LIR_OpTypeCheck(lir_instanceof, result, object, klass, tmp1, tmp2, tmp3, fast_check, NULL, info_for_patch, NULL, NULL, 0)); + append(new LIR_OpTypeCheck(lir_instanceof, result, object, klass, tmp1, tmp2, tmp3, fast_check, NULL, info_for_patch, NULL)); } void LIR_List::store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception) { - append(new LIR_OpTypeCheck(lir_store_check, object, array, tmp1, tmp2, tmp3, info_for_exception, NULL, 0)); + append(new LIR_OpTypeCheck(lir_store_check, object, array, tmp1, tmp2, tmp3, info_for_exception)); } @@ -1611,6 +1621,8 @@ const char * 
LIR_Op::name() const { case lir_convert: s = "convert"; break; case lir_alloc_object: s = "alloc_obj"; break; case lir_monaddr: s = "mon_addr"; break; + case lir_pack64: s = "pack64"; break; + case lir_unpack64: s = "unpack64"; break; // LIR_Op2 case lir_cmp: s = "cmp"; break; case lir_cmp_l2i: s = "cmp_l2i"; break; @@ -1664,7 +1676,6 @@ const char * LIR_Op::name() const { case lir_cas_int: s = "cas_int"; break; // LIR_OpProfileCall case lir_profile_call: s = "profile_call"; break; - case lir_none: ShouldNotReachHere();break; default: s = "illegal_op"; break; } @@ -1922,7 +1933,6 @@ void LIR_OpProfileCall::print_instr(outputStream* out) const { tmp1()->print(out); out->print(" "); } - #endif // PRODUCT // Implementation of LIR_InsertionBuffer diff --git a/src/share/vm/c1/c1_LIR.hpp b/src/share/vm/c1/c1_LIR.hpp index 5c7dc4feb..1e4699fef 100644 --- a/src/share/vm/c1/c1_LIR.hpp +++ b/src/share/vm/c1/c1_LIR.hpp @@ -849,6 +849,8 @@ enum LIR_Code { , lir_monaddr , lir_roundfp , lir_safepoint + , lir_pack64 + , lir_unpack64 , lir_unwind , end_op1 , begin_op2 @@ -1464,18 +1466,16 @@ class LIR_OpTypeCheck: public LIR_Op { CodeEmitInfo* _info_for_patch; CodeEmitInfo* _info_for_exception; CodeStub* _stub; - // Helpers for Tier1UpdateMethodData ciMethod* _profiled_method; int _profiled_bci; + bool _should_profile; public: LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, - CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, - ciMethod* profiled_method, int profiled_bci); + CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub); LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, - LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, - ciMethod* profiled_method, int profiled_bci); + LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception); LIR_Opr object() const { return _object; } LIR_Opr array() const { assert(code() == lir_store_check, "not valid"); return _array; } @@ -1489,8 +1489,12 @@ public: CodeStub* stub() const { return _stub; } // methodDataOop profiling - ciMethod* profiled_method() { return _profiled_method; } - int profiled_bci() { return _profiled_bci; } + void set_profiled_method(ciMethod *method) { _profiled_method = method; } + void set_profiled_bci(int bci) { _profiled_bci = bci; } + void set_should_profile(bool b) { _should_profile = b; } + ciMethod* profiled_method() const { return _profiled_method; } + int profiled_bci() const { return _profiled_bci; } + bool should_profile() const { return _should_profile; } virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; } @@ -1771,7 +1775,6 @@ class LIR_OpProfileCall : public LIR_Op { virtual void print_instr(outputStream* out) const PRODUCT_RETURN; }; - class LIR_InsertionBuffer; //--------------------------------LIR_List--------------------------------------------------- @@ -1835,6 +1838,7 @@ class LIR_List: public CompilationResourceObj { //---------- mutators --------------- void insert_before(int i, LIR_List* op_list) { _operations.insert_before(i, op_list->instructions_list()); } void insert_before(int i, LIR_Op* op) { _operations.insert_before(i, op); } + void remove_at(int i) { _operations.remove_at(i); } //---------- printing ------------- void print_instructions() PRODUCT_RETURN; @@ -1908,6 +1912,9 @@ class LIR_List: public CompilationResourceObj { void logical_or (LIR_Opr left, LIR_Opr 
right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or, left, right, dst)); } void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor, left, right, dst)); } + void pack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_pack64, src, dst, T_LONG, lir_patch_none, NULL)); } + void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); } + void null_check(LIR_Opr opr, CodeEmitInfo* info) { append(new LIR_Op1(lir_null_check, opr, info)); } void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); @@ -2034,15 +2041,17 @@ class LIR_List: public CompilationResourceObj { void fpop_raw() { append(new LIR_Op0(lir_fpop_raw)); } + void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch); + void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception); + void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, ciMethod* profiled_method, int profiled_bci); - void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch); - void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception); - // methodDataOop profiling - void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); } + void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { + append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); + } }; void print_LIR(BlockList* blocks); diff --git a/src/share/vm/c1/c1_LIRAssembler.cpp b/src/share/vm/c1/c1_LIRAssembler.cpp index ee3c5ea8c..de2a1a9f2 100644 --- a/src/share/vm/c1/c1_LIRAssembler.cpp +++ b/src/share/vm/c1/c1_LIRAssembler.cpp @@ -548,6 +548,16 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) { monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr()); break; +#ifdef SPARC + case lir_pack64: + pack64(op->in_opr(), op->result_opr()); + break; + + case lir_unpack64: + unpack64(op->in_opr(), op->result_opr()); + break; +#endif + case lir_unwind: unwind_op(op->in_opr()); break; diff --git a/src/share/vm/c1/c1_LIRAssembler.hpp b/src/share/vm/c1/c1_LIRAssembler.hpp index e40ebd51d..e2210a6cf 100644 --- a/src/share/vm/c1/c1_LIRAssembler.hpp +++ b/src/share/vm/c1/c1_LIRAssembler.hpp @@ -187,6 +187,7 @@ class LIR_Assembler: public CompilationResourceObj { void emit_alloc_obj(LIR_OpAllocObj* op); void emit_alloc_array(LIR_OpAllocArray* op); void emit_opTypeCheck(LIR_OpTypeCheck* op); + void emit_checkcast(LIR_OpTypeCheck* op); void emit_compare_and_swap(LIR_OpCompareAndSwap* op); void emit_lock(LIR_OpLock* op); void emit_call(LIR_OpJavaCall* op); diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp index 4e7605f26..e519cfcfb 100644 --- a/src/share/vm/c1/c1_LIRGenerator.cpp +++ b/src/share/vm/c1/c1_LIRGenerator.cpp @@ -480,16 +480,6 @@ void LIRGenerator::nio_range_check(LIR_Opr buffer, 
LIR_Opr index, LIR_Opr result } -// increment a counter returning the incremented value -LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) { - LIR_Address* counter = new LIR_Address(base, offset, T_INT); - LIR_Opr result = new_register(T_INT); - __ load(counter, result); - __ add(result, LIR_OprFact::intConst(increment), result); - __ store(result, counter); - return result; -} - void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) { LIR_Opr result_op = result; @@ -821,7 +811,6 @@ LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) { return tmp; } - void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) { if (if_instr->should_profile()) { ciMethod* method = if_instr->profiled_method(); @@ -836,24 +825,32 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) { assert(data->is_BranchData(), "need BranchData for two-way branches"); int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset()); int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset()); + if (if_instr->is_swapped()) { + int t = taken_count_offset; + taken_count_offset = not_taken_count_offset; + not_taken_count_offset = t; + } + LIR_Opr md_reg = new_register(T_OBJECT); - __ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg); - LIR_Opr data_offset_reg = new_register(T_INT); + __ oop2reg(md->constant_encoding(), md_reg); + + LIR_Opr data_offset_reg = new_pointer_register(); __ cmove(lir_cond(cond), - LIR_OprFact::intConst(taken_count_offset), - LIR_OprFact::intConst(not_taken_count_offset), + LIR_OprFact::intptrConst(taken_count_offset), + LIR_OprFact::intptrConst(not_taken_count_offset), data_offset_reg); - LIR_Opr data_reg = new_register(T_INT); - LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT); + + // MDO cells are intptr_t, so the data_reg width is arch-dependent. + LIR_Opr data_reg = new_pointer_register(); + LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type()); __ move(LIR_OprFact::address(data_addr), data_reg); - LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT); // Use leal instead of add to avoid destroying condition codes on x86 + LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT); __ leal(LIR_OprFact::address(fake_incr_value), data_reg); __ move(data_reg, LIR_OprFact::address(data_addr)); } } - // Phi technique: // This is about passing live values from one basic block to the other. // In code generated with Java it is rather rare that more than one @@ -1305,8 +1302,6 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc LIR_Opr flag_val = new_register(T_INT); __ load(mark_active_flag_addr, flag_val); - LabelObj* start_store = new LabelObj(); - LIR_PatchCode pre_val_patch_code = patch ? lir_patch_normal : lir_patch_none; @@ -1757,7 +1752,7 @@ void LIRGenerator::do_Throw(Throw* x) { #ifndef PRODUCT if (PrintC1Statistics) { - increment_counter(Runtime1::throw_count_address()); + increment_counter(Runtime1::throw_count_address(), T_INT); } #endif @@ -2191,12 +2186,41 @@ void LIRGenerator::do_Goto(Goto* x) { ValueStack* state = x->state_before() ? 
x->state_before() : x->state(); // increment backedge counter if needed - increment_backedge_counter(state_for(x, state)); - + CodeEmitInfo* info = state_for(x, state); + increment_backedge_counter(info, info->bci()); CodeEmitInfo* safepoint_info = state_for(x, state); __ safepoint(safepoint_poll_register(), safepoint_info); } + // Gotos can be folded Ifs, handle this case. + if (x->should_profile()) { + ciMethod* method = x->profiled_method(); + assert(method != NULL, "method should be set if branch is profiled"); + ciMethodData* md = method->method_data(); + if (md == NULL) { + bailout("out of memory building methodDataOop"); + return; + } + ciProfileData* data = md->bci_to_data(x->profiled_bci()); + assert(data != NULL, "must have profiling data"); + int offset; + if (x->direction() == Goto::taken) { + assert(data->is_BranchData(), "need BranchData for two-way branches"); + offset = md->byte_offset_of_slot(data, BranchData::taken_offset()); + } else if (x->direction() == Goto::not_taken) { + assert(data->is_BranchData(), "need BranchData for two-way branches"); + offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset()); + } else { + assert(data->is_JumpData(), "need JumpData for branches"); + offset = md->byte_offset_of_slot(data, JumpData::taken_offset()); + } + LIR_Opr md_reg = new_register(T_OBJECT); + __ oop2reg(md->constant_encoding(), md_reg); + + increment_counter(new LIR_Address(md_reg, offset, + NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment); + } + // emit phi-instruction move after safepoint since this simplifies // describing the state as the safepoint. move_to_phi(x->state()); @@ -2279,7 +2303,10 @@ void LIRGenerator::do_Base(Base* x) { } // increment invocation counters if needed - increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL)); + if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. + CodeEmitInfo* info = new CodeEmitInfo(InvocationEntryBci, scope()->start()->state(), NULL); + increment_invocation_counter(info); + } // all blocks with a successor must end with an unconditional jump // to the successor even if they are consecutive @@ -2613,12 +2640,12 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) { } } - void LIRGenerator::do_ProfileCall(ProfileCall* x) { // Need recv in a temporary register so it interferes with the other temporaries LIR_Opr recv = LIR_OprFact::illegalOpr; LIR_Opr mdo = new_register(T_OBJECT); - LIR_Opr tmp = new_register(T_INT); + // tmp is used to hold the counters on SPARC + LIR_Opr tmp = new_pointer_register(); if (x->recv() != NULL) { LIRItem value(x->recv(), this); value.load_item(); @@ -2628,14 +2655,69 @@ void LIRGenerator::do_ProfileCall(ProfileCall* x) { __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder()); } - -void LIRGenerator::do_ProfileCounter(ProfileCounter* x) { - LIRItem mdo(x->mdo(), this); - mdo.load_item(); - - increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment()); +void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) { + // We can safely ignore accessors here, since c2 will inline them anyway, + // accessors are also always mature. 
+ if (!x->inlinee()->is_accessor()) { + CodeEmitInfo* info = state_for(x, x->state(), true); + // Increment invocation counter, don't notify the runtime, because we don't inline loops, + increment_event_counter_impl(info, x->inlinee(), 0, InvocationEntryBci, false, false); + } } +void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) { + int freq_log; + int level = compilation()->env()->comp_level(); + if (level == CompLevel_limited_profile) { + freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog); + } else if (level == CompLevel_full_profile) { + freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog); + } else { + ShouldNotReachHere(); + } + // Increment the appropriate invocation/backedge counter and notify the runtime. + increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true); +} + +void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, + ciMethod *method, int frequency, + int bci, bool backedge, bool notify) { + assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0"); + int level = _compilation->env()->comp_level(); + assert(level > CompLevel_simple, "Shouldn't be here"); + + int offset = -1; + LIR_Opr counter_holder = new_register(T_OBJECT); + LIR_Opr meth; + if (level == CompLevel_limited_profile) { + offset = in_bytes(backedge ? methodOopDesc::backedge_counter_offset() : + methodOopDesc::invocation_counter_offset()); + __ oop2reg(method->constant_encoding(), counter_holder); + meth = counter_holder; + } else if (level == CompLevel_full_profile) { + offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() : + methodDataOopDesc::invocation_counter_offset()); + __ oop2reg(method->method_data()->constant_encoding(), counter_holder); + meth = new_register(T_OBJECT); + __ oop2reg(method->constant_encoding(), meth); + } else { + ShouldNotReachHere(); + } + LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT); + LIR_Opr result = new_register(T_INT); + __ load(counter, result); + __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result); + __ store(result, counter); + if (notify) { + LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT); + __ logical_and(result, mask, result); + __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0)); + // The bci for info can point to cmp for if's we want the if bci + CodeStub* overflow = new CounterOverflowStub(info, bci, meth); + __ branch(lir_cond_equal, T_INT, overflow); + __ branch_destination(overflow->continuation()); + } +} LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) { LIRItemList args(1); @@ -2748,28 +2830,3 @@ LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args, return result; } - - -void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) { -#ifdef TIERED - if (_compilation->env()->comp_level() == CompLevel_fast_compile && - (method()->code_size() >= Tier1BytecodeLimit || backedge)) { - int limit = InvocationCounter::Tier1InvocationLimit; - int offset = in_bytes(methodOopDesc::invocation_counter_offset() + - InvocationCounter::counter_offset()); - if (backedge) { - limit = InvocationCounter::Tier1BackEdgeLimit; - offset = in_bytes(methodOopDesc::backedge_counter_offset() + - InvocationCounter::counter_offset()); - } - - LIR_Opr meth = new_register(T_OBJECT); - __ 
oop2reg(method()->constant_encoding(), meth); - LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment); - __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit)); - CodeStub* overflow = new CounterOverflowStub(info, info->bci()); - __ branch(lir_cond_aboveEqual, T_INT, overflow); - __ branch_destination(overflow->continuation()); - } -#endif -} diff --git a/src/share/vm/c1/c1_LIRGenerator.hpp b/src/share/vm/c1/c1_LIRGenerator.hpp index 4a69bd842..f1c53941a 100644 --- a/src/share/vm/c1/c1_LIRGenerator.hpp +++ b/src/share/vm/c1/c1_LIRGenerator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -196,6 +196,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { LIR_Opr load_constant(Constant* x); LIR_Opr load_constant(LIR_Const* constant); + // Given an immediate value, return an operand usable in logical ops. + LIR_Opr load_immediate(int x, BasicType type); + void set_result(Value x, LIR_Opr opr) { assert(opr->is_valid(), "must set to valid value"); assert(x->operand()->is_illegal(), "operand should never change"); @@ -213,8 +216,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { LIR_Opr round_item(LIR_Opr opr); LIR_Opr force_to_spill(LIR_Opr value, BasicType t); - void profile_branch(If* if_instr, If::Condition cond); - PhiResolverState& resolver_state() { return _resolver_state; } void move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val); @@ -285,12 +286,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args); - void increment_counter(address counter, int step = 1); + void increment_counter(address counter, BasicType type, int step = 1); void increment_counter(LIR_Address* addr, int step = 1); - // increment a counter returning the incremented value - LIR_Opr increment_and_return_counter(LIR_Opr base, int offset, int increment); - // is_strictfp is only needed for mul and div (and only generates different code on i486) void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL); // machine dependent. 
returns true if it emitted code for the multiply @@ -347,9 +345,21 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { bool can_store_as_constant(Value i, BasicType type) const; LIR_Opr safepoint_poll_register(); - void increment_invocation_counter(CodeEmitInfo* info, bool backedge = false); - void increment_backedge_counter(CodeEmitInfo* info) { - increment_invocation_counter(info, true); + + void profile_branch(If* if_instr, If::Condition cond); + void increment_event_counter_impl(CodeEmitInfo* info, + ciMethod *method, int frequency, + int bci, bool backedge, bool notify); + void increment_event_counter(CodeEmitInfo* info, int bci, bool backedge); + void increment_invocation_counter(CodeEmitInfo *info) { + if (compilation()->count_invocations()) { + increment_event_counter(info, InvocationEntryBci, false); + } + } + void increment_backedge_counter(CodeEmitInfo* info, int bci) { + if (compilation()->count_backedges()) { + increment_event_counter(info, bci, true); + } } CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false); @@ -503,7 +513,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x); virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); virtual void do_ProfileCall (ProfileCall* x); - virtual void do_ProfileCounter (ProfileCounter* x); + virtual void do_ProfileInvoke (ProfileInvoke* x); }; diff --git a/src/share/vm/c1/c1_Optimizer.cpp b/src/share/vm/c1/c1_Optimizer.cpp index fd5ddd53e..d3d51cedb 100644 --- a/src/share/vm/c1/c1_Optimizer.cpp +++ b/src/share/vm/c1/c1_Optimizer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -430,7 +430,7 @@ public: void do_UnsafePrefetchRead (UnsafePrefetchRead* x); void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); void do_ProfileCall (ProfileCall* x); - void do_ProfileCounter (ProfileCounter* x); + void do_ProfileInvoke (ProfileInvoke* x); }; @@ -598,7 +598,7 @@ void NullCheckVisitor::do_UnsafePutObject(UnsafePutObject* x) {} void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {} void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {} void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_last_explicit_null_check(); } -void NullCheckVisitor::do_ProfileCounter (ProfileCounter* x) {} +void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {} void NullCheckEliminator::visit(Value* p) { diff --git a/src/share/vm/c1/c1_Runtime1.cpp b/src/share/vm/c1/c1_Runtime1.cpp index 5100c5ebb..df05521d0 100644 --- a/src/share/vm/c1/c1_Runtime1.cpp +++ b/src/share/vm/c1/c1_Runtime1.cpp @@ -140,9 +140,7 @@ void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) { case slow_subtype_check_id: case fpu2long_stub_id: case unwind_exception_id: -#ifndef TIERED - case counter_overflow_id: // Not generated outside the tiered world -#endif + case counter_overflow_id: #if defined(SPARC) || defined(PPC) case handle_exception_nofpu_id: // Unused on sparc #endif @@ -322,31 +320,60 @@ JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread)) } JRT_END -#ifdef TIERED -JRT_ENTRY(void, Runtime1::counter_overflow(JavaThread* thread, int bci)) - RegisterMap map(thread, false); - frame fr = thread->last_frame().sender(&map); +// This is a helper to allow us to safepoint but allow the outer entry +// to be safepoint free if we need to do an osr +static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) { + nmethod* osr_nm = NULL; + methodHandle method(THREAD, m); + + RegisterMap map(THREAD, false); + frame fr = THREAD->last_frame().sender(&map); nmethod* nm = (nmethod*) fr.cb(); - assert(nm!= NULL && nm->is_nmethod(), "what?"); - methodHandle method(thread, nm->method()); - if (bci == 0) { - // invocation counter overflow - if (!Tier1CountOnly) { - CompilationPolicy::policy()->method_invocation_event(method, CHECK); - } else { - method()->invocation_counter()->reset(); - } - } else { - if (!Tier1CountOnly) { - // Twe have a bci but not the destination bci and besides a backedge - // event is more for OSR which we don't want here. 
- CompilationPolicy::policy()->method_invocation_event(method, CHECK); - } else { - method()->backedge_counter()->reset(); + assert(nm!= NULL && nm->is_nmethod(), "Sanity check"); + methodHandle enclosing_method(THREAD, nm->method()); + + CompLevel level = (CompLevel)nm->comp_level(); + int bci = InvocationEntryBci; + if (branch_bci != InvocationEntryBci) { + // Compute desination bci + address pc = method()->code_base() + branch_bci; + Bytecodes::Code branch = Bytecodes::code_at(pc, method()); + int offset = 0; + switch (branch) { + case Bytecodes::_if_icmplt: case Bytecodes::_iflt: + case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt: + case Bytecodes::_if_icmple: case Bytecodes::_ifle: + case Bytecodes::_if_icmpge: case Bytecodes::_ifge: + case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq: + case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne: + case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto: + offset = (int16_t)Bytes::get_Java_u2(pc + 1); + break; + case Bytecodes::_goto_w: + offset = Bytes::get_Java_u4(pc + 1); + break; + default: ; } + bci = branch_bci + offset; } + + osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD); + return osr_nm; +} + +JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, methodOopDesc* method)) + nmethod* osr_nm; + JRT_BLOCK + osr_nm = counter_overflow_helper(thread, bci, method); + if (osr_nm != NULL) { + RegisterMap map(thread, false); + frame fr = thread->last_frame().sender(&map); + VM_DeoptimizeFrame deopt(thread, fr.id()); + VMThread::execute(&deopt); + } + JRT_BLOCK_END + return NULL; JRT_END -#endif // TIERED extern void vm_exit(int code); @@ -898,7 +925,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); assert(n_copy->data() == 0 || - n_copy->data() == (int)Universe::non_oop_word(), + n_copy->data() == (intptr_t)Universe::non_oop_word(), "illegal init value"); assert(load_klass() != NULL, "klass not set"); n_copy->set_data((intx) (load_klass())); diff --git a/src/share/vm/c1/c1_Runtime1.hpp b/src/share/vm/c1/c1_Runtime1.hpp index 60bb8550a..38571439c 100644 --- a/src/share/vm/c1/c1_Runtime1.hpp +++ b/src/share/vm/c1/c1_Runtime1.hpp @@ -123,9 +123,7 @@ class Runtime1: public AllStatic { static void new_object_array(JavaThread* thread, klassOopDesc* klass, jint length); static void new_multi_array (JavaThread* thread, klassOopDesc* klass, int rank, jint* dims); -#ifdef TIERED - static void counter_overflow(JavaThread* thread, int bci); -#endif // TIERED + static address counter_overflow(JavaThread* thread, int bci, methodOopDesc* method); static void unimplemented_entry (JavaThread* thread, StubID id); diff --git a/src/share/vm/c1/c1_ValueMap.hpp b/src/share/vm/c1/c1_ValueMap.hpp index 9c54e19a9..0bf25f031 100644 --- a/src/share/vm/c1/c1_ValueMap.hpp +++ b/src/share/vm/c1/c1_ValueMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -185,11 +185,11 @@ class ValueNumberingVisitor: public InstructionVisitor { void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ } void do_RoundFP (RoundFP* x) { /* nothing to do */ } void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ } + void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ }; void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ } void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ } void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ } void do_ProfileCall (ProfileCall* x) { /* nothing to do */ } - void do_ProfileCounter (ProfileCounter* x) { /* nothing to do */ } }; diff --git a/src/share/vm/c1/c1_globals.hpp b/src/share/vm/c1/c1_globals.hpp index 6a7188836..25633a638 100644 --- a/src/share/vm/c1/c1_globals.hpp +++ b/src/share/vm/c1/c1_globals.hpp @@ -25,12 +25,6 @@ // // Defines all global flags used by the client compiler. // -#ifndef TIERED - #define NOT_TIERED(x) x -#else - #define NOT_TIERED(x) -#endif - #define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \ \ /* Printing */ \ @@ -55,7 +49,7 @@ notproduct(bool, PrintIRDuringConstruction, false, \ "Print IR as it's being constructed (helpful for debugging frontend)")\ \ - notproduct(bool, PrintPhiFunctions, false, \ + notproduct(bool, PrintPhiFunctions, false, \ "Print phi functions when they are created and simplified") \ \ notproduct(bool, PrintIR, false, \ @@ -279,41 +273,29 @@ product_pd(intx, SafepointPollOffset, \ "Offset added to polling address (Intel only)") \ \ - product(bool, UseNewFeature1, false, \ - "Enable new feature for testing. This is a dummy flag.") \ - \ - product(bool, UseNewFeature2, false, \ - "Enable new feature for testing. This is a dummy flag.") \ - \ - product(bool, UseNewFeature3, false, \ - "Enable new feature for testing. This is a dummy flag.") \ - \ - product(bool, UseNewFeature4, false, \ - "Enable new feature for testing. This is a dummy flag.") \ - \ develop(bool, ComputeExactFPURegisterUsage, true, \ "Compute additional live set for fpu registers to simplify fpu stack merge (Intel only)") \ \ - product(bool, Tier1ProfileCalls, true, \ + product(bool, C1ProfileCalls, true, \ "Profile calls when generating code for updating MDOs") \ \ - product(bool, Tier1ProfileVirtualCalls, true, \ + product(bool, C1ProfileVirtualCalls, true, \ "Profile virtual calls when generating code for updating MDOs") \ \ - product(bool, Tier1ProfileInlinedCalls, true, \ + product(bool, C1ProfileInlinedCalls, true, \ "Profile inlined calls when generating code for updating MDOs") \ \ - product(bool, Tier1ProfileBranches, true, \ + product(bool, C1ProfileBranches, true, \ "Profile branches when generating code for updating MDOs") \ \ - product(bool, Tier1ProfileCheckcasts, true, \ + product(bool, C1ProfileCheckcasts, true, \ "Profile checkcasts when generating code for updating MDOs") \ \ - product(bool, Tier1OptimizeVirtualCallProfiling, true, \ - "Use CHA and exact type results at call sites when updating MDOs") \ + product(bool, C1OptimizeVirtualCallProfiling, true, \ + "Use CHA and exact type results at call sites when updating MDOs")\ \ - develop(bool, Tier1CountOnly, false, \ - "Don't schedule tier 2 compiles. 
Enter VM only") \ + product(bool, C1UpdateMethodData, trueInTiered, \ + "Update methodDataOops in Tier1-generated code") \ \ develop(bool, PrintCFGToFile, false, \ "print control flow graph to a separate file during compilation") \ -- cgit v1.2.3