Diffstat (limited to 'src/share/vm/opto/macro.cpp')
-rw-r--r--  src/share/vm/opto/macro.cpp  234
1 files changed, 116 insertions, 118 deletions
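
Every hunk below applies the same mechanical change: node allocations that used the two-argument arena placement form, new (C, n) SomeNode(...), now use the one-argument form, new (C) SomeNode(...), since the static type of the node already determines how much arena memory operator new has to reserve. The following is a minimal, self-contained sketch of that pattern, not the HotSpot sources; Arena, Compile, and ExampleNode here are illustrative stand-ins and their members are assumptions made for the example only.

    #include <cstddef>
    #include <cstdio>

    class Arena {                        // toy bump allocator standing in for a C2 node arena
      alignas(16) char _buf[1 << 16];
      size_t           _used = 0;
    public:
      void* alloc(size_t size) {
        void* p = _buf + _used;
        _used += (size + 7) & ~size_t(7); // 8-byte align; no overflow handling in this sketch
        return p;
      }
    };

    class Compile {                      // stand-in for the per-compilation context object C
      Arena _node_arena;
    public:
      Arena* node_arena() { return &_node_arena; }
    };

    class ExampleNode {
    public:
      // Pre-patch shape: the call site passed an extra count that is not needed
      // to size the allocation; the patch removes this argument everywhere.
      void* operator new(size_t size, Compile* C, int /*edge_hint*/) {
        return C->node_arena()->alloc(size);
      }
      // Post-patch shape: sizeof(ExampleNode) alone drives the allocation.
      void* operator new(size_t size, Compile* C) {
        return C->node_arena()->alloc(size);
      }
      int _value = 42;
    };

    int main() {
      Compile C;
      ExampleNode* a = new (&C, 3) ExampleNode(); // call shape before the patch
      ExampleNode* b = new (&C)    ExampleNode(); // call shape after the patch
      std::printf("%d %d\n", a->_value, b->_value);
      return 0;
    }

Under these assumptions both overloads hand the same byte count to the arena, which is why the conversion in the diff is a pure call-site cleanup rather than a behavioral change.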
diff --git a/src/share/vm/opto/macro.cpp b/src/share/vm/opto/macro.cpp
index a5250292b..90f1f0299 100644
--- a/src/share/vm/opto/macro.cpp
+++ b/src/share/vm/opto/macro.cpp
@@ -103,20 +103,20 @@ void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcal
Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
Node* cmp;
if (mask != 0) {
- Node* and_node = transform_later(new (C, 3) AndXNode(word, MakeConX(mask)));
- cmp = transform_later(new (C, 3) CmpXNode(and_node, MakeConX(bits)));
+ Node* and_node = transform_later(new (C) AndXNode(word, MakeConX(mask)));
+ cmp = transform_later(new (C) CmpXNode(and_node, MakeConX(bits)));
} else {
cmp = word;
}
- Node* bol = transform_later(new (C, 2) BoolNode(cmp, BoolTest::ne));
- IfNode* iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
+ Node* bol = transform_later(new (C) BoolNode(cmp, BoolTest::ne));
+ IfNode* iff = new (C) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
transform_later(iff);
// Fast path taken.
- Node *fast_taken = transform_later( new (C, 1) IfFalseNode(iff) );
+ Node *fast_taken = transform_later( new (C) IfFalseNode(iff) );
// Fast path not-taken, i.e. slow path
- Node *slow_taken = transform_later( new (C, 1) IfTrueNode(iff) );
+ Node *slow_taken = transform_later( new (C) IfTrueNode(iff) );
if (return_fast_path) {
region->init_req(edge, slow_taken); // Capture slow-control
@@ -141,10 +141,9 @@ void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallN
CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1) {
// Slow-path call
- int size = slow_call_type->domain()->cnt();
CallNode *call = leaf_name
- ? (CallNode*)new (C, size) CallLeafNode ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
- : (CallNode*)new (C, size) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM );
+ ? (CallNode*)new (C) CallLeafNode ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
+ : (CallNode*)new (C) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM );
// Slow path call has no side-effects, uses few values
copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
@@ -412,7 +411,7 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
GrowableArray <Node *> values(length, length, NULL, false);
// create a new Phi for the value
- PhiNode *phi = new (C, length) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset);
+ PhiNode *phi = new (C) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset);
transform_later(phi);
value_phis->push(phi, mem->_idx);
@@ -720,7 +719,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
SafePointNode* sfpt = safepoints.pop();
Node* mem = sfpt->memory();
uint first_ind = sfpt->req();
- SafePointScalarObjectNode* sobj = new (C, 1) SafePointScalarObjectNode(res_type,
+ SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type,
#ifdef ASSERT
alloc,
#endif
@@ -828,7 +827,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
if (field_val->is_EncodeP()) {
field_val = field_val->in(1);
} else {
- field_val = transform_later(new (C, 2) DecodeNNode(field_val, field_val->bottom_type()->make_ptr()));
+ field_val = transform_later(new (C) DecodeNNode(field_val, field_val->bottom_type()->make_ptr()));
}
}
sfpt->add_req(field_val);
@@ -995,7 +994,7 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
//---------------------------set_eden_pointers-------------------------
void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
if (UseTLAB) { // Private allocation: load from TLS
- Node* thread = transform_later(new (C, 1) ThreadLocalNode());
+ Node* thread = transform_later(new (C) ThreadLocalNode());
int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
@@ -1137,18 +1136,18 @@ void PhaseMacroExpand::expand_allocate_common(
assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent");
// generate the initial test if necessary
if (initial_slow_test != NULL ) {
- slow_region = new (C, 3) RegionNode(3);
+ slow_region = new (C) RegionNode(3);
// Now make the initial failure test. Usually a too-big test but
// might be a TRUE for finalizers or a fancy class check for
// newInstance0.
- IfNode *toobig_iff = new (C, 2) IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
+ IfNode *toobig_iff = new (C) IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
transform_later(toobig_iff);
// Plug the failing-too-big test into the slow-path region
- Node *toobig_true = new (C, 1) IfTrueNode( toobig_iff );
+ Node *toobig_true = new (C) IfTrueNode( toobig_iff );
transform_later(toobig_true);
slow_region ->init_req( too_big_or_final_path, toobig_true );
- toobig_false = new (C, 1) IfFalseNode( toobig_iff );
+ toobig_false = new (C) IfFalseNode( toobig_iff );
transform_later(toobig_false);
} else { // No initial test, just fall into next case
toobig_false = ctrl;
@@ -1181,10 +1180,10 @@ void PhaseMacroExpand::expand_allocate_common(
Node *eden_end = make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
// allocate the Region and Phi nodes for the result
- result_region = new (C, 3) RegionNode(3);
- result_phi_rawmem = new (C, 3) PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
- result_phi_rawoop = new (C, 3) PhiNode(result_region, TypeRawPtr::BOTTOM);
- result_phi_i_o = new (C, 3) PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
+ result_region = new (C) RegionNode(3);
+ result_phi_rawmem = new (C) PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
+ result_phi_rawoop = new (C) PhiNode(result_region, TypeRawPtr::BOTTOM);
+ result_phi_i_o = new (C) PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
// We need a Region for the loop-back contended case.
enum { fall_in_path = 1, contended_loopback_path = 2 };
@@ -1194,8 +1193,8 @@ void PhaseMacroExpand::expand_allocate_common(
contended_region = toobig_false;
contended_phi_rawmem = mem;
} else {
- contended_region = new (C, 3) RegionNode(3);
- contended_phi_rawmem = new (C, 3) PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
+ contended_region = new (C) RegionNode(3);
+ contended_phi_rawmem = new (C) PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
// Now handle the passing-too-big test. We fall into the contended
// loop-back merge point.
contended_region ->init_req(fall_in_path, toobig_false);
@@ -1207,23 +1206,23 @@ void PhaseMacroExpand::expand_allocate_common(
// Load(-locked) the heap top.
// See note above concerning the control input when using a TLAB
Node *old_eden_top = UseTLAB
- ? new (C, 3) LoadPNode (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM)
- : new (C, 3) LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr);
+ ? new (C) LoadPNode (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM)
+ : new (C) LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr);
transform_later(old_eden_top);
// Add to heap top to get a new heap top
- Node *new_eden_top = new (C, 4) AddPNode(top(), old_eden_top, size_in_bytes);
+ Node *new_eden_top = new (C) AddPNode(top(), old_eden_top, size_in_bytes);
transform_later(new_eden_top);
// Check for needing a GC; compare against heap end
- Node *needgc_cmp = new (C, 3) CmpPNode(new_eden_top, eden_end);
+ Node *needgc_cmp = new (C) CmpPNode(new_eden_top, eden_end);
transform_later(needgc_cmp);
- Node *needgc_bol = new (C, 2) BoolNode(needgc_cmp, BoolTest::ge);
+ Node *needgc_bol = new (C) BoolNode(needgc_cmp, BoolTest::ge);
transform_later(needgc_bol);
- IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
+ IfNode *needgc_iff = new (C) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
transform_later(needgc_iff);
// Plug the failing-heap-space-need-gc test into the slow-path region
- Node *needgc_true = new (C, 1) IfTrueNode(needgc_iff);
+ Node *needgc_true = new (C) IfTrueNode(needgc_iff);
transform_later(needgc_true);
if (initial_slow_test) {
slow_region->init_req(need_gc_path, needgc_true);
@@ -1234,7 +1233,7 @@ void PhaseMacroExpand::expand_allocate_common(
slow_region = needgc_true;
}
// No need for a GC. Setup for the Store-Conditional
- Node *needgc_false = new (C, 1) IfFalseNode(needgc_iff);
+ Node *needgc_false = new (C) IfFalseNode(needgc_iff);
transform_later(needgc_false);
// Grab regular I/O before optional prefetch may change it.
@@ -1254,37 +1253,37 @@ void PhaseMacroExpand::expand_allocate_common(
// memory state.
if (UseTLAB) {
Node* store_eden_top =
- new (C, 4) StorePNode(needgc_false, contended_phi_rawmem, eden_top_adr,
+ new (C) StorePNode(needgc_false, contended_phi_rawmem, eden_top_adr,
TypeRawPtr::BOTTOM, new_eden_top);
transform_later(store_eden_top);
fast_oop_ctrl = needgc_false; // No contention, so this is the fast path
fast_oop_rawmem = store_eden_top;
} else {
Node* store_eden_top =
- new (C, 5) StorePConditionalNode(needgc_false, contended_phi_rawmem, eden_top_adr,
+ new (C) StorePConditionalNode(needgc_false, contended_phi_rawmem, eden_top_adr,
new_eden_top, fast_oop/*old_eden_top*/);
transform_later(store_eden_top);
- Node *contention_check = new (C, 2) BoolNode(store_eden_top, BoolTest::ne);
+ Node *contention_check = new (C) BoolNode(store_eden_top, BoolTest::ne);
transform_later(contention_check);
- store_eden_top = new (C, 1) SCMemProjNode(store_eden_top);
+ store_eden_top = new (C) SCMemProjNode(store_eden_top);
transform_later(store_eden_top);
// If not using TLABs, check to see if there was contention.
- IfNode *contention_iff = new (C, 2) IfNode (needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN);
+ IfNode *contention_iff = new (C) IfNode (needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN);
transform_later(contention_iff);
- Node *contention_true = new (C, 1) IfTrueNode(contention_iff);
+ Node *contention_true = new (C) IfTrueNode(contention_iff);
transform_later(contention_true);
// If contention, loopback and try again.
contended_region->init_req(contended_loopback_path, contention_true);
contended_phi_rawmem->init_req(contended_loopback_path, store_eden_top);
// Fast-path succeeded with no contention!
- Node *contention_false = new (C, 1) IfFalseNode(contention_iff);
+ Node *contention_false = new (C) IfFalseNode(contention_iff);
transform_later(contention_false);
fast_oop_ctrl = contention_false;
// Bump total allocated bytes for this thread
- Node* thread = new (C, 1) ThreadLocalNode();
+ Node* thread = new (C) ThreadLocalNode();
transform_later(thread);
Node* alloc_bytes_adr = basic_plus_adr(top()/*not oop*/, thread,
in_bytes(JavaThread::allocated_bytes_offset()));
@@ -1293,10 +1292,10 @@ void PhaseMacroExpand::expand_allocate_common(
#ifdef _LP64
Node* alloc_size = size_in_bytes;
#else
- Node* alloc_size = new (C, 2) ConvI2LNode(size_in_bytes);
+ Node* alloc_size = new (C) ConvI2LNode(size_in_bytes);
transform_later(alloc_size);
#endif
- Node* new_alloc_bytes = new (C, 3) AddLNode(alloc_bytes, alloc_size);
+ Node* new_alloc_bytes = new (C) AddLNode(alloc_bytes, alloc_size);
transform_later(new_alloc_bytes);
fast_oop_rawmem = make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
0, new_alloc_bytes, T_LONG);
@@ -1323,9 +1322,9 @@ void PhaseMacroExpand::expand_allocate_common(
mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
mb->init_req(TypeFunc::Control, fast_oop_ctrl);
- fast_oop_ctrl = new (C, 1) ProjNode(mb,TypeFunc::Control);
+ fast_oop_ctrl = new (C) ProjNode(mb,TypeFunc::Control);
transform_later(fast_oop_ctrl);
- fast_oop_rawmem = new (C, 1) ProjNode(mb,TypeFunc::Memory);
+ fast_oop_rawmem = new (C) ProjNode(mb,TypeFunc::Memory);
transform_later(fast_oop_rawmem);
} else {
// Add the MemBarStoreStore after the InitializeNode so that
@@ -1339,9 +1338,9 @@ void PhaseMacroExpand::expand_allocate_common(
MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
transform_later(mb);
- Node* ctrl = new (C, 1) ProjNode(init,TypeFunc::Control);
+ Node* ctrl = new (C) ProjNode(init,TypeFunc::Control);
transform_later(ctrl);
- Node* mem = new (C, 1) ProjNode(init,TypeFunc::Memory);
+ Node* mem = new (C) ProjNode(init,TypeFunc::Memory);
transform_later(mem);
// The MemBarStoreStore depends on control and memory coming
@@ -1349,9 +1348,9 @@ void PhaseMacroExpand::expand_allocate_common(
mb->init_req(TypeFunc::Memory, mem);
mb->init_req(TypeFunc::Control, ctrl);
- ctrl = new (C, 1) ProjNode(mb,TypeFunc::Control);
+ ctrl = new (C) ProjNode(mb,TypeFunc::Control);
transform_later(ctrl);
- mem = new (C, 1) ProjNode(mb,TypeFunc::Memory);
+ mem = new (C) ProjNode(mb,TypeFunc::Memory);
transform_later(mem);
// All nodes that depended on the InitializeNode for control
@@ -1365,13 +1364,13 @@ void PhaseMacroExpand::expand_allocate_common(
if (C->env()->dtrace_extended_probes()) {
// Slow-path call
int size = TypeFunc::Parms + 2;
- CallLeafNode *call = new (C, size) CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
- CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
- "dtrace_object_alloc",
- TypeRawPtr::BOTTOM);
+ CallLeafNode *call = new (C) CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
+ CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
+ "dtrace_object_alloc",
+ TypeRawPtr::BOTTOM);
// Get base of thread-local storage area
- Node* thread = new (C, 1) ThreadLocalNode();
+ Node* thread = new (C) ThreadLocalNode();
transform_later(thread);
call->init_req(TypeFunc::Parms+0, thread);
@@ -1382,9 +1381,9 @@ void PhaseMacroExpand::expand_allocate_common(
call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
transform_later(call);
- fast_oop_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control);
+ fast_oop_ctrl = new (C) ProjNode(call,TypeFunc::Control);
transform_later(fast_oop_ctrl);
- fast_oop_rawmem = new (C, 1) ProjNode(call,TypeFunc::Memory);
+ fast_oop_rawmem = new (C) ProjNode(call,TypeFunc::Memory);
transform_later(fast_oop_rawmem);
}
@@ -1399,11 +1398,10 @@ void PhaseMacroExpand::expand_allocate_common(
}
// Generate slow-path call
- CallNode *call = new (C, slow_call_type->domain()->cnt())
- CallStaticJavaNode(slow_call_type, slow_call_address,
- OptoRuntime::stub_name(slow_call_address),
- alloc->jvms()->bci(),
- TypePtr::BOTTOM);
+ CallNode *call = new (C) CallStaticJavaNode(slow_call_type, slow_call_address,
+ OptoRuntime::stub_name(slow_call_address),
+ alloc->jvms()->bci(),
+ TypePtr::BOTTOM);
call->init_req( TypeFunc::Control, slow_region );
call->init_req( TypeFunc::I_O , top() ) ; // does no i/o
call->init_req( TypeFunc::Memory , slow_mem ); // may gc ptrs
@@ -1457,7 +1455,7 @@ void PhaseMacroExpand::expand_allocate_common(
// _memproj_catchall so we end up with a call that has only 1 memory projection.
if (_memproj_catchall != NULL ) {
if (_memproj_fallthrough == NULL) {
- _memproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::Memory);
+ _memproj_fallthrough = new (C) ProjNode(call, TypeFunc::Memory);
transform_later(_memproj_fallthrough);
}
for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
@@ -1489,7 +1487,7 @@ void PhaseMacroExpand::expand_allocate_common(
// _ioproj_catchall so we end up with a call that has only 1 i_o projection.
if (_ioproj_catchall != NULL ) {
if (_ioproj_fallthrough == NULL) {
- _ioproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::I_O);
+ _ioproj_fallthrough = new (C) ProjNode(call, TypeFunc::I_O);
transform_later(_ioproj_fallthrough);
}
for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
@@ -1623,46 +1621,46 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
// As an allocation hits the watermark, we will prefetch starting
// at a "distance" away from watermark.
- Node *pf_region = new (C, 3) RegionNode(3);
- Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
+ Node *pf_region = new (C) RegionNode(3);
+ Node *pf_phi_rawmem = new (C) PhiNode( pf_region, Type::MEMORY,
TypeRawPtr::BOTTOM );
// I/O is used for Prefetch
- Node *pf_phi_abio = new (C, 3) PhiNode( pf_region, Type::ABIO );
+ Node *pf_phi_abio = new (C) PhiNode( pf_region, Type::ABIO );
- Node *thread = new (C, 1) ThreadLocalNode();
+ Node *thread = new (C) ThreadLocalNode();
transform_later(thread);
- Node *eden_pf_adr = new (C, 4) AddPNode( top()/*not oop*/, thread,
+ Node *eden_pf_adr = new (C) AddPNode( top()/*not oop*/, thread,
_igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) );
transform_later(eden_pf_adr);
- Node *old_pf_wm = new (C, 3) LoadPNode( needgc_false,
+ Node *old_pf_wm = new (C) LoadPNode( needgc_false,
contended_phi_rawmem, eden_pf_adr,
TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM );
transform_later(old_pf_wm);
// check against new_eden_top
- Node *need_pf_cmp = new (C, 3) CmpPNode( new_eden_top, old_pf_wm );
+ Node *need_pf_cmp = new (C) CmpPNode( new_eden_top, old_pf_wm );
transform_later(need_pf_cmp);
- Node *need_pf_bol = new (C, 2) BoolNode( need_pf_cmp, BoolTest::ge );
+ Node *need_pf_bol = new (C) BoolNode( need_pf_cmp, BoolTest::ge );
transform_later(need_pf_bol);
- IfNode *need_pf_iff = new (C, 2) IfNode( needgc_false, need_pf_bol,
+ IfNode *need_pf_iff = new (C) IfNode( needgc_false, need_pf_bol,
PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
transform_later(need_pf_iff);
// true node, add prefetchdistance
- Node *need_pf_true = new (C, 1) IfTrueNode( need_pf_iff );
+ Node *need_pf_true = new (C) IfTrueNode( need_pf_iff );
transform_later(need_pf_true);
- Node *need_pf_false = new (C, 1) IfFalseNode( need_pf_iff );
+ Node *need_pf_false = new (C) IfFalseNode( need_pf_iff );
transform_later(need_pf_false);
- Node *new_pf_wmt = new (C, 4) AddPNode( top(), old_pf_wm,
+ Node *new_pf_wmt = new (C) AddPNode( top(), old_pf_wm,
_igvn.MakeConX(AllocatePrefetchDistance) );
transform_later(new_pf_wmt );
new_pf_wmt->set_req(0, need_pf_true);
- Node *store_new_wmt = new (C, 4) StorePNode( need_pf_true,
+ Node *store_new_wmt = new (C) StorePNode( need_pf_true,
contended_phi_rawmem, eden_pf_adr,
TypeRawPtr::BOTTOM, new_pf_wmt );
transform_later(store_new_wmt);
@@ -1677,10 +1675,10 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
uint distance = 0;
for ( uint i = 0; i < lines; i++ ) {
- prefetch_adr = new (C, 4) AddPNode( old_pf_wm, new_pf_wmt,
+ prefetch_adr = new (C) AddPNode( old_pf_wm, new_pf_wmt,
_igvn.MakeConX(distance) );
transform_later(prefetch_adr);
- prefetch = new (C, 3) PrefetchAllocationNode( i_o, prefetch_adr );
+ prefetch = new (C) PrefetchAllocationNode( i_o, prefetch_adr );
transform_later(prefetch);
distance += step_size;
i_o = prefetch;
@@ -1703,9 +1701,9 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
} else if( UseTLAB && AllocatePrefetchStyle == 3 ) {
// Insert a prefetch for each allocation.
// This code is used for Sparc with BIS.
- Node *pf_region = new (C, 3) RegionNode(3);
- Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
- TypeRawPtr::BOTTOM );
+ Node *pf_region = new (C) RegionNode(3);
+ Node *pf_phi_rawmem = new (C) PhiNode( pf_region, Type::MEMORY,
+ TypeRawPtr::BOTTOM );
// Generate several prefetch instructions.
uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
@@ -1713,29 +1711,29 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
uint distance = AllocatePrefetchDistance;
// Next cache address.
- Node *cache_adr = new (C, 4) AddPNode(old_eden_top, old_eden_top,
+ Node *cache_adr = new (C) AddPNode(old_eden_top, old_eden_top,
_igvn.MakeConX(distance));
transform_later(cache_adr);
- cache_adr = new (C, 2) CastP2XNode(needgc_false, cache_adr);
+ cache_adr = new (C) CastP2XNode(needgc_false, cache_adr);
transform_later(cache_adr);
Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
- cache_adr = new (C, 3) AndXNode(cache_adr, mask);
+ cache_adr = new (C) AndXNode(cache_adr, mask);
transform_later(cache_adr);
- cache_adr = new (C, 2) CastX2PNode(cache_adr);
+ cache_adr = new (C) CastX2PNode(cache_adr);
transform_later(cache_adr);
// Prefetch
- Node *prefetch = new (C, 3) PrefetchAllocationNode( contended_phi_rawmem, cache_adr );
+ Node *prefetch = new (C) PrefetchAllocationNode( contended_phi_rawmem, cache_adr );
prefetch->set_req(0, needgc_false);
transform_later(prefetch);
contended_phi_rawmem = prefetch;
Node *prefetch_adr;
distance = step_size;
for ( uint i = 1; i < lines; i++ ) {
- prefetch_adr = new (C, 4) AddPNode( cache_adr, cache_adr,
+ prefetch_adr = new (C) AddPNode( cache_adr, cache_adr,
_igvn.MakeConX(distance) );
transform_later(prefetch_adr);
- prefetch = new (C, 3) PrefetchAllocationNode( contended_phi_rawmem, prefetch_adr );
+ prefetch = new (C) PrefetchAllocationNode( contended_phi_rawmem, prefetch_adr );
transform_later(prefetch);
distance += step_size;
contended_phi_rawmem = prefetch;
@@ -1749,10 +1747,10 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
uint step_size = AllocatePrefetchStepSize;
uint distance = AllocatePrefetchDistance;
for ( uint i = 0; i < lines; i++ ) {
- prefetch_adr = new (C, 4) AddPNode( old_eden_top, new_eden_top,
+ prefetch_adr = new (C) AddPNode( old_eden_top, new_eden_top,
_igvn.MakeConX(distance) );
transform_later(prefetch_adr);
- prefetch = new (C, 3) PrefetchAllocationNode( i_o, prefetch_adr );
+ prefetch = new (C) PrefetchAllocationNode( i_o, prefetch_adr );
// Do not let it float too high, since if eden_top == eden_end,
// both might be null.
if( i == 0 ) { // Set control for first prefetch, next follows it
@@ -2101,12 +2099,12 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
* }
*/
- region = new (C, 5) RegionNode(5);
+ region = new (C) RegionNode(5);
// create a Phi for the memory state
- mem_phi = new (C, 5) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
+ mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
- Node* fast_lock_region = new (C, 3) RegionNode(3);
- Node* fast_lock_mem_phi = new (C, 3) PhiNode( fast_lock_region, Type::MEMORY, TypeRawPtr::BOTTOM);
+ Node* fast_lock_region = new (C) RegionNode(3);
+ Node* fast_lock_mem_phi = new (C) PhiNode( fast_lock_region, Type::MEMORY, TypeRawPtr::BOTTOM);
// First, check mark word for the biased lock pattern.
Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
@@ -2136,10 +2134,10 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
}
Node *proto_node = make_load(ctrl, mem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeX_X, TypeX_X->basic_type());
- Node* thread = transform_later(new (C, 1) ThreadLocalNode());
- Node* cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread));
- Node* o_node = transform_later(new (C, 3) OrXNode(cast_thread, proto_node));
- Node* x_node = transform_later(new (C, 3) XorXNode(o_node, mark_node));
+ Node* thread = transform_later(new (C) ThreadLocalNode());
+ Node* cast_thread = transform_later(new (C) CastP2XNode(ctrl, thread));
+ Node* o_node = transform_later(new (C) OrXNode(cast_thread, proto_node));
+ Node* x_node = transform_later(new (C) XorXNode(o_node, mark_node));
// Get slow path - mark word does NOT match the value.
Node* not_biased_ctrl = opt_bits_test(ctrl, region, 3, x_node,
@@ -2162,17 +2160,17 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
// We are going to try to reset the mark of this object to the prototype
// value and fall through to the CAS-based locking scheme.
Node* adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
- Node* cas = new (C, 5) StoreXConditionalNode(not_biased_ctrl, mem, adr,
- proto_node, mark_node);
+ Node* cas = new (C) StoreXConditionalNode(not_biased_ctrl, mem, adr,
+ proto_node, mark_node);
transform_later(cas);
- Node* proj = transform_later( new (C, 1) SCMemProjNode(cas));
+ Node* proj = transform_later( new (C) SCMemProjNode(cas));
fast_lock_mem_phi->init_req(2, proj);
// Second, check epoch bits.
- Node* rebiased_region = new (C, 3) RegionNode(3);
- Node* old_phi = new (C, 3) PhiNode( rebiased_region, TypeX_X);
- Node* new_phi = new (C, 3) PhiNode( rebiased_region, TypeX_X);
+ Node* rebiased_region = new (C) RegionNode(3);
+ Node* old_phi = new (C) PhiNode( rebiased_region, TypeX_X);
+ Node* new_phi = new (C) PhiNode( rebiased_region, TypeX_X);
// Get slow path - mark word does NOT match epoch bits.
Node* epoch_ctrl = opt_bits_test(ctrl, rebiased_region, 1, x_node,
@@ -2189,9 +2187,9 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
Node* cmask = MakeConX(markOopDesc::biased_lock_mask_in_place |
markOopDesc::age_mask_in_place |
markOopDesc::epoch_mask_in_place);
- Node* old = transform_later(new (C, 3) AndXNode(mark_node, cmask));
- cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread));
- Node* new_mark = transform_later(new (C, 3) OrXNode(cast_thread, old));
+ Node* old = transform_later(new (C) AndXNode(mark_node, cmask));
+ cast_thread = transform_later(new (C) CastP2XNode(ctrl, thread));
+ Node* new_mark = transform_later(new (C) OrXNode(cast_thread, old));
old_phi->init_req(1, old);
new_phi->init_req(1, new_mark);
@@ -2201,10 +2199,10 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
// Try to acquire the bias of the object using an atomic operation.
// If this fails we will go in to the runtime to revoke the object's bias.
- cas = new (C, 5) StoreXConditionalNode(rebiased_region, mem, adr,
+ cas = new (C) StoreXConditionalNode(rebiased_region, mem, adr,
new_phi, old_phi);
transform_later(cas);
- proj = transform_later( new (C, 1) SCMemProjNode(cas));
+ proj = transform_later( new (C) SCMemProjNode(cas));
// Get slow path - Failed to CAS.
not_biased_ctrl = opt_bits_test(rebiased_region, region, 4, cas, 0, 0);
@@ -2212,8 +2210,8 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
// region->in(4) is set to fast path - the object is rebiased to the current thread.
// Failed to CAS.
- slow_path = new (C, 3) RegionNode(3);
- Node *slow_mem = new (C, 3) PhiNode( slow_path, Type::MEMORY, TypeRawPtr::BOTTOM);
+ slow_path = new (C) RegionNode(3);
+ Node *slow_mem = new (C) PhiNode( slow_path, Type::MEMORY, TypeRawPtr::BOTTOM);
slow_path->init_req(1, not_biased_ctrl); // Capture slow-control
slow_mem->init_req(1, proj);
@@ -2237,9 +2235,9 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
lock->set_req(TypeFunc::Memory, slow_mem);
} else {
- region = new (C, 3) RegionNode(3);
+ region = new (C) RegionNode(3);
// create a Phi for the memory state
- mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
+ mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
// Optimize test; set region slot 2
slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
@@ -2270,7 +2268,7 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
transform_later(region);
_igvn.replace_node(_fallthroughproj, region);
- Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) );
+ Node *memproj = transform_later( new(C) ProjNode(call, TypeFunc::Memory) );
mem_phi->init_req(1, memproj );
transform_later(mem_phi);
_igvn.replace_node(_memproj_fallthrough, mem_phi);
@@ -2295,9 +2293,9 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
if (UseOptoBiasInlining) {
// Check for biased locking unlock case, which is a no-op.
// See the full description in MacroAssembler::biased_locking_exit().
- region = new (C, 4) RegionNode(4);
+ region = new (C) RegionNode(4);
// create a Phi for the memory state
- mem_phi = new (C, 4) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
+ mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
mem_phi->init_req(3, mem);
Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
@@ -2305,12 +2303,12 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
markOopDesc::biased_lock_mask_in_place,
markOopDesc::biased_lock_pattern);
} else {
- region = new (C, 3) RegionNode(3);
+ region = new (C) RegionNode(3);
// create a Phi for the memory state
- mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
+ mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
}
- FastUnlockNode *funlock = new (C, 3) FastUnlockNode( ctrl, obj, box );
+ FastUnlockNode *funlock = new (C) FastUnlockNode( ctrl, obj, box );
funlock = transform_later( funlock )->as_FastUnlock();
// Optimize test; set region slot 2
Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);
@@ -2335,7 +2333,7 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
transform_later(region);
_igvn.replace_node(_fallthroughproj, region);
- Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) );
+ Node *memproj = transform_later( new(C) ProjNode(call, TypeFunc::Memory) );
mem_phi->init_req(1, memproj );
mem_phi->init_req(2, mem);
transform_later(mem_phi);