author    jrose <none@none>    2010-06-02 22:45:42 -0700
committer jrose <none@none>    2010-06-02 22:45:42 -0700
commit    f5c11173784f3afbc5a741bfa24860f68e2326b0 (patch)
tree      8e32b99f1f669fe44fc4f518799c45b1b067e57c /src/share
parent    db3885202d9095c5a296d45d3758e40bad411679 (diff)
parent    90aebc75b907b0ddfeb3ef2d0576a47f3afc7656 (diff)
Merge
Diffstat (limited to 'src/share')
-rw-r--r--  src/share/vm/asm/codeBuffer.hpp | 4
-rw-r--r--  src/share/vm/c1/c1_FrameMap.hpp | 5
-rw-r--r--  src/share/vm/c1/c1_GraphBuilder.cpp | 6
-rw-r--r--  src/share/vm/c1/c1_IR.cpp | 10
-rw-r--r--  src/share/vm/c1/c1_IR.hpp | 9
-rw-r--r--  src/share/vm/c1/c1_LIR.cpp | 5
-rw-r--r--  src/share/vm/c1/c1_LIR.hpp | 24
-rw-r--r--  src/share/vm/c1/c1_LIRAssembler.cpp | 14
-rw-r--r--  src/share/vm/c1/c1_LIRAssembler.hpp | 6
-rw-r--r--  src/share/vm/c1/c1_LIRGenerator.cpp | 35
-rw-r--r--  src/share/vm/ci/ciMethod.cpp | 24
-rw-r--r--  src/share/vm/ci/ciStreams.cpp | 48
-rw-r--r--  src/share/vm/ci/ciStreams.hpp | 118
-rw-r--r--  src/share/vm/ci/ciTypeFlow.cpp | 3
-rw-r--r--  src/share/vm/classfile/verifier.cpp | 59
-rw-r--r--  src/share/vm/classfile/verifier.hpp | 10
-rw-r--r--  src/share/vm/code/codeBlob.cpp | 98
-rw-r--r--  src/share/vm/code/codeBlob.hpp | 62
-rw-r--r--  src/share/vm/code/codeCache.cpp | 16
-rw-r--r--  src/share/vm/code/compiledIC.cpp | 8
-rw-r--r--  src/share/vm/code/nmethod.cpp | 115
-rw-r--r--  src/share/vm/code/nmethod.hpp | 54
-rw-r--r--  src/share/vm/code/oopRecorder.cpp | 6
-rw-r--r--  src/share/vm/code/oopRecorder.hpp | 6
-rw-r--r--  src/share/vm/code/relocInfo.cpp | 51
-rw-r--r--  src/share/vm/code/relocInfo.hpp | 26
-rw-r--r--  src/share/vm/compiler/compileBroker.cpp | 7
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp | 19
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp | 9
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp | 4
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp | 6
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp | 2
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp | 10
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp | 12
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp | 2
-rw-r--r--  src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp | 4
-rw-r--r--  src/share/vm/gc_interface/collectedHeap.cpp | 4
-rw-r--r--  src/share/vm/includeDB_core | 2
-rw-r--r--  src/share/vm/interpreter/bytecode.cpp | 132
-rw-r--r--  src/share/vm/interpreter/bytecode.hpp | 161
-rw-r--r--  src/share/vm/interpreter/bytecodeStream.cpp | 23
-rw-r--r--  src/share/vm/interpreter/bytecodeStream.hpp | 146
-rw-r--r--  src/share/vm/interpreter/bytecodeTracer.cpp | 81
-rw-r--r--  src/share/vm/interpreter/bytecodes.cpp | 190
-rw-r--r--  src/share/vm/interpreter/bytecodes.hpp | 72
-rw-r--r--  src/share/vm/interpreter/interpreter.cpp | 5
-rw-r--r--  src/share/vm/interpreter/interpreterRuntime.cpp | 14
-rw-r--r--  src/share/vm/interpreter/interpreterRuntime.hpp | 12
-rw-r--r--  src/share/vm/interpreter/rewriter.cpp | 25
-rw-r--r--  src/share/vm/interpreter/rewriter.hpp | 6
-rw-r--r--  src/share/vm/interpreter/templateTable.cpp | 22
-rw-r--r--  src/share/vm/interpreter/templateTable.hpp | 14
-rw-r--r--  src/share/vm/memory/iterator.cpp | 13
-rw-r--r--  src/share/vm/memory/space.cpp | 8
-rw-r--r--  src/share/vm/memory/threadLocalAllocBuffer.inline.hpp | 2
-rw-r--r--  src/share/vm/memory/universe.cpp | 4
-rw-r--r--  src/share/vm/oops/arrayOop.hpp | 2
-rw-r--r--  src/share/vm/oops/constantPoolKlass.cpp | 17
-rw-r--r--  src/share/vm/oops/constantPoolOop.cpp | 10
-rw-r--r--  src/share/vm/oops/constantPoolOop.hpp | 8
-rw-r--r--  src/share/vm/oops/generateOopMap.cpp | 26
-rw-r--r--  src/share/vm/oops/methodKlass.cpp | 2
-rw-r--r--  src/share/vm/oops/oop.hpp | 4
-rw-r--r--  src/share/vm/oops/oop.inline.hpp | 17
-rw-r--r--  src/share/vm/opto/bytecodeInfo.cpp | 8
-rw-r--r--  src/share/vm/opto/compile.cpp | 20
-rw-r--r--  src/share/vm/opto/connode.cpp | 2
-rw-r--r--  src/share/vm/opto/lcm.cpp | 39
-rw-r--r--  src/share/vm/opto/matcher.cpp | 23
-rw-r--r--  src/share/vm/opto/matcher.hpp | 32
-rw-r--r--  src/share/vm/opto/parse2.cpp | 4
-rw-r--r--  src/share/vm/prims/jvmtiClassFileReconstituter.cpp | 4
-rw-r--r--  src/share/vm/prims/jvmtiExport.cpp | 75
-rw-r--r--  src/share/vm/prims/jvmtiExport.hpp | 7
-rw-r--r--  src/share/vm/prims/methodComparator.cpp | 53
-rw-r--r--  src/share/vm/prims/methodHandleWalk.cpp | 10
-rw-r--r--  src/share/vm/runtime/arguments.cpp | 43
-rw-r--r--  src/share/vm/runtime/globals.hpp | 6
-rw-r--r--  src/share/vm/runtime/sharedRuntime.cpp | 4
-rw-r--r--  src/share/vm/runtime/vmStructs.cpp | 14
-rw-r--r--  src/share/vm/utilities/copy.hpp | 16
-rw-r--r--  src/share/vm/utilities/globalDefinitions.cpp | 12
-rw-r--r--  src/share/vm/utilities/globalDefinitions.hpp | 23
83 files changed, 1360 insertions(+), 954 deletions(-)
diff --git a/src/share/vm/asm/codeBuffer.hpp b/src/share/vm/asm/codeBuffer.hpp
index f3748c364..2f1a8d3c8 100644
--- a/src/share/vm/asm/codeBuffer.hpp
+++ b/src/share/vm/asm/codeBuffer.hpp
@@ -510,9 +510,9 @@ class CodeBuffer: public StackObj {
copy_relocations_to(blob);
copy_code_to(blob);
}
- void copy_oops_to(CodeBlob* blob) {
+ void copy_oops_to(nmethod* nm) {
if (!oop_recorder()->is_unused()) {
- oop_recorder()->copy_to(blob);
+ oop_recorder()->copy_to(nm);
}
}
diff --git a/src/share/vm/c1/c1_FrameMap.hpp b/src/share/vm/c1/c1_FrameMap.hpp
index 73172973e..dcbe31d18 100644
--- a/src/share/vm/c1/c1_FrameMap.hpp
+++ b/src/share/vm/c1/c1_FrameMap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -150,6 +150,9 @@ class FrameMap : public CompilationResourceObj {
// Opr representing the stack_pointer on this platform
static LIR_Opr stack_pointer();
+ // JSR 292
+ static LIR_Opr method_handle_invoke_SP_save_opr();
+
static BasicTypeArray* signature_type_array_for(const ciMethod* method);
static BasicTypeArray* signature_type_array_for(const char * signature);
diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp
index ba8b85442..b28128999 100644
--- a/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -2438,13 +2438,13 @@ BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokedynamic : // fall through
case Bytecodes::_invokeinterface: invoke(code); break;
- case Bytecodes::_new : new_instance(s.get_index_big()); break;
+ case Bytecodes::_new : new_instance(s.get_index_u2()); break;
case Bytecodes::_newarray : new_type_array(); break;
case Bytecodes::_anewarray : new_object_array(); break;
case Bytecodes::_arraylength : ipush(append(new ArrayLength(apop(), lock_stack()))); break;
case Bytecodes::_athrow : throw_op(s.cur_bci()); break;
- case Bytecodes::_checkcast : check_cast(s.get_index_big()); break;
- case Bytecodes::_instanceof : instance_of(s.get_index_big()); break;
+ case Bytecodes::_checkcast : check_cast(s.get_index_u2()); break;
+ case Bytecodes::_instanceof : instance_of(s.get_index_u2()); break;
// Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException
case Bytecodes::_monitorenter : monitorenter(apop(), s.cur_bci()); break;
case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break;
diff --git a/src/share/vm/c1/c1_IR.cpp b/src/share/vm/c1/c1_IR.cpp
index a534634e9..75b516d5e 100644
--- a/src/share/vm/c1/c1_IR.cpp
+++ b/src/share/vm/c1/c1_IR.cpp
@@ -230,7 +230,8 @@ CodeEmitInfo::CodeEmitInfo(int bci, ValueStack* stack, XHandlers* exception_hand
, _stack(stack)
, _exception_handlers(exception_handlers)
, _next(NULL)
- , _id(-1) {
+ , _id(-1)
+ , _is_method_handle_invoke(false) {
assert(_stack != NULL, "must be non null");
assert(_bci == SynchronizationEntryBCI || Bytecodes::is_defined(scope()->method()->java_code_at_bci(_bci)), "make sure bci points at a real bytecode");
}
@@ -241,7 +242,8 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
, _exception_handlers(NULL)
, _bci(info->_bci)
, _scope_debug_info(NULL)
- , _oop_map(NULL) {
+ , _oop_map(NULL)
+ , _is_method_handle_invoke(info->_is_method_handle_invoke) {
if (lock_stack_only) {
if (info->_stack != NULL) {
_stack = info->_stack->copy_locks();
@@ -259,10 +261,10 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
}
-void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke) {
+void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
// record the safepoint before recording the debug info for enclosing scopes
recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
- _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, is_method_handle_invoke);
+ _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, _is_method_handle_invoke);
recorder->end_safepoint(pc_offset);
}
diff --git a/src/share/vm/c1/c1_IR.hpp b/src/share/vm/c1/c1_IR.hpp
index 3130c381a..6cce0fca4 100644
--- a/src/share/vm/c1/c1_IR.hpp
+++ b/src/share/vm/c1/c1_IR.hpp
@@ -269,6 +269,7 @@ class CodeEmitInfo: public CompilationResourceObj {
int _bci;
CodeEmitInfo* _next;
int _id;
+ bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site.
FrameMap* frame_map() const { return scope()->compilation()->frame_map(); }
Compilation* compilation() const { return scope()->compilation(); }
@@ -287,7 +288,8 @@ class CodeEmitInfo: public CompilationResourceObj {
, _stack(NULL)
, _exception_handlers(NULL)
, _next(NULL)
- , _id(-1) {
+ , _id(-1)
+ , _is_method_handle_invoke(false) {
}
// make a copy
@@ -302,13 +304,16 @@ class CodeEmitInfo: public CompilationResourceObj {
int bci() const { return _bci; }
void add_register_oop(LIR_Opr opr);
- void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke = false);
+ void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
CodeEmitInfo* next() const { return _next; }
void set_next(CodeEmitInfo* next) { _next = next; }
int id() const { return _id; }
void set_id(int id) { _id = id; }
+
+ bool is_method_handle_invoke() const { return _is_method_handle_invoke; }
+ void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }
};
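Taken together, the c1_IR changes move the "is this call site a MethodHandle invoke?" decision out of the record_debug_info() parameter list and into per-site state on CodeEmitInfo. A minimal standalone model of the pattern, with hypothetical names:

    // Per-site state travels with the info object, so downstream
    // consumers (debug info recording) need no extra flag parameter.
    class InfoModel {
      bool _is_method_handle_invoke;
     public:
      InfoModel() : _is_method_handle_invoke(false) {}
      void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }
      bool is_method_handle_invoke() const { return _is_method_handle_invoke; }
      void record_debug_info(/* recorder, pc_offset */) {
        // consults _is_method_handle_invoke internally
      }
    };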
diff --git a/src/share/vm/c1/c1_LIR.cpp b/src/share/vm/c1/c1_LIR.cpp
index 86815794c..6aa0f5e0f 100644
--- a/src/share/vm/c1/c1_LIR.cpp
+++ b/src/share/vm/c1/c1_LIR.cpp
@@ -715,7 +715,10 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
}
if (opJavaCall->_info) do_info(opJavaCall->_info);
- if (opJavaCall->is_method_handle_invoke()) do_temp(FrameMap::method_handle_invoke_SP_save_opr());
+ if (opJavaCall->is_method_handle_invoke()) {
+ opJavaCall->_method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
+ do_temp(opJavaCall->_method_handle_invoke_SP_save_opr);
+ }
do_call();
if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result);
diff --git a/src/share/vm/c1/c1_LIR.hpp b/src/share/vm/c1/c1_LIR.hpp
index 504b9f38b..aabcfaa84 100644
--- a/src/share/vm/c1/c1_LIR.hpp
+++ b/src/share/vm/c1/c1_LIR.hpp
@@ -505,15 +505,22 @@ class LIR_Address: public LIR_OprPtr {
, _type(type)
, _disp(0) { verify(); }
- LIR_Address(LIR_Opr base, int disp, BasicType type):
+ LIR_Address(LIR_Opr base, intx disp, BasicType type):
_base(base)
, _index(LIR_OprDesc::illegalOpr())
, _scale(times_1)
, _type(type)
, _disp(disp) { verify(); }
+ LIR_Address(LIR_Opr base, BasicType type):
+ _base(base)
+ , _index(LIR_OprDesc::illegalOpr())
+ , _scale(times_1)
+ , _type(type)
+ , _disp(0) { verify(); }
+
#ifdef X86
- LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type):
+ LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
_base(base)
, _index(index)
, _scale(scale)
@@ -1033,8 +1040,9 @@ class LIR_OpJavaCall: public LIR_OpCall {
friend class LIR_OpVisitState;
private:
- ciMethod* _method;
- LIR_Opr _receiver;
+ ciMethod* _method;
+ LIR_Opr _receiver;
+ LIR_Opr _method_handle_invoke_SP_save_opr; // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
public:
LIR_OpJavaCall(LIR_Code code, ciMethod* method,
@@ -1043,14 +1051,18 @@ class LIR_OpJavaCall: public LIR_OpCall {
CodeEmitInfo* info)
: LIR_OpCall(code, addr, result, arguments, info)
, _receiver(receiver)
- , _method(method) { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
+ , _method(method)
+ , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
+ { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
LIR_OpJavaCall(LIR_Code code, ciMethod* method,
LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
LIR_OprList* arguments, CodeEmitInfo* info)
: LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
, _receiver(receiver)
- , _method(method) { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
+ , _method(method)
+ , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
+ { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
LIR_Opr receiver() const { return _receiver; }
ciMethod* method() const { return _method; }
diff --git a/src/share/vm/c1/c1_LIRAssembler.cpp b/src/share/vm/c1/c1_LIRAssembler.cpp
index 741fe2873..75c713d50 100644
--- a/src/share/vm/c1/c1_LIRAssembler.cpp
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp
@@ -301,9 +301,9 @@ void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
}
-void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
+void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
flush_debug_info(pc_offset);
- cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
+ cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
if (cinfo->exception_handlers() != NULL) {
compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
}
@@ -413,12 +413,6 @@ void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
verify_oop_map(op->info());
- // JSR 292
- // Preserve the SP over MethodHandle call sites.
- if (op->is_method_handle_invoke()) {
- preserve_SP(op);
- }
-
if (os::is_MP()) {
// must align calls sites, otherwise they can't be updated atomically on MP hardware
align_call(op->code());
@@ -444,10 +438,6 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
default: ShouldNotReachHere();
}
- if (op->is_method_handle_invoke()) {
- restore_SP(op);
- }
-
#if defined(X86) && defined(TIERED)
// C2 leave fpu stack dirty clean it
if (UseSSE < 2) {
diff --git a/src/share/vm/c1/c1_LIRAssembler.hpp b/src/share/vm/c1/c1_LIRAssembler.hpp
index ad7e53930..e40ebd51d 100644
--- a/src/share/vm/c1/c1_LIRAssembler.hpp
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp
@@ -84,7 +84,7 @@ class LIR_Assembler: public CompilationResourceObj {
Address as_Address_hi(LIR_Address* addr);
// debug information
- void add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke = false);
+ void add_call_info(int pc_offset, CodeEmitInfo* cinfo);
void add_debug_info_for_branch(CodeEmitInfo* info);
void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo);
void add_debug_info_for_div0_here(CodeEmitInfo* info);
@@ -212,10 +212,6 @@ class LIR_Assembler: public CompilationResourceObj {
void ic_call( LIR_OpJavaCall* op);
void vtable_call( LIR_OpJavaCall* op);
- // JSR 292
- void preserve_SP(LIR_OpJavaCall* op);
- void restore_SP( LIR_OpJavaCall* op);
-
void osr_entry();
void build_frame();
diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp
index 469dd84a0..c200775d2 100644
--- a/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -1309,7 +1309,7 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
if (!addr_opr->is_address()) {
assert(addr_opr->is_register(), "must be");
- addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT));
+ addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
}
CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
info);
@@ -1325,7 +1325,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
new_val->as_constant_ptr()->as_jobject() == NULL) return;
if (!new_val->is_register()) {
- LIR_Opr new_val_reg = new_pointer_register();
+ LIR_Opr new_val_reg = new_register(T_OBJECT);
if (new_val->is_constant()) {
__ move(new_val, new_val_reg);
} else {
@@ -1337,7 +1337,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
- LIR_Opr ptr = new_pointer_register();
+ LIR_Opr ptr = new_register(T_OBJECT);
if (!address->index()->is_valid() && address->disp() == 0) {
__ move(address->base(), ptr);
} else {
@@ -1350,7 +1350,6 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
LIR_Opr xor_res = new_pointer_register();
LIR_Opr xor_shift_res = new_pointer_register();
-
if (TwoOperandLIRForm ) {
__ move(addr, xor_res);
__ logical_xor(xor_res, new_val, xor_res);
@@ -1368,7 +1367,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
}
if (!new_val->is_register()) {
- LIR_Opr new_val_reg = new_pointer_register();
+ LIR_Opr new_val_reg = new_register(T_OBJECT);
__ leal(new_val, new_val_reg);
new_val = new_val_reg;
}
@@ -1377,7 +1376,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
__ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
CodeStub* slow = new G1PostBarrierStub(addr, new_val);
- __ branch(lir_cond_notEqual, T_INT, slow);
+ __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
__ branch_destination(slow->continuation());
}
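For context, this barrier code implements G1's cross-region filter: a post barrier is only needed for a non-null store whose field address and new value lie in different heap regions, which the emitted LIR tests by XORing the two pointers and shifting right by the region-size log (hence the branch type becomes pointer-width T_LONG on LP64). A hedged standalone rendering of the predicate; log_region_size stands in for the real region-size constant:

    #include <cstdint>

    // True when a G1 post barrier is required: a non-null store whose
    // source and destination fall in different heap regions.
    static bool needs_post_barrier(uintptr_t field_addr, uintptr_t new_val,
                                   int log_region_size) {
      if (new_val == 0) return false;  // null stores are filtered out earlier
      return ((field_addr ^ new_val) >> log_region_size) != 0;
    }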
@@ -2371,9 +2370,17 @@ void LIRGenerator::do_Invoke(Invoke* x) {
bool optimized = x->target_is_loaded() && x->target_is_final();
assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
+ // JSR 292
+ // Preserve the SP over MethodHandle call sites.
+ ciMethod* target = x->target();
+ if (target->is_method_handle_invoke()) {
+ info->set_is_method_handle_invoke(true);
+ __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
+ }
+
switch (x->code()) {
case Bytecodes::_invokestatic:
- __ call_static(x->target(), result_register,
+ __ call_static(target, result_register,
SharedRuntime::get_resolve_static_call_stub(),
arg_list, info);
break;
@@ -2383,17 +2390,17 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// for final target we still produce an inline cache, in order
// to be able to call mixed mode
if (x->code() == Bytecodes::_invokespecial || optimized) {
- __ call_opt_virtual(x->target(), receiver, result_register,
+ __ call_opt_virtual(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
arg_list, info);
} else if (x->vtable_index() < 0) {
- __ call_icvirtual(x->target(), receiver, result_register,
+ __ call_icvirtual(target, receiver, result_register,
SharedRuntime::get_resolve_virtual_call_stub(),
arg_list, info);
} else {
int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
- __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
+ __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
}
break;
case Bytecodes::_invokedynamic: {
@@ -2432,7 +2439,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// Load target MethodHandle from CallSite object.
__ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
- __ call_dynamic(x->target(), receiver, result_register,
+ __ call_dynamic(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
arg_list, info);
break;
@@ -2442,6 +2449,12 @@ void LIRGenerator::do_Invoke(Invoke* x) {
break;
}
+ // JSR 292
+ // Restore the SP after MethodHandle call sites.
+ if (target->is_method_handle_invoke()) {
+ __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
+ }
+
if (x->type()->is_float() || x->type()->is_double()) {
// Force rounding of results from non-strictfp when in strictfp
// scope (or when we don't know the strictness of the callee, to
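The two JSR 292 blocks added to do_Invoke() bracket the call: the stack pointer is copied into FrameMap::method_handle_invoke_SP_save_opr() before a MethodHandle invoke and copied back afterwards, since MethodHandle adapters may adjust SP while shuffling arguments. A tiny RAII model of the same bracketing discipline (names are illustrative):

    #include <cstdint>

    // Save/restore bracketing around a call that may disturb the stack pointer.
    struct SPSaveGuard {
      uintptr_t& sp;      // stands in for the machine stack pointer
      uintptr_t  saved;   // stands in for the dedicated save location
      explicit SPSaveGuard(uintptr_t& s) : sp(s), saved(s) {}
      ~SPSaveGuard() { sp = saved; }  // restore after the call site
    };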
diff --git a/src/share/vm/ci/ciMethod.cpp b/src/share/vm/ci/ciMethod.cpp
index f445a7082..189b55104 100644
--- a/src/share/vm/ci/ciMethod.cpp
+++ b/src/share/vm/ci/ciMethod.cpp
@@ -690,20 +690,32 @@ int ciMethod::scale_count(int count, float prof_factor) {
// ------------------------------------------------------------------
// invokedynamic support
+
+// ------------------------------------------------------------------
+// ciMethod::is_method_handle_invoke
//
+// Return true if the method is a MethodHandle target.
bool ciMethod::is_method_handle_invoke() const {
- check_is_loaded();
- bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
+ bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
+ methodOopDesc::is_method_handle_invoke_name(name()->sid()));
#ifdef ASSERT
- {
- VM_ENTRY_MARK;
- bool flag2 = get_methodOop()->is_method_handle_invoke();
- assert(flag == flag2, "consistent");
+ if (is_loaded()) {
+ bool flag2 = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
+ {
+ VM_ENTRY_MARK;
+ bool flag3 = get_methodOop()->is_method_handle_invoke();
+ assert(flag2 == flag3, "consistent");
+ assert(flag == flag3, "consistent");
+ }
}
#endif //ASSERT
return flag;
}
+// ------------------------------------------------------------------
+// ciMethod::is_method_handle_adapter
+//
+// Return true if the method is a generated MethodHandle adapter.
bool ciMethod::is_method_handle_adapter() const {
check_is_loaded();
VM_ENTRY_MARK;
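The rewritten ciMethod::is_method_handle_invoke() no longer requires the method to be loaded: it recognizes an invoker purely from the holder (java.dyn.MethodHandle) and the method name, cross-checking the access-flag bits only under ASSERT when the method is loaded. A standalone sketch of the shape of the test; the exact invoke-name list here is an assumption:

    #include <string>

    // Name-based recognition, usable even before the method is loaded.
    static bool is_mh_invoke_sketch(const std::string& holder,
                                    const std::string& name) {
      return holder == "java.dyn.MethodHandle" &&
             (name == "invoke" || name == "invokeExact");  // illustrative names
    }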
diff --git a/src/share/vm/ci/ciStreams.cpp b/src/share/vm/ci/ciStreams.cpp
index 6ebcde1a4..2dfd8c7a5 100644
--- a/src/share/vm/ci/ciStreams.cpp
+++ b/src/share/vm/ci/ciStreams.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -81,27 +81,21 @@ int ciExceptionHandlerStream::count_remaining() {
// providing accessors for constant pool items.
// ------------------------------------------------------------------
-// ciBytecodeStream::wide
-//
-// Special handling for the wide bytcode
-Bytecodes::Code ciBytecodeStream::wide()
-{
- // Get following bytecode; do not return wide
- Bytecodes::Code bc = (Bytecodes::Code)_pc[1];
- _pc += 2; // Skip both bytecodes
- _pc += 2; // Skip index always
- if( bc == Bytecodes::_iinc )
- _pc += 2; // Skip optional constant
- _was_wide = _pc; // Flag last wide bytecode found
- return bc;
-}
-
-// ------------------------------------------------------------------
-// ciBytecodeStream::table
+// ciBytecodeStream::next_wide_or_table
//
// Special handling for switch ops
-Bytecodes::Code ciBytecodeStream::table( Bytecodes::Code bc ) {
- switch( bc ) { // Check for special bytecode handling
+Bytecodes::Code ciBytecodeStream::next_wide_or_table(Bytecodes::Code bc) {
+ switch (bc) { // Check for special bytecode handling
+ case Bytecodes::_wide:
+ // Special handling for the wide bytecode
+ // Get following bytecode; do not return wide
+ assert(Bytecodes::Code(_pc[0]) == Bytecodes::_wide, "");
+ bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)_pc[1]);
+ assert(Bytecodes::wide_length_for(bc) > 2, "must make progress");
+ _pc += Bytecodes::wide_length_for(bc);
+ _was_wide = _pc; // Flag last wide bytecode found
+ assert(is_wide(), "accessor works right");
+ break;
case Bytecodes::_lookupswitch:
_pc++; // Skip wide bytecode
@@ -164,7 +158,7 @@ void ciBytecodeStream::force_bci(int bci) {
int ciBytecodeStream::get_klass_index() const {
switch(cur_bc()) {
case Bytecodes::_ldc:
- return get_index();
+ return get_index_u1();
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
case Bytecodes::_checkcast:
@@ -173,7 +167,7 @@ int ciBytecodeStream::get_klass_index() const {
case Bytecodes::_multianewarray:
case Bytecodes::_new:
case Bytecodes::_newarray:
- return get_index_big();
+ return get_index_u2();
default:
ShouldNotReachHere();
return 0;
@@ -199,10 +193,10 @@ ciKlass* ciBytecodeStream::get_klass(bool& will_link) {
int ciBytecodeStream::get_constant_index() const {
switch(cur_bc()) {
case Bytecodes::_ldc:
- return get_index();
+ return get_index_u1();
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
- return get_index_big();
+ return get_index_u2();
default:
ShouldNotReachHere();
return 0;
@@ -239,7 +233,7 @@ int ciBytecodeStream::get_field_index() {
cur_bc() == Bytecodes::_putfield ||
cur_bc() == Bytecodes::_getstatic ||
cur_bc() == Bytecodes::_putstatic, "wrong bc");
- return get_index_big();
+ return get_index_u2_cpcache();
}
@@ -319,7 +313,9 @@ int ciBytecodeStream::get_method_index() {
ShouldNotReachHere();
}
#endif
- return get_index_int();
+ if (has_index_u4())
+ return get_index_u4(); // invokedynamic
+ return get_index_u2_cpcache();
}
// ------------------------------------------------------------------
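The renames encode operand width and byte order in the accessor name: get_index_u1/u2/u4 read operands in class-file (big-endian) order, while get_index_u2_cpcache reads the constant-pool-cache index that the rewriter patches in, stored in native byte order for fast interpreter access. A standalone contrast of the two reads (illustrative, not HotSpot's Bytes:: API):

    #include <cstdint>
    #include <cstring>

    // Class-file order: operand bytes are big-endian.
    static int index_u2_java(const uint8_t* p) {
      return (p[0] << 8) | p[1];
    }

    // Rewritten order: cp-cache indexes are stored in native byte order,
    // so the interpreter can load them with a plain u2 load.
    static int index_u2_native(const uint8_t* p) {
      uint16_t v;
      std::memcpy(&v, p, sizeof v);
      return (int) v;
    }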
diff --git a/src/share/vm/ci/ciStreams.hpp b/src/share/vm/ci/ciStreams.hpp
index 77a15f2b3..c0f745444 100644
--- a/src/share/vm/ci/ciStreams.hpp
+++ b/src/share/vm/ci/ciStreams.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,15 +31,19 @@
// their original form during iteration.
class ciBytecodeStream : StackObj {
private:
- // Handling for the weird bytecodes
- Bytecodes::Code wide(); // Handle wide bytecode
- Bytecodes::Code table(Bytecodes::Code); // Handle complicated inline table
+ // Handling for the weird bytecodes
+ Bytecodes::Code next_wide_or_table(Bytecodes::Code); // Handle _wide & complicated inline table
static Bytecodes::Code check_java(Bytecodes::Code c) {
assert(Bytecodes::is_java_code(c), "should not return _fast bytecodes");
return c;
}
+ static Bytecodes::Code check_defined(Bytecodes::Code c) {
+ assert(Bytecodes::is_defined(c), "");
+ return c;
+ }
+
ciMethod* _method; // the method
ciInstanceKlass* _holder;
address _bc_start; // Start of current bytecode for table
@@ -50,11 +54,21 @@ private:
address _end; // Past end of bytecodes
address _pc; // Current PC
Bytecodes::Code _bc; // Current bytecode
+ Bytecodes::Code _raw_bc; // Current bytecode, raw form
void reset( address base, unsigned int size ) {
_bc_start =_was_wide = 0;
_start = _pc = base; _end = base + size; }
+ void assert_wide(bool require_wide) const {
+ if (require_wide)
+ { assert(is_wide(), "must be a wide instruction"); }
+ else { assert(!is_wide(), "must not be a wide instruction"); }
+ }
+
+ Bytecode* bytecode() const { return Bytecode_at(_bc_start); }
+ Bytecode* next_bytecode() const { return Bytecode_at(_pc); }
+
public:
// End-Of-Bytecodes
static Bytecodes::Code EOBC() {
@@ -92,11 +106,12 @@ public:
}
address cur_bcp() const { return _bc_start; } // Returns bcp to current instruction
- int next_bci() const { return _pc -_start; }
+ int next_bci() const { return _pc - _start; }
int cur_bci() const { return _bc_start - _start; }
int instruction_size() const { return _pc - _bc_start; }
Bytecodes::Code cur_bc() const{ return check_java(_bc); }
+ Bytecodes::Code cur_bc_raw() const { return check_defined(_raw_bc); }
Bytecodes::Code next_bc() { return Bytecodes::java_code((Bytecodes::Code)* _pc); }
// Return current ByteCode and increment PC to next bytecode, skipping all
@@ -109,85 +124,76 @@ public:
// Fetch Java bytecode
// All rewritten bytecodes maintain the size of original bytecode.
- _bc = Bytecodes::java_code((Bytecodes::Code)*_pc);
+ _bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)*_pc);
int csize = Bytecodes::length_for(_bc); // Expected size
-
- if( _bc == Bytecodes::_wide ) {
- _bc=wide(); // Handle wide bytecode
- } else if( csize == 0 ) {
- _bc=table(_bc); // Handle inline tables
- } else {
- _pc += csize; // Bump PC past bytecode
+ _pc += csize; // Bump PC past bytecode
+ if (csize == 0) {
+ _bc = next_wide_or_table(_bc);
}
return check_java(_bc);
}
bool is_wide() const { return ( _pc == _was_wide ); }
+ // Does this instruction contain an index which refers into the CP cache?
+ bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
+
+ int get_index_u1() const {
+ return bytecode()->get_index_u1(cur_bc_raw());
+ }
+
// Get a byte index following this bytecode.
// If prefixed with a wide bytecode, get a wide index.
int get_index() const {
- assert_index_size(is_wide() ? 2 : 1);
return (_pc == _was_wide) // was widened?
- ? Bytes::get_Java_u2(_bc_start+2) // yes, return wide index
- : _bc_start[1]; // no, return narrow index
+ ? get_index_u2(true) // yes, return wide index
+ : get_index_u1(); // no, return narrow index
}
- // Get 2-byte index (getfield/putstatic/etc)
- int get_index_big() const {
- assert_index_size(2);
- return Bytes::get_Java_u2(_bc_start+1);
+ // Get 2-byte index (byte swapping depending on which bytecode)
+ int get_index_u2(bool is_wide = false) const {
+ return bytecode()->get_index_u2(cur_bc_raw(), is_wide);
}
- // Get 2-byte index (or 4-byte, for invokedynamic)
- int get_index_int() const {
- return has_giant_index() ? get_index_giant() : get_index_big();
+ // Get 2-byte index in native byte order. (Rewriter::rewrite makes these.)
+ int get_index_u2_cpcache() const {
+ return bytecode()->get_index_u2_cpcache(cur_bc_raw());
}
// Get 4-byte index, for invokedynamic.
- int get_index_giant() const {
- assert_index_size(4);
- return Bytes::get_native_u4(_bc_start+1);
+ int get_index_u4() const {
+ return bytecode()->get_index_u4(cur_bc_raw());
}
- bool has_giant_index() const { return (cur_bc() == Bytecodes::_invokedynamic); }
+ bool has_index_u4() const {
+ return bytecode()->has_index_u4(cur_bc_raw());
+ }
// Get dimensions byte (multinewarray)
int get_dimensions() const { return *(unsigned char*)(_pc-1); }
// Sign-extended index byte/short, no widening
- int get_byte() const { return (int8_t)(_pc[-1]); }
- int get_short() const { return (int16_t)Bytes::get_Java_u2(_pc-2); }
- int get_long() const { return (int32_t)Bytes::get_Java_u4(_pc-4); }
+ int get_constant_u1() const { return bytecode()->get_constant_u1(instruction_size()-1, cur_bc_raw()); }
+ int get_constant_u2(bool is_wide = false) const { return bytecode()->get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
// Get a byte signed constant for "iinc". Invalid for other bytecodes.
// If prefixed with a wide bytecode, get a wide constant
- int get_iinc_con() const {return (_pc==_was_wide) ? get_short() :get_byte();}
+ int get_iinc_con() const {return (_pc==_was_wide) ? (jshort) get_constant_u2(true) : (jbyte) get_constant_u1();}
// 2-byte branch offset from current pc
- int get_dest( ) const {
- assert( Bytecodes::length_at(_bc_start) == sizeof(jshort)+1, "get_dest called with bad bytecode" );
- return _bc_start-_start + (short)Bytes::get_Java_u2(_pc-2);
+ int get_dest() const {
+ return cur_bci() + bytecode()->get_offset_s2(cur_bc_raw());
}
// 2-byte branch offset from next pc
- int next_get_dest( ) const {
- address next_bc_start = _pc;
- assert( _pc < _end, "" );
- Bytecodes::Code next_bc = (Bytecodes::Code)*_pc;
- assert( next_bc != Bytecodes::_wide, "");
- int next_csize = Bytecodes::length_for(next_bc);
- assert( next_csize != 0, "" );
- assert( next_bc <= Bytecodes::_jsr_w, "");
- address next_pc = _pc + next_csize;
- assert( Bytecodes::length_at(next_bc_start) == sizeof(jshort)+1, "next_get_dest called with bad bytecode" );
- return next_bc_start-_start + (short)Bytes::get_Java_u2(next_pc-2);
+ int next_get_dest() const {
+ assert(_pc < _end, "");
+ return next_bci() + next_bytecode()->get_offset_s2(Bytecodes::_ifeq);
}
// 4-byte branch offset from current pc
- int get_far_dest( ) const {
- assert( Bytecodes::length_at(_bc_start) == sizeof(jint)+1, "dest4 called with bad bytecode" );
- return _bc_start-_start + (int)Bytes::get_Java_u4(_pc-4);
+ int get_far_dest() const {
+ return cur_bci() + bytecode()->get_offset_s4(cur_bc_raw());
}
// For a lookup or switch table, return target destination
@@ -234,22 +240,6 @@ public:
ciCPCache* get_cpcache();
ciCallSite* get_call_site();
-
- private:
- void assert_index_size(int required_size) const {
-#ifdef ASSERT
- int isize = instruction_size() - (is_wide() ? 1 : 0) - 1;
- if (isize == 2 && cur_bc() == Bytecodes::_iinc)
- isize = 1;
- else if (isize <= 2)
- ; // no change
- else if (has_giant_index())
- isize = 4;
- else
- isize = 2;
- assert(isize = required_size, "wrong index size");
-#endif
- }
};
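One detail worth flagging in the deleted assert_index_size() helper: its final check used assignment rather than comparison (assert(isize = required_size, ...)), so the assert could never fire for a nonzero size. The replacement accessors delegate validity checking to the Bytecode class instead. The comparison the old helper presumably intended:

    #include <cassert>

    static void check_index_size(int isize, int required_size) {
      // '==', not '=': the deleted form assigned and therefore always
      // passed whenever required_size was nonzero.
      assert(isize == required_size && "wrong index size");
    }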
diff --git a/src/share/vm/ci/ciTypeFlow.cpp b/src/share/vm/ci/ciTypeFlow.cpp
index 74dae3e45..c90f51fc4 100644
--- a/src/share/vm/ci/ciTypeFlow.cpp
+++ b/src/share/vm/ci/ciTypeFlow.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2132,6 +2132,7 @@ bool ciTypeFlow::can_trap(ciBytecodeStream& str) {
if (!Bytecodes::can_trap(str.cur_bc())) return false;
switch (str.cur_bc()) {
+ // %%% FIXME: ldc of Class can generate an exception
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifier.cpp
index fbd6797bd..ca76e7148 100644
--- a/src/share/vm/classfile/verifier.cpp
+++ b/src/share/vm/classfile/verifier.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -257,6 +257,9 @@ void ClassVerifier::verify_class(TRAPS) {
int num_methods = methods->length();
for (int index = 0; index < num_methods; index++) {
+ // Check for recursive re-verification before each method.
+ if (was_recursively_verified()) return;
+
methodOop m = (methodOop)methods->obj_at(index);
if (m->is_native() || m->is_abstract()) {
// If m is native or abstract, skip it. It is checked in class file
@@ -265,6 +268,12 @@ void ClassVerifier::verify_class(TRAPS) {
}
verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
}
+
+ if (_verify_verbose || TraceClassInitialization) {
+ if (was_recursively_verified())
+ tty->print_cr("Recursive verification detected for: %s",
+ _klass->external_name());
+ }
}
void ClassVerifier::verify_method(methodHandle m, TRAPS) {
@@ -329,6 +338,9 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
// instruction in sequence
Bytecodes::Code opcode;
while (!bcs.is_last_bytecode()) {
+ // Check for recursive re-verification before each bytecode.
+ if (was_recursively_verified()) return;
+
opcode = bcs.raw_next();
u2 bci = bcs.bci();
@@ -413,13 +425,13 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_ldc :
verify_ldc(
- opcode, bcs.get_index(), &current_frame,
+ opcode, bcs.get_index_u1(), &current_frame,
cp, bci, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_ldc_w :
case Bytecodes::_ldc2_w :
verify_ldc(
- opcode, bcs.get_index_big(), &current_frame,
+ opcode, bcs.get_index_u2(), &current_frame,
cp, bci, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_iload :
@@ -1185,7 +1197,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_new :
{
- index = bcs.get_index_big();
+ index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
VerificationType new_class_type =
cp_index_to_type(index, cp, CHECK_VERIFY(this));
@@ -1205,7 +1217,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_anewarray :
verify_anewarray(
- bcs.get_index_big(), cp, &current_frame, CHECK_VERIFY(this));
+ bcs.get_index_u2(), cp, &current_frame, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_arraylength :
type = current_frame.pop_stack(
@@ -1218,7 +1230,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_checkcast :
{
- index = bcs.get_index_big();
+ index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this));
@@ -1228,7 +1240,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
}
case Bytecodes::_instanceof : {
- index = bcs.get_index_big();
+ index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this));
@@ -1243,7 +1255,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_multianewarray :
{
- index = bcs.get_index_big();
+ index = bcs.get_index_u2();
u2 dim = *(bcs.bcp()+3);
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
VerificationType new_array_type =
@@ -1302,7 +1314,7 @@ char* ClassVerifier::generate_code_data(methodHandle m, u4 code_length, TRAPS) {
while (!bcs.is_last_bytecode()) {
if (bcs.raw_next() != Bytecodes::_illegal) {
int bci = bcs.bci();
- if (bcs.code() == Bytecodes::_new) {
+ if (bcs.raw_code() == Bytecodes::_new) {
code_data[bci] = NEW_OFFSET;
} else {
code_data[bci] = BYTECODE_OFFSET;
@@ -1473,20 +1485,9 @@ void ClassVerifier::verify_cp_type(
// In some situations, bytecode rewriting may occur while we're verifying.
// In this case, a constant pool cache exists and some indices refer to that
- // instead. Get the original index for the tag check
- constantPoolCacheOop cache = cp->cache();
- if (cache != NULL &&
- ((types == (1 << JVM_CONSTANT_InterfaceMethodref)) ||
- (types == (1 << JVM_CONSTANT_Methodref)) ||
- (types == (1 << JVM_CONSTANT_Fieldref)))) {
- int native_index = index;
- if (Bytes::is_Java_byte_ordering_different()) {
- native_index = Bytes::swap_u2(index);
- }
- assert((native_index >= 0) && (native_index < cache->length()),
- "Must be a legal index into the cp cache");
- index = cache->entry_at(native_index)->constant_pool_index();
- }
+ // instead. Be sure we don't pick up such indices by accident.
+ // We must check was_recursively_verified() before we get here.
+ guarantee(cp->cache() == NULL, "not rewritten yet");
verify_cp_index(cp, index, CHECK_VERIFY(this));
unsigned int tag = cp->tag_at(index).value();
@@ -1657,7 +1658,7 @@ void ClassVerifier::verify_switch(
int keys, delta;
current_frame->pop_stack(
VerificationType::integer_type(), CHECK_VERIFY(this));
- if (bcs->code() == Bytecodes::_tableswitch) {
+ if (bcs->raw_code() == Bytecodes::_tableswitch) {
jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
if (low > high) {
@@ -1713,7 +1714,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
StackMapFrame* current_frame,
constantPoolHandle cp,
TRAPS) {
- u2 index = bcs->get_index_big();
+ u2 index = bcs->get_index_u2();
verify_cp_type(index, cp, 1 << JVM_CONSTANT_Fieldref, CHECK_VERIFY(this));
// Get field name and signature
@@ -1753,7 +1754,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
&sig_stream, field_type, CHECK_VERIFY(this));
u2 bci = bcs->bci();
bool is_assignable;
- switch (bcs->code()) {
+ switch (bcs->raw_code()) {
case Bytecodes::_getstatic: {
for (int i = 0; i < n; i++) {
current_frame->push_stack(field_type[i], CHECK_VERIFY(this));
@@ -1873,7 +1874,7 @@ void ClassVerifier::verify_invoke_init(
ref_class_type.name(), CHECK_VERIFY(this));
methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method(
vmSymbols::object_initializer_name(),
- cp->signature_ref_at(bcs->get_index_big()));
+ cp->signature_ref_at(bcs->get_index_u2()));
instanceKlassHandle mh(THREAD, m->method_holder());
if (m->is_protected() && !mh->is_same_class_package(_klass())) {
bool assignable = current_type().is_assignable_from(
@@ -1896,8 +1897,8 @@ void ClassVerifier::verify_invoke_instructions(
bool *this_uninit, VerificationType return_type,
constantPoolHandle cp, TRAPS) {
// Make sure the constant pool item is the right type
- u2 index = bcs->get_index_big();
- Bytecodes::Code opcode = bcs->code();
+ u2 index = bcs->get_index_u2();
+ Bytecodes::Code opcode = bcs->raw_code();
unsigned int types = (opcode == Bytecodes::_invokeinterface
? 1 << JVM_CONSTANT_InterfaceMethodref
: opcode == Bytecodes::_invokedynamic
diff --git a/src/share/vm/classfile/verifier.hpp b/src/share/vm/classfile/verifier.hpp
index f4a6ea4ee..59cd08559 100644
--- a/src/share/vm/classfile/verifier.hpp
+++ b/src/share/vm/classfile/verifier.hpp
@@ -158,6 +158,16 @@ class ClassVerifier : public StackObj {
methodHandle _method; // current method being verified
VerificationType _this_type; // the verification type of the current class
+ // Some recursive calls from the verifier to the name resolver
+ // can cause the current class to be re-verified and rewritten.
+ // If this happens, the original verification should not continue,
+ // because constant pool indexes will have changed.
+ // The rewriter is preceded by the verifier. If the verifier throws
+ // an error, rewriting is prevented. Also, rewriting always precedes
+ // bytecode execution or compilation. Thus, is_rewritten implies
+ // that a class has been verified and prepared for execution.
+ bool was_recursively_verified() { return _klass->is_rewritten(); }
+
public:
enum {
BYTECODE_OFFSET = 1,
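The guard relies on the ordering invariant spelled out in the comment above: rewriting strictly follows successful verification, so if a nested resolution re-verified (and rewrote) this class, the in-progress pass is looking at stale constant-pool indexes and must abandon its work. A minimal model of the bail-out, with hypothetical names:

    // Verification loops re-check a flag that only a completed, nested
    // verification of the same class can set.
    struct KlassState { bool rewritten = false; };

    static void verify_methods(const KlassState& k, int num_methods) {
      for (int i = 0; i < num_methods; i++) {
        if (k.rewritten) return;  // recursive pass finished; indexes changed
        // ... verify method i ...
      }
    }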
diff --git a/src/share/vm/code/codeBlob.cpp b/src/share/vm/code/codeBlob.cpp
index f9da9ea55..2b8410105 100644
--- a/src/share/vm/code/codeBlob.cpp
+++ b/src/share/vm/code/codeBlob.cpp
@@ -66,8 +66,6 @@ CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_comple
_relocation_size = locs_size;
_instructions_offset = align_code_offset(header_size + locs_size);
_data_offset = size;
- _oops_offset = size;
- _oops_length = 0;
_frame_size = 0;
set_oop_maps(NULL);
}
@@ -94,9 +92,6 @@ CodeBlob::CodeBlob(
_relocation_size = round_to(cb->total_relocation_size(), oopSize);
_instructions_offset = align_code_offset(header_size + _relocation_size);
_data_offset = _instructions_offset + round_to(cb->total_code_size(), oopSize);
- _oops_offset = _size - round_to(cb->total_oop_size(), oopSize);
- _oops_length = 0; // temporary, until the copy_oops handshake
- assert(_oops_offset >= _data_offset, "codeBlob is too small");
assert(_data_offset <= size, "codeBlob is too small");
cb->copy_code_and_locs_to(this);
@@ -131,99 +126,6 @@ void CodeBlob::flush() {
}
-// Promote one word from an assembly-time handle to a live embedded oop.
-inline void CodeBlob::initialize_immediate_oop(oop* dest, jobject handle) {
- if (handle == NULL ||
- // As a special case, IC oops are initialized to 1 or -1.
- handle == (jobject) Universe::non_oop_word()) {
- (*dest) = (oop)handle;
- } else {
- (*dest) = JNIHandles::resolve_non_null(handle);
- }
-}
-
-
-void CodeBlob::copy_oops(GrowableArray<jobject>* array) {
- assert(_oops_length == 0, "do this handshake just once, please");
- int length = array->length();
- assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
- oop* dest = oops_begin();
- for (int index = 0 ; index < length; index++) {
- initialize_immediate_oop(&dest[index], array->at(index));
- }
- _oops_length = length;
-
- // Now we can fix up all the oops in the code.
- // We need to do this in the code because
- // the assembler uses jobjects as placeholders.
- // The code and relocations have already been
- // initialized by the CodeBlob constructor,
- // so it is valid even at this early point to
- // iterate over relocations and patch the code.
- fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
-}
-
-
-relocInfo::relocType CodeBlob::reloc_type_for_address(address pc) {
- RelocIterator iter(this, pc, pc+1);
- while (iter.next()) {
- return (relocInfo::relocType) iter.type();
- }
- // No relocation info found for pc
- ShouldNotReachHere();
- return relocInfo::none; // dummy return value
-}
-
-
-bool CodeBlob::is_at_poll_return(address pc) {
- RelocIterator iter(this, pc, pc+1);
- while (iter.next()) {
- if (iter.type() == relocInfo::poll_return_type)
- return true;
- }
- return false;
-}
-
-
-bool CodeBlob::is_at_poll_or_poll_return(address pc) {
- RelocIterator iter(this, pc, pc+1);
- while (iter.next()) {
- relocInfo::relocType t = iter.type();
- if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
- return true;
- }
- return false;
-}
-
-
-void CodeBlob::fix_oop_relocations(address begin, address end,
- bool initialize_immediates) {
- // re-patch all oop-bearing instructions, just in case some oops moved
- RelocIterator iter(this, begin, end);
- while (iter.next()) {
- if (iter.type() == relocInfo::oop_type) {
- oop_Relocation* reloc = iter.oop_reloc();
- if (initialize_immediates && reloc->oop_is_immediate()) {
- oop* dest = reloc->oop_addr();
- initialize_immediate_oop(dest, (jobject) *dest);
- }
- // Refresh the oop-related bits of this instruction.
- reloc->fix_oop_relocation();
- }
-
- // There must not be any interfering patches or breakpoints.
- assert(!(iter.type() == relocInfo::breakpoint_type
- && iter.breakpoint_reloc()->active()),
- "no active breakpoint");
- }
-}
-
-void CodeBlob::do_unloading(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- bool unloading_occurred) {
- ShouldNotReachHere();
-}
-
OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
address pc = return_address ;
assert (oop_maps() != NULL, "nope");
diff --git a/src/share/vm/code/codeBlob.hpp b/src/share/vm/code/codeBlob.hpp
index 5f00a5e3f..1fc0c2f0c 100644
--- a/src/share/vm/code/codeBlob.hpp
+++ b/src/share/vm/code/codeBlob.hpp
@@ -54,17 +54,12 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
// that range. There is a similar range(s) on returns
// which we don't detect.
int _data_offset; // offset to where data region begins
- int _oops_offset; // offset to where embedded oop table begins (inside data)
- int _oops_length; // number of embedded oops
int _frame_size; // size of stack frame
OopMapSet* _oop_maps; // OopMap for this CodeBlob
CodeComments _comments;
friend class OopRecorder;
- void fix_oop_relocations(address begin, address end, bool initialize_immediates);
- inline void initialize_immediate_oop(oop* dest, jobject handle);
-
public:
// Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size);
@@ -115,14 +110,11 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
address instructions_end() const { return (address) header_begin() + _data_offset; }
address data_begin() const { return (address) header_begin() + _data_offset; }
address data_end() const { return (address) header_begin() + _size; }
- oop* oops_begin() const { return (oop*) (header_begin() + _oops_offset); }
- oop* oops_end() const { return oops_begin() + _oops_length; }
// Offsets
int relocation_offset() const { return _header_size; }
int instructions_offset() const { return _instructions_offset; }
int data_offset() const { return _data_offset; }
- int oops_offset() const { return _oops_offset; }
// Sizes
int size() const { return _size; }
@@ -130,40 +122,16 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
int relocation_size() const { return (address) relocation_end() - (address) relocation_begin(); }
int instructions_size() const { return instructions_end() - instructions_begin(); }
int data_size() const { return data_end() - data_begin(); }
- int oops_size() const { return (address) oops_end() - (address) oops_begin(); }
// Containment
bool blob_contains(address addr) const { return header_begin() <= addr && addr < data_end(); }
bool relocation_contains(relocInfo* addr) const{ return relocation_begin() <= addr && addr < relocation_end(); }
bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); }
bool data_contains(address addr) const { return data_begin() <= addr && addr < data_end(); }
- bool oops_contains(oop* addr) const { return oops_begin() <= addr && addr < oops_end(); }
bool contains(address addr) const { return instructions_contains(addr); }
bool is_frame_complete_at(address addr) const { return instructions_contains(addr) &&
addr >= instructions_begin() + _frame_complete_offset; }
- // Relocation support
- void fix_oop_relocations(address begin, address end) {
- fix_oop_relocations(begin, end, false);
- }
- void fix_oop_relocations() {
- fix_oop_relocations(NULL, NULL, false);
- }
- relocInfo::relocType reloc_type_for_address(address pc);
- bool is_at_poll_return(address pc);
- bool is_at_poll_or_poll_return(address pc);
-
- // Support for oops in scopes and relocs:
- // Note: index 0 is reserved for null.
- oop oop_at(int index) const { return index == 0? (oop)NULL: *oop_addr_at(index); }
- oop* oop_addr_at(int index) const{ // for GC
- // relocation indexes are biased by 1 (because 0 is reserved)
- assert(index > 0 && index <= _oops_length, "must be a valid non-zero index");
- return &oops_begin()[index-1];
- }
-
- void copy_oops(GrowableArray<jobject>* oops);
-
// CodeCache support: really only used by the nmethods, but in order to get
// asserts and certain bookkeeping to work in the CodeCache they are defined
// virtual here.
@@ -175,12 +143,6 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
// GC support
virtual bool is_alive() const = 0;
- virtual void do_unloading(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- bool unloading_occurred);
- virtual void oops_do(OopClosure* f) = 0;
- // (All CodeBlob subtypes other than NMethod currently have
- // an empty oops_do() method.
// OopMap for frame
OopMapSet* oop_maps() const { return _oop_maps; }
@@ -245,11 +207,6 @@ class BufferBlob: public CodeBlob {
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ }
bool is_alive() const { return true; }
- void do_unloading(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- bool unloading_occurred) { /* do nothing */ }
-
- void oops_do(OopClosure* f) { /* do nothing*/ }
void verify();
void print() const PRODUCT_RETURN;
@@ -334,10 +291,6 @@ class RuntimeStub: public CodeBlob {
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
bool is_alive() const { return true; }
- void do_unloading(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- bool unloading_occurred) { /* do nothing */ }
- void oops_do(OopClosure* f) { /* do-nothing*/ }
void verify();
void print() const PRODUCT_RETURN;
@@ -363,9 +316,6 @@ class SingletonBlob: public CodeBlob {
{};
bool is_alive() const { return true; }
- void do_unloading(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- bool unloading_occurred) { /* do-nothing*/ }
void verify(); // does nothing
void print() const PRODUCT_RETURN;
@@ -423,9 +373,6 @@ class DeoptimizationBlob: public SingletonBlob {
// GC for args
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
- // Iteration
- void oops_do(OopClosure* f) {}
-
// Printing
void print_value_on(outputStream* st) const PRODUCT_RETURN;
@@ -477,9 +424,6 @@ class UncommonTrapBlob: public SingletonBlob {
// Typing
bool is_uncommon_trap_stub() const { return true; }
-
- // Iteration
- void oops_do(OopClosure* f) {}
};
@@ -512,9 +456,6 @@ class ExceptionBlob: public SingletonBlob {
// Typing
bool is_exception_stub() const { return true; }
-
- // Iteration
- void oops_do(OopClosure* f) {}
};
#endif // COMPILER2
@@ -548,7 +489,4 @@ class SafepointBlob: public SingletonBlob {
// Typing
bool is_safepoint_stub() const { return true; }
-
- // Iteration
- void oops_do(OopClosure* f) {}
};
diff --git a/src/share/vm/code/codeCache.cpp b/src/share/vm/code/codeCache.cpp
index 763305829..54767b369 100644
--- a/src/share/vm/code/codeCache.cpp
+++ b/src/share/vm/code/codeCache.cpp
@@ -74,12 +74,12 @@ class CodeBlob_sizes {
total_size += cb->size();
header_size += cb->header_size();
relocation_size += cb->relocation_size();
- scopes_oop_size += cb->oops_size();
if (cb->is_nmethod()) {
- nmethod *nm = (nmethod*)cb;
+ nmethod* nm = cb->as_nmethod_or_null();
code_size += nm->code_size();
stub_size += nm->stub_size();
+ scopes_oop_size += nm->oops_size();
scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size();
} else {
@@ -262,14 +262,14 @@ int CodeCache::alignment_offset() {
}
-// Mark code blobs for unloading if they contain otherwise
-// unreachable oops.
+// Mark nmethods for unloading if they contain otherwise unreachable
+// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock);
- FOR_ALL_ALIVE_BLOBS(cb) {
- cb->do_unloading(is_alive, keep_alive, unloading_occurred);
+ FOR_ALL_ALIVE_NMETHODS(nm) {
+ nm->do_unloading(is_alive, keep_alive, unloading_occurred);
}
}
@@ -509,9 +509,9 @@ void CodeCache::gc_epilogue() {
if (needs_cache_clean()) {
nm->cleanup_inline_caches();
}
- debug_only(nm->verify();)
+ DEBUG_ONLY(nm->verify());
+ nm->fix_oop_relocations();
}
- cb->fix_oop_relocations();
}
set_needs_cache_clean(false);
prune_scavenge_root_nmethods();
diff --git a/src/share/vm/code/compiledIC.cpp b/src/share/vm/code/compiledIC.cpp
index 9abc5f8e3..d2c8e2752 100644
--- a/src/share/vm/code/compiledIC.cpp
+++ b/src/share/vm/code/compiledIC.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -441,11 +441,11 @@ void CompiledIC::compute_monomorphic_entry(methodHandle method,
}
-inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) {
+inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
address first_oop = NULL;
// Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
- CodeBlob *code1 = code;
- return virtual_call_Relocation::parse_ic(code1, ic_call, first_oop, _oop_addr, is_optimized);
+ nmethod* tmp_nm = nm;
+ return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
}
CompiledIC::CompiledIC(NativeCall* ic_call)
diff --git a/src/share/vm/code/nmethod.cpp b/src/share/vm/code/nmethod.cpp
index 50d4d01fc..106ff2f03 100644
--- a/src/share/vm/code/nmethod.cpp
+++ b/src/share/vm/code/nmethod.cpp
@@ -99,12 +99,12 @@ struct nmethod_stats_struct {
code_size += nm->code_size();
stub_size += nm->stub_size();
consts_size += nm->consts_size();
+ oops_size += nm->oops_size();
scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size();
dependencies_size += nm->dependencies_size();
handler_table_size += nm->handler_table_size();
nul_chk_table_size += nm->nul_chk_table_size();
- oops_size += nm->oops_size();
}
void print_nmethod_stats() {
if (nmethod_count == 0) return;
@@ -114,12 +114,12 @@ struct nmethod_stats_struct {
if (code_size != 0) tty->print_cr(" main code = %d", code_size);
if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size);
if (consts_size != 0) tty->print_cr(" constants = %d", consts_size);
+ if (oops_size != 0) tty->print_cr(" oops = %d", oops_size);
if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size);
if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size);
if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size);
if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size);
if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size);
- if (oops_size != 0) tty->print_cr(" oops = %d", oops_size);
}
int native_nmethod_count;
@@ -600,7 +600,8 @@ nmethod::nmethod(
#endif // def HAVE_DTRACE_H
_stub_offset = data_offset();
_consts_offset = data_offset();
- _scopes_data_offset = data_offset();
+ _oops_offset = data_offset();
+ _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
@@ -690,7 +691,8 @@ nmethod::nmethod(
_orig_pc_offset = 0;
_stub_offset = data_offset();
_consts_offset = data_offset();
- _scopes_data_offset = data_offset();
+ _oops_offset = data_offset();
+ _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
@@ -805,8 +807,9 @@ nmethod::nmethod(
_unwind_handler_offset = -1;
}
_consts_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
- _scopes_data_offset = data_offset();
- _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
+ _oops_offset = data_offset();
+ _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size (), oopSize);
+ _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
_dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
_handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes (), oopSize);
_nul_chk_table_offset = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
@@ -990,6 +993,79 @@ void nmethod::set_version(int v) {
}
+// Promote one word from an assembly-time handle to a live embedded oop.
+inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
+ if (handle == NULL ||
+ // As a special case, IC oops are initialized to 1 or -1.
+ handle == (jobject) Universe::non_oop_word()) {
+ (*dest) = (oop) handle;
+ } else {
+ (*dest) = JNIHandles::resolve_non_null(handle);
+ }
+}
+
+
+void nmethod::copy_oops(GrowableArray<jobject>* array) {
+ //assert(oops_size() == 0, "do this handshake just once, please");
+ int length = array->length();
+ assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
+ oop* dest = oops_begin();
+ for (int index = 0 ; index < length; index++) {
+ initialize_immediate_oop(&dest[index], array->at(index));
+ }
+
+ // Now we can fix up all the oops in the code. We need to do this
+ // in the code because the assembler uses jobjects as placeholders.
+ // The code and relocations have already been initialized by the
+ // CodeBlob constructor, so it is valid even at this early point to
+ // iterate over relocations and patch the code.
+ fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
+}
+
+
+bool nmethod::is_at_poll_return(address pc) {
+ RelocIterator iter(this, pc, pc+1);
+ while (iter.next()) {
+ if (iter.type() == relocInfo::poll_return_type)
+ return true;
+ }
+ return false;
+}
+
+
+bool nmethod::is_at_poll_or_poll_return(address pc) {
+ RelocIterator iter(this, pc, pc+1);
+ while (iter.next()) {
+ relocInfo::relocType t = iter.type();
+ if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
+ return true;
+ }
+ return false;
+}
+
+
+void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
+ // re-patch all oop-bearing instructions, just in case some oops moved
+ RelocIterator iter(this, begin, end);
+ while (iter.next()) {
+ if (iter.type() == relocInfo::oop_type) {
+ oop_Relocation* reloc = iter.oop_reloc();
+ if (initialize_immediates && reloc->oop_is_immediate()) {
+ oop* dest = reloc->oop_addr();
+ initialize_immediate_oop(dest, (jobject) *dest);
+ }
+ // Refresh the oop-related bits of this instruction.
+ reloc->fix_oop_relocation();
+ }
+
+ // There must not be any interfering patches or breakpoints.
+ assert(!(iter.type() == relocInfo::breakpoint_type
+ && iter.breakpoint_reloc()->active()),
+ "no active breakpoint");
+ }
+}
+
+
ScopeDesc* nmethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc);
guarantee(pd != NULL, "scope must be present");
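Note: copy_oops() and fix_oop_relocations() above form a two-phase handshake. A hedged caller-side sketch, assuming 'recorder' is the compiler's OopRecorder and 'nm' a freshly allocated nmethod (OopRecorder::copy_to is retargeted to nmethod later in this diff):

  // Phase 1, at installation: resolve jobject placeholders into live oops.
  recorder->copy_to(nm);       // calls nm->copy_oops(handles), which also runs
                               // fix_oop_relocations(NULL, NULL, true) once
  // Phase 2, after any GC that moved objects:
  nm->fix_oop_relocations();   // refresh oop-bearing instructions in place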
@@ -1266,19 +1342,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earlier if the GC marked it for unloading).
if (state == zombie) {
-
- DTRACE_METHOD_UNLOAD_PROBE(method());
-
- if (JvmtiExport::should_post_compiled_method_unload() &&
- !unload_reported()) {
- assert(method() != NULL, "checking");
- {
- HandleMark hm;
- JvmtiExport::post_compiled_method_unload_at_safepoint(
- method()->jmethod_id(), code_begin());
- }
- set_unload_reported();
- }
+ post_compiled_method_unload();
}
@@ -1430,6 +1494,12 @@ void nmethod::post_compiled_method_load_event() {
}
void nmethod::post_compiled_method_unload() {
+ if (unload_reported()) {
+ // During unloading we transition to unloaded and then to zombie
+ // and the unloading is reported during the first transition.
+ return;
+ }
+
assert(_method != NULL && !is_unloaded(), "just checking");
DTRACE_METHOD_UNLOAD_PROBE(method());
@@ -1439,8 +1509,7 @@ void nmethod::post_compiled_method_unload() {
if (JvmtiExport::should_post_compiled_method_unload()) {
assert(!unload_reported(), "already unloaded");
HandleMark hm;
- JvmtiExport::post_compiled_method_unload_at_safepoint(
- method()->jmethod_id(), code_begin());
+ JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin());
}
// The JVMTI CompiledMethodUnload event can be enabled or disabled at
@@ -2282,6 +2351,10 @@ void nmethod::print() const {
consts_begin(),
consts_end(),
consts_size());
+ if (oops_size () > 0) tty->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+ oops_begin(),
+ oops_end(),
+ oops_size());
if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
scopes_data_begin(),
scopes_data_end(),
diff --git a/src/share/vm/code/nmethod.hpp b/src/share/vm/code/nmethod.hpp
index b74541d2a..28f60296f 100644
--- a/src/share/vm/code/nmethod.hpp
+++ b/src/share/vm/code/nmethod.hpp
@@ -105,6 +105,7 @@ struct nmFlags {
// [Relocation]
// - relocation information
// - constant part (doubles, longs and floats used in nmethod)
+// - oop table
// [Code]
// - code body
// - exception handler
@@ -161,6 +162,7 @@ class nmethod : public CodeBlob {
#endif // def HAVE_DTRACE_H
int _stub_offset;
int _consts_offset;
+ int _oops_offset; // offset to where embedded oop table begins (inside data)
int _scopes_data_offset;
int _scopes_pcs_offset;
int _dependencies_offset;
@@ -347,7 +349,10 @@ class nmethod : public CodeBlob {
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return header_begin() + _consts_offset ; }
address consts_begin () const { return header_begin() + _consts_offset ; }
- address consts_end () const { return header_begin() + _scopes_data_offset ; }
+ address consts_end () const { return header_begin() + _oops_offset ; }
+ oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
+ oop* oops_end () const { return (oop*) (header_begin() + _scopes_data_offset) ; }
+
address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
@@ -359,20 +364,24 @@ class nmethod : public CodeBlob {
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
- int code_size () const { return code_end () - code_begin (); }
- int stub_size () const { return stub_end () - stub_begin (); }
- int consts_size () const { return consts_end () - consts_begin (); }
- int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
- int scopes_pcs_size () const { return (intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin (); }
- int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
- int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
- int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
+ // Sizes
+ int code_size () const { return code_end () - code_begin (); }
+ int stub_size () const { return stub_end () - stub_begin (); }
+ int consts_size () const { return consts_end () - consts_begin (); }
+ int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
+ int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
+ int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
+ int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
+ int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
+ int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
int total_size () const;
+ // Containment
bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); }
bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
+ bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
@@ -431,6 +440,29 @@ class nmethod : public CodeBlob {
int version() const { return flags.version; }
void set_version(int v);
+ // Support for oops in scopes and relocs:
+ // Note: index 0 is reserved for null.
+ oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
+ oop* oop_addr_at(int index) const { // for GC
+ // relocation indexes are biased by 1 (because 0 is reserved)
+ assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
+ return &oops_begin()[index - 1];
+ }
+
+ void copy_oops(GrowableArray<jobject>* oops);
+
+ // Relocation support
+private:
+ void fix_oop_relocations(address begin, address end, bool initialize_immediates);
+ inline void initialize_immediate_oop(oop* dest, jobject handle);
+
+public:
+ void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
+ void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
+
+ bool is_at_poll_return(address pc);
+ bool is_at_poll_or_poll_return(address pc);
+
// Non-perm oop support
bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
protected:
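Note: since index 0 is reserved for NULL, oop_at(1) maps to oops_begin()[0], oop_at(2) to oops_begin()[1], and so on. A minimal GC-style walk over the table, assuming 'f' is an OopClosure* (sketch only):

  for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
    f->do_oop(p);   // visit each embedded oop slot in the table
  }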
@@ -511,8 +543,8 @@ class nmethod : public CodeBlob {
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f);
- virtual void oops_do(OopClosure* f) { oops_do(f, false); }
- void oops_do(OopClosure* f, bool do_strong_roots_only);
+ void oops_do(OopClosure* f) { oops_do(f, false); }
+ void oops_do(OopClosure* f, bool do_strong_roots_only);
bool detect_scavenge_root_oops();
void verify_scavenge_root_oops() PRODUCT_RETURN;
diff --git a/src/share/vm/code/oopRecorder.cpp b/src/share/vm/code/oopRecorder.cpp
index 4a368c4ee..7688079b1 100644
--- a/src/share/vm/code/oopRecorder.cpp
+++ b/src/share/vm/code/oopRecorder.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,10 +50,10 @@ int OopRecorder::oop_size() {
return _handles->length() * sizeof(oop);
}
-void OopRecorder::copy_to(CodeBlob* code) {
+void OopRecorder::copy_to(nmethod* nm) {
assert(_complete, "must be frozen");
maybe_initialize(); // get non-null handles, even if we have no oops
- code->copy_oops(_handles);
+ nm->copy_oops(_handles);
}
void OopRecorder::maybe_initialize() {
diff --git a/src/share/vm/code/oopRecorder.hpp b/src/share/vm/code/oopRecorder.hpp
index da686476d..fda4d2905 100644
--- a/src/share/vm/code/oopRecorder.hpp
+++ b/src/share/vm/code/oopRecorder.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,8 +70,8 @@ class OopRecorder : public ResourceObj {
return _handles->length() + first_index;
}
- // copy the generated oop table to CodeBlob
- void copy_to(CodeBlob* code); // => code->copy_oops(_handles)
+ // copy the generated oop table to nmethod
+ void copy_to(nmethod* nm); // => nm->copy_oops(_handles)
bool is_unused() { return _handles == NULL && !_complete; }
#ifdef ASSERT
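Note: a usage sketch of the recorder lifecycle. find_index(jobject) is part of the existing OopRecorder interface, not shown in this hunk, so treat the snippet as illustrative:

  int idx = recorder->find_index(handle);  // during code emission: map an oop
                                           // constant to a table index
  // the emitted code refers to table slot 'idx' via an oop relocation
  recorder->copy_to(nm);                   // at installation: freeze the table
                                           // and hand it to nm->copy_oops()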
diff --git a/src/share/vm/code/relocInfo.cpp b/src/share/vm/code/relocInfo.cpp
index ef5955498..6afb3c54d 100644
--- a/src/share/vm/code/relocInfo.cpp
+++ b/src/share/vm/code/relocInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -115,24 +115,25 @@ void relocInfo::remove_reloc_info_for_address(RelocIterator *itr, address pc, re
// ----------------------------------------------------------------------------------------------------
// Implementation of RelocIterator
-void RelocIterator::initialize(CodeBlob* cb, address begin, address limit) {
+void RelocIterator::initialize(nmethod* nm, address begin, address limit) {
initialize_misc();
- if (cb == NULL && begin != NULL) {
- // allow CodeBlob to be deduced from beginning address
- cb = CodeCache::find_blob(begin);
+ if (nm == NULL && begin != NULL) {
+ // allow nmethod to be deduced from beginning address
+ CodeBlob* cb = CodeCache::find_blob(begin);
+ nm = cb->as_nmethod_or_null();
}
- assert(cb != NULL, "must be able to deduce nmethod from other arguments");
+ assert(nm != NULL, "must be able to deduce nmethod from other arguments");
- _code = cb;
- _current = cb->relocation_begin()-1;
- _end = cb->relocation_end();
- _addr = (address) cb->instructions_begin();
+ _code = nm;
+ _current = nm->relocation_begin() - 1;
+ _end = nm->relocation_end();
+ _addr = (address) nm->instructions_begin();
assert(!has_current(), "just checking");
- address code_end = cb->instructions_end();
+ address code_end = nm->instructions_end();
- assert(begin == NULL || begin >= cb->instructions_begin(), "in bounds");
+ assert(begin == NULL || begin >= nm->instructions_begin(), "in bounds");
// FIX THIS assert(limit == NULL || limit <= code_end, "in bounds");
set_limits(begin, limit);
}
@@ -754,7 +755,7 @@ oop* oop_Relocation::oop_addr() {
// oop is stored in the code stream
return (oop*) pd_address_in_code();
} else {
- // oop is stored in table at CodeBlob::oops_begin
+ // oop is stored in table at nmethod::oops_begin
return code()->oop_addr_at(n);
}
}
@@ -776,26 +777,28 @@ void oop_Relocation::fix_oop_relocation() {
}
-RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_call, address &first_oop,
+RelocIterator virtual_call_Relocation::parse_ic(nmethod* &nm, address &ic_call, address &first_oop,
oop* &oop_addr, bool *is_optimized) {
assert(ic_call != NULL, "ic_call address must be set");
assert(ic_call != NULL || first_oop != NULL, "must supply a non-null input");
- if (code == NULL) {
+ if (nm == NULL) {
+ CodeBlob* code;
if (ic_call != NULL) {
code = CodeCache::find_blob(ic_call);
} else if (first_oop != NULL) {
code = CodeCache::find_blob(first_oop);
}
- assert(code != NULL, "address to parse must be in CodeBlob");
+ nm = code->as_nmethod_or_null();
+ assert(nm != NULL, "address to parse must be in nmethod");
}
- assert(ic_call == NULL || code->contains(ic_call), "must be in CodeBlob");
- assert(first_oop == NULL || code->contains(first_oop), "must be in CodeBlob");
+ assert(ic_call == NULL || nm->contains(ic_call), "must be in nmethod");
+ assert(first_oop == NULL || nm->contains(first_oop), "must be in nmethod");
address oop_limit = NULL;
if (ic_call != NULL) {
// search for the ic_call at the given address
- RelocIterator iter(code, ic_call, ic_call+1);
+ RelocIterator iter(nm, ic_call, ic_call+1);
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
@@ -814,7 +817,7 @@ RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_cal
}
// search for the first_oop, to get its oop_addr
- RelocIterator all_oops(code, first_oop);
+ RelocIterator all_oops(nm, first_oop);
RelocIterator iter = all_oops;
iter.set_limit(first_oop+1);
bool found_oop = false;
@@ -842,7 +845,7 @@ RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_cal
}
}
guarantee(!did_reset, "cannot find ic_call");
- iter = RelocIterator(code); // search the whole CodeBlob
+ iter = RelocIterator(nm); // search the whole nmethod
did_reset = true;
}
@@ -1175,9 +1178,9 @@ void RelocIterator::print() {
// For the debugger:
extern "C"
-void print_blob_locs(CodeBlob* cb) {
- cb->print();
- RelocIterator iter(cb);
+void print_blob_locs(nmethod* nm) {
+ nm->print();
+ RelocIterator iter(nm);
iter.print();
}
extern "C"
diff --git a/src/share/vm/code/relocInfo.hpp b/src/share/vm/code/relocInfo.hpp
index 8c4723ef4..7bb188737 100644
--- a/src/share/vm/code/relocInfo.hpp
+++ b/src/share/vm/code/relocInfo.hpp
@@ -512,7 +512,7 @@ class RelocIterator : public StackObj {
address _limit; // stop producing relocations after this _addr
relocInfo* _current; // the current relocation information
relocInfo* _end; // end marker; we're done iterating when _current == _end
- CodeBlob* _code; // compiled method containing _addr
+ nmethod* _code; // compiled method containing _addr
address _addr; // instruction to which the relocation applies
short _databuf; // spare buffer for compressed data
short* _data; // pointer to the relocation's data
@@ -549,7 +549,7 @@ class RelocIterator : public StackObj {
address compute_section_start(int n) const; // out-of-line helper
- void initialize(CodeBlob* nm, address begin, address limit);
+ void initialize(nmethod* nm, address begin, address limit);
friend class PatchingRelocIterator;
// make an uninitialized one, for PatchingRelocIterator:
@@ -557,7 +557,7 @@ class RelocIterator : public StackObj {
public:
// constructor
- RelocIterator(CodeBlob* cb, address begin = NULL, address limit = NULL);
+ RelocIterator(nmethod* nm, address begin = NULL, address limit = NULL);
RelocIterator(CodeSection* cb, address begin = NULL, address limit = NULL);
// get next reloc info, return !eos
@@ -592,7 +592,7 @@ class RelocIterator : public StackObj {
relocType type() const { return current()->type(); }
int format() const { return (relocInfo::have_format) ? current()->format() : 0; }
address addr() const { return _addr; }
- CodeBlob* code() const { return _code; }
+ nmethod* code() const { return _code; }
short* data() const { return _data; }
int datalen() const { return _datalen; }
bool has_current() const { return _datalen >= 0; }
@@ -790,9 +790,9 @@ class Relocation VALUE_OBJ_CLASS_SPEC {
public:
// accessors which only make sense for a bound Relocation
- address addr() const { return binding()->addr(); }
- CodeBlob* code() const { return binding()->code(); }
- bool addr_in_const() const { return binding()->addr_in_const(); }
+ address addr() const { return binding()->addr(); }
+ nmethod* code() const { return binding()->code(); }
+ bool addr_in_const() const { return binding()->addr_in_const(); }
protected:
short* data() const { return binding()->data(); }
int datalen() const { return binding()->datalen(); }
@@ -982,12 +982,12 @@ class virtual_call_Relocation : public CallRelocation {
// Figure out where an ic_call is hiding, given a set-oop or call.
// Either ic_call or first_oop must be non-null; the other is deduced.
- // Code if non-NULL must be the CodeBlob, else it is deduced.
+ // Code if non-NULL must be the nmethod, else it is deduced.
// The address of the patchable oop is also deduced.
// The returned iterator will enumerate over the oops and the ic_call,
// as well as any other relocations that happen to be in that span of code.
// Recognize relevant set_oops with: oop_reloc()->oop_addr() == oop_addr.
- static RelocIterator parse_ic(CodeBlob* &code, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized);
+ static RelocIterator parse_ic(nmethod* &nm, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized);
};
@@ -1304,8 +1304,8 @@ inline name##_Relocation* RelocIterator::name##_reloc() { \
APPLY_TO_RELOCATIONS(EACH_CASE);
#undef EACH_CASE
-inline RelocIterator::RelocIterator(CodeBlob* cb, address begin, address limit) {
- initialize(cb, begin, limit);
+inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) {
+ initialize(nm, begin, limit);
}
// if you are going to patch code, you should use this subclass of
@@ -1323,8 +1323,8 @@ class PatchingRelocIterator : public RelocIterator {
void operator=(const RelocIterator&);
public:
- PatchingRelocIterator(CodeBlob* cb, address begin =NULL, address limit =NULL)
- : RelocIterator(cb, begin, limit) { prepass(); }
+ PatchingRelocIterator(nmethod* nm, address begin = NULL, address limit = NULL)
+ : RelocIterator(nm, begin, limit) { prepass(); }
~PatchingRelocIterator() { postpass(); }
};
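Note: the constructor/destructor pair above gives RAII-style patching. A hedged usage sketch:

  {
    PatchingRelocIterator iter(nm);  // prepass() prepares the code for patching
    while (iter.next()) {
      // patch instructions while relocations are quiescent
    }
  }                                  // postpass() runs automatically on scope exit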
diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp
index e5603b46f..9ae477e3e 100644
--- a/src/share/vm/compiler/compileBroker.cpp
+++ b/src/share/vm/compiler/compileBroker.cpp
@@ -1651,14 +1651,15 @@ void CompileBroker::handle_full_code_cache() {
log->stamp();
log->end_elem();
}
- #ifndef PRODUCT
- warning("CodeCache is full. Compiler has been disabled");
+ warning("CodeCache is full. Compiler has been disabled.");
+ warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
+#ifndef PRODUCT
if (CompileTheWorld || ExitOnFullCodeCache) {
before_exit(JavaThread::current());
exit_globals(); // will delete tty
vm_direct_exit(CompileTheWorld ? 0 : 1);
}
- #endif
+#endif
if (UseCodeCacheFlushing) {
NMethodSweeper::handle_full_code_cache(true);
} else {
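Note: the new warning points users at the code cache sizing flag, e.g. starting the VM with -XX:ReservedCodeCacheSize=256m (the 256m value is illustrative, not a recommendation).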
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
index fb4454bd9..28f6ef4b0 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -32,6 +32,23 @@
// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
+// Defaults are 0 so things will break badly if incorrectly initialized.
+int CompactibleFreeListSpace::IndexSetStart = 0;
+int CompactibleFreeListSpace::IndexSetStride = 0;
+
+size_t MinChunkSize = 0;
+
+void CompactibleFreeListSpace::set_cms_values() {
+ // Set CMS global values
+ assert(MinChunkSize == 0, "already set");
+ #define numQuanta(x,y) ((x+y-1)/y)
+ MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
+
+ assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
+ IndexSetStart = MinObjAlignment;
+ IndexSetStride = MinObjAlignment;
+}
+
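Note: a worked example of the quanta arithmetic under assumed LP64 sizes (sizeof(FreeChunk) and the alignment values vary by build):

  // sizeof(FreeChunk)      == 24 bytes   (_size plus two list pointers, assumed)
  // MinObjAlignmentInBytes == 8
  // numQuanta(24, 8)       == (24 + 8 - 1) / 8 == 3
  // MinObjAlignment        == 1 heap word
  // MinChunkSize           == 3 * 1 == 3 heap words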
// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
MemRegion mr, bool use_adaptive_freelists,
@@ -302,7 +319,7 @@ size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
size_t count = 0;
- for (int i = MinChunkSize; i < IndexSetSize; i++) {
+ for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
debug_only(
ssize_t total_list_count = 0;
for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
index 69670ef1b..8bca5df52 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
@@ -91,10 +91,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
enum SomeConstants {
SmallForLinearAlloc = 16, // size < this then use _sLAB
SmallForDictionary = 257, // size < this then use _indexedFreeList
- IndexSetSize = SmallForDictionary, // keep this odd-sized
- IndexSetStart = MinObjAlignment,
- IndexSetStride = MinObjAlignment
+ IndexSetSize = SmallForDictionary // keep this odd-sized
};
+ static int IndexSetStart;
+ static int IndexSetStride;
private:
enum FitStrategyOptions {
@@ -278,6 +278,9 @@ class CompactibleFreeListSpace: public CompactibleSpace {
HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
+ // Set CMS global values
+ static void set_cms_values();
+
// Return the free chunk at the end of the space. If no such
// chunk exists, return NULL.
FreeChunk* find_chunk_at_end();
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index a1f40e21e..0aa08ec30 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -159,7 +159,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
CardTableRS* ct, bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
CardGeneration(rs, initial_byte_size, level, ct),
- _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
+ _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_debug_collection_type(Concurrent_collection_type)
{
HeapWord* bottom = (HeapWord*) _virtual_space.low();
@@ -222,7 +222,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
// promoting generation, we'll instead just use the minimum
// object size (which today is a header's worth of space);
// note that all arithmetic is in units of HeapWords.
- assert(MinChunkSize >= oopDesc::header_size(), "just checking");
+ assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
assert(_dilatation_factor >= 1.0, "from previous assert");
}
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
index 6d083e105..19e3c1c0d 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
@@ -133,9 +133,5 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
void print_on(outputStream* st);
};
-// Alignment helpers etc.
-#define numQuanta(x,y) ((x+y-1)/y)
-enum AlignmentConstants {
- MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
-};
+extern size_t MinChunkSize;
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 3e638d166..33ff94c8e 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -3644,7 +3644,7 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
do {
free_words = r->free()/HeapWordSize;
// If there's too little space, no one can allocate, so we're done.
- if (free_words < (size_t)oopDesc::header_size()) return;
+ if (free_words < CollectedHeap::min_fill_size()) return;
// Otherwise, try to claim it.
block = r->par_allocate(free_words);
} while (block == NULL);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index a534ba26c..ad09d2149 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -2523,14 +2523,14 @@ record_concurrent_mark_cleanup_end(size_t freed_bytes,
}
if (ParallelGCThreads > 0) {
const size_t OverpartitionFactor = 4;
- const size_t MinChunkSize = 8;
- const size_t ChunkSize =
+ const size_t MinWorkUnit = 8;
+ const size_t WorkUnit =
MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
- MinChunkSize);
+ MinWorkUnit);
_collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
- ChunkSize);
+ WorkUnit);
ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
- (int) ChunkSize);
+ (int) WorkUnit);
_g1->workers()->run_task(&parKnownGarbageTask);
assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index d0cb46a3a..83f442aa6 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -711,6 +711,7 @@ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
// object in the region.
if (region_ptr->data_size() == RegionSize) {
result += pointer_delta(addr, region_addr);
+ DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
return result;
}
@@ -1487,13 +1488,14 @@ PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
space->set_top_for_allocations();
}
- size_t obj_len = 8;
+ size_t min_size = CollectedHeap::min_fill_size();
+ size_t obj_len = min_size;
while (b + obj_len <= t) {
CollectedHeap::fill_with_object(b, obj_len);
mark_bitmap()->mark_obj(b, obj_len);
summary_data().add_obj(b, obj_len);
b += obj_len;
- obj_len = (obj_len & 0x18) + 8; // 8 16 24 32 8 16 24 32 ...
+ obj_len = (obj_len & (min_size*3)) + min_size; // 8 16 24 32 8 16 24 32 ...
}
if (b < t) {
// The loop didn't completely fill to t (top); adjust top downward.
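Note: a worked trace of the masked increment for min_size == 8, where min_size*3 == 24 == 0x18, so the rewrite reproduces the old literal exactly:

  // ( 8 & 24) + 8 == 16
  // (16 & 24) + 8 == 24
  // (24 & 24) + 8 == 32
  // (32 & 24) + 8 ==  8   // wraps around, giving 8 16 24 32 8 16 ...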
@@ -1680,11 +1682,13 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
// +-------+
// Initially assume case a, c or e will apply.
- size_t obj_len = (size_t)oopDesc::header_size();
+ size_t obj_len = CollectedHeap::min_fill_size();
HeapWord* obj_beg = dense_prefix_end - obj_len;
#ifdef _LP64
- if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
+ if (MinObjAlignment > 1) { // object alignment > heap word size
+ // Cases a, c or e.
+ } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
// Case b above.
obj_beg = dense_prefix_end - 1;
} else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index f7021be68..9f1fbf3ee 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -1414,6 +1414,8 @@ PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
{
assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
"must move left or to a different space");
+ assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
+ "checking alignment");
}
#endif // ASSERT
diff --git a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
index a5baf1f95..22cace7a2 100644
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
@@ -761,7 +761,7 @@ HeapWord* MutableNUMASpace::allocate(size_t size) {
if (p != NULL) {
size_t remainder = s->free_in_words();
- if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
+ if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
s->set_top(s->top() - size);
p = NULL;
}
@@ -803,7 +803,7 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
HeapWord *p = s->cas_allocate(size);
if (p != NULL) {
size_t remainder = pointer_delta(s->end(), p + size);
- if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
+ if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
if (s->cas_deallocate(p, size)) {
// We were the last to allocate and created a fragment less than
// a minimal object.
diff --git a/src/share/vm/gc_interface/collectedHeap.cpp b/src/share/vm/gc_interface/collectedHeap.cpp
index d082a072d..6fd9f0228 100644
--- a/src/share/vm/gc_interface/collectedHeap.cpp
+++ b/src/share/vm/gc_interface/collectedHeap.cpp
@@ -239,11 +239,11 @@ oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
}
size_t CollectedHeap::filler_array_hdr_size() {
- return size_t(arrayOopDesc::header_size(T_INT));
+ return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}
size_t CollectedHeap::filler_array_min_size() {
- return align_object_size(filler_array_hdr_size());
+ return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}
size_t CollectedHeap::filler_array_max_size() {
diff --git a/src/share/vm/includeDB_core b/src/share/vm/includeDB_core
index 890639593..709b88ef5 100644
--- a/src/share/vm/includeDB_core
+++ b/src/share/vm/includeDB_core
@@ -827,6 +827,7 @@ ciStreams.cpp ciField.hpp
ciStreams.cpp ciStreams.hpp
ciStreams.cpp ciUtilities.hpp
+ciStreams.hpp bytecode.hpp
ciStreams.hpp ciClassList.hpp
ciStreams.hpp ciExceptionHandler.hpp
ciStreams.hpp ciInstanceKlass.hpp
@@ -3635,6 +3636,7 @@ rewriter.cpp bytecodes.hpp
rewriter.cpp gcLocker.hpp
rewriter.cpp generateOopMap.hpp
rewriter.cpp interpreter.hpp
+rewriter.cpp methodComparator.hpp
rewriter.cpp objArrayOop.hpp
rewriter.cpp oop.inline.hpp
rewriter.cpp oopFactory.hpp
diff --git a/src/share/vm/interpreter/bytecode.cpp b/src/share/vm/interpreter/bytecode.cpp
index 5be30c6af..212c80950 100644
--- a/src/share/vm/interpreter/bytecode.cpp
+++ b/src/share/vm/interpreter/bytecode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,19 +26,12 @@
#include "incls/_bytecode.cpp.incl"
// Implementation of Bytecode
-// Should eventually get rid of these functions and use ThisRelativeObj methods instead
-
-void Bytecode::set_code(Bytecodes::Code code) {
- Bytecodes::check(code);
- *addr_at(0) = u_char(code);
-}
-
-bool Bytecode::check_must_rewrite() const {
- assert(Bytecodes::can_rewrite(code()), "post-check only");
+bool Bytecode::check_must_rewrite(Bytecodes::Code code) const {
+ assert(Bytecodes::can_rewrite(code), "post-check only");
// Some codes are conditionally rewriting. Look closely at them.
- switch (code()) {
+ switch (code) {
case Bytecodes::_aload_0:
// Even if RewriteFrequentPairs is turned on,
// the _aload_0 code might delay its rewrite until
@@ -58,14 +51,85 @@ bool Bytecode::check_must_rewrite() const {
}
+#ifdef ASSERT
+
+void Bytecode::assert_same_format_as(Bytecodes::Code testbc, bool is_wide) const {
+ Bytecodes::Code thisbc = Bytecodes::cast(byte_at(0));
+ if (thisbc == Bytecodes::_breakpoint) return; // let the assertion fail silently
+ if (is_wide) {
+ assert(thisbc == Bytecodes::_wide, "expected a wide instruction");
+ thisbc = Bytecodes::cast(byte_at(1));
+ if (thisbc == Bytecodes::_breakpoint) return;
+ }
+  int thisflags = Bytecodes::flags(thisbc, is_wide) & Bytecodes::_all_fmt_bits;
+  int testflags = Bytecodes::flags(testbc, is_wide) & Bytecodes::_all_fmt_bits;
+ if (thisflags != testflags)
+ tty->print_cr("assert_same_format_as(%d) failed on bc=%d%s; %d != %d",
+ (int)testbc, (int)thisbc, (is_wide?"/wide":""), testflags, thisflags);
+ assert(thisflags == testflags, "expected format");
+}
+
+void Bytecode::assert_index_size(int size, Bytecodes::Code bc, bool is_wide) {
+ int have_fmt = (Bytecodes::flags(bc, is_wide)
+ & (Bytecodes::_fmt_has_u2 | Bytecodes::_fmt_has_u4 |
+ Bytecodes::_fmt_not_simple |
+ // Not an offset field:
+ Bytecodes::_fmt_has_o));
+ int need_fmt = -1;
+ switch (size) {
+ case 1: need_fmt = 0; break;
+ case 2: need_fmt = Bytecodes::_fmt_has_u2; break;
+ case 4: need_fmt = Bytecodes::_fmt_has_u4; break;
+ }
+ if (is_wide) need_fmt |= Bytecodes::_fmt_not_simple;
+ if (have_fmt != need_fmt) {
+ tty->print_cr("assert_index_size %d: bc=%d%s %d != %d", size, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
+ assert(have_fmt == need_fmt, "assert_index_size");
+ }
+}
+
+void Bytecode::assert_offset_size(int size, Bytecodes::Code bc, bool is_wide) {
+ int have_fmt = Bytecodes::flags(bc, is_wide) & Bytecodes::_all_fmt_bits;
+ int need_fmt = -1;
+ switch (size) {
+ case 2: need_fmt = Bytecodes::_fmt_bo2; break;
+ case 4: need_fmt = Bytecodes::_fmt_bo4; break;
+ }
+ if (is_wide) need_fmt |= Bytecodes::_fmt_not_simple;
+ if (have_fmt != need_fmt) {
+ tty->print_cr("assert_offset_size %d: bc=%d%s %d != %d", size, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
+ assert(have_fmt == need_fmt, "assert_offset_size");
+ }
+}
+
+void Bytecode::assert_constant_size(int size, int where, Bytecodes::Code bc, bool is_wide) {
+ int have_fmt = Bytecodes::flags(bc, is_wide) & (Bytecodes::_all_fmt_bits
+ // Ignore any 'i' field (for iinc):
+ & ~Bytecodes::_fmt_has_i);
+ int need_fmt = -1;
+ switch (size) {
+ case 1: need_fmt = Bytecodes::_fmt_bc; break;
+ case 2: need_fmt = Bytecodes::_fmt_bc | Bytecodes::_fmt_has_u2; break;
+ }
+ if (is_wide) need_fmt |= Bytecodes::_fmt_not_simple;
+ int length = is_wide ? Bytecodes::wide_length_for(bc) : Bytecodes::length_for(bc);
+ if (have_fmt != need_fmt || where + size != length) {
+ tty->print_cr("assert_constant_size %d @%d: bc=%d%s %d != %d", size, where, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
+ }
+ assert(have_fmt == need_fmt, "assert_constant_size");
+ assert(where + size == length, "assert_constant_size oob");
+}
+
+void Bytecode::assert_native_index(Bytecodes::Code bc, bool is_wide) {
+ assert((Bytecodes::flags(bc, is_wide) & Bytecodes::_fmt_has_nbo) != 0, "native index");
+}
+
+#endif //ASSERT
// Implementation of Bytecode_tableswitch
int Bytecode_tableswitch::dest_offset_at(int i) const {
- address x = aligned_addr_at(1);
- int x2 = aligned_offset(1 + (3 + i)*jintSize);
- int val = java_signed_word_at(x2);
- return java_signed_word_at(aligned_offset(1 + (3 + i)*jintSize));
+ return get_Java_u4_at(aligned_offset(1 + (3 + i)*jintSize));
}
@@ -74,6 +138,7 @@ int Bytecode_tableswitch::dest_offset_at(int i) const {
void Bytecode_invoke::verify() const {
Bytecodes::Code bc = adjusted_invoke_code();
assert(is_valid(), "check invoke");
+ assert(method()->constants()->cache() != NULL, "do not call this from verifier or rewriter");
}
@@ -116,27 +181,12 @@ methodHandle Bytecode_invoke::static_target(TRAPS) {
int Bytecode_invoke::index() const {
// Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
// at the same time it allocates per-call-site CP cache entries.
- if (has_giant_index())
- return Bytes::get_native_u4(bcp() + 1);
+ Bytecodes::Code stdc = Bytecodes::java_code(code());
+ Bytecode* invoke = Bytecode_at(bcp());
+ if (invoke->has_index_u4(stdc))
+ return invoke->get_index_u4(stdc);
else
- return Bytes::get_Java_u2(bcp() + 1);
-}
-
-
-// Implementation of Bytecode_static
-
-void Bytecode_static::verify() const {
- assert(Bytecodes::java_code(code()) == Bytecodes::_putstatic
- || Bytecodes::java_code(code()) == Bytecodes::_getstatic, "check static");
-}
-
-
-BasicType Bytecode_static::result_type(methodOop method) const {
- int index = java_hwrd_at(1);
- constantPoolOop constants = method->constants();
- symbolOop field_type = constants->signature_ref_at(index);
- BasicType basic_type = FieldType::basic_type(field_type);
- return basic_type;
+ return invoke->get_index_u2_cpcache(stdc);
}
@@ -156,7 +206,8 @@ bool Bytecode_field::is_static() const {
int Bytecode_field::index() const {
- return java_hwrd_at(1);
+ Bytecode* invoke = Bytecode_at(bcp());
+ return invoke->get_index_u2_cpcache(Bytecodes::_getfield);
}
@@ -164,7 +215,14 @@ int Bytecode_field::index() const {
int Bytecode_loadconstant::index() const {
Bytecodes::Code stdc = Bytecodes::java_code(code());
- return stdc == Bytecodes::_ldc ? java_byte_at(1) : java_hwrd_at(1);
+ if (stdc != Bytecodes::_wide) {
+ if (Bytecodes::java_code(stdc) == Bytecodes::_ldc)
+ return get_index_u1(stdc);
+ else
+ return get_index_u2(stdc, false);
+ }
+ stdc = Bytecodes::code_at(addr_at(1));
+ return get_index_u2(stdc, true);
}
//------------------------------------------------------------------------------
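Note: for reference, the three constant-load layouts the accessor above distinguishes (per the JVM specification):

  // ldc    (0x12): opcode, u1 constant-pool index      -> get_index_u1
  // ldc_w  (0x13): opcode, u2 index in Java byte order -> get_index_u2
  // ldc2_w (0x14): opcode, u2 index in Java byte order -> get_index_u2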
diff --git a/src/share/vm/interpreter/bytecode.hpp b/src/share/vm/interpreter/bytecode.hpp
index 2eeee5d25..dd0068926 100644
--- a/src/share/vm/interpreter/bytecode.hpp
+++ b/src/share/vm/interpreter/bytecode.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,92 +26,100 @@
// relative to an object's 'this' pointer.
class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
- private:
- int sign_extend (int x, int size) const { const int s = (BytesPerInt - size)*BitsPerByte; return (x << s) >> s; }
-
public:
// Address computation
address addr_at (int offset) const { return (address)this + offset; }
+ int byte_at (int offset) const { return *(addr_at(offset)); }
address aligned_addr_at (int offset) const { return (address)round_to((intptr_t)addr_at(offset), jintSize); }
int aligned_offset (int offset) const { return aligned_addr_at(offset) - addr_at(0); }
- // Java unsigned accessors (using Java spec byte ordering)
- int java_byte_at (int offset) const { return *(jubyte*)addr_at(offset); }
- int java_hwrd_at (int offset) const { return java_byte_at(offset) << (1 * BitsPerByte) | java_byte_at(offset + 1); }
- int java_word_at (int offset) const { return java_hwrd_at(offset) << (2 * BitsPerByte) | java_hwrd_at(offset + 2); }
-
- // Java signed accessors (using Java spec byte ordering)
- int java_signed_byte_at(int offset) const { return sign_extend(java_byte_at(offset), 1); }
- int java_signed_hwrd_at(int offset) const { return sign_extend(java_hwrd_at(offset), 2); }
- int java_signed_word_at(int offset) const { return java_word_at(offset) ; }
-
- // Fast accessors (using the machine's natural byte ordering)
- int fast_byte_at (int offset) const { return *(jubyte *)addr_at(offset); }
- int fast_hwrd_at (int offset) const { return *(jushort*)addr_at(offset); }
- int fast_word_at (int offset) const { return *(juint *)addr_at(offset); }
-
- // Fast signed accessors (using the machine's natural byte ordering)
- int fast_signed_byte_at(int offset) const { return *(jbyte *)addr_at(offset); }
- int fast_signed_hwrd_at(int offset) const { return *(jshort*)addr_at(offset); }
- int fast_signed_word_at(int offset) const { return *(jint *)addr_at(offset); }
-
- // Fast manipulators (using the machine's natural byte ordering)
- void set_fast_byte_at (int offset, int x) const { *(jbyte *)addr_at(offset) = (jbyte )x; }
- void set_fast_hwrd_at (int offset, int x) const { *(jshort*)addr_at(offset) = (jshort)x; }
- void set_fast_word_at (int offset, int x) const { *(jint *)addr_at(offset) = (jint )x; }
+ // Word access:
+ int get_Java_u2_at (int offset) const { return Bytes::get_Java_u2(addr_at(offset)); }
+ int get_Java_u4_at (int offset) const { return Bytes::get_Java_u4(addr_at(offset)); }
+ int get_native_u2_at (int offset) const { return Bytes::get_native_u2(addr_at(offset)); }
+ int get_native_u4_at (int offset) const { return Bytes::get_native_u4(addr_at(offset)); }
};
// The base class for different kinds of bytecode abstractions.
// Provides the primitive operations to manipulate code relative
// to an object's 'this' pointer.
+// FIXME: Make this a ResourceObj, include the enclosing methodOop, and cache the opcode.
class Bytecode: public ThisRelativeObj {
protected:
u_char byte_at(int offset) const { return *addr_at(offset); }
- bool check_must_rewrite() const;
+ bool check_must_rewrite(Bytecodes::Code bc) const;
public:
// Attributes
address bcp() const { return addr_at(0); }
- address next_bcp() const { return addr_at(0) + Bytecodes::length_at(bcp()); }
int instruction_size() const { return Bytecodes::length_at(bcp()); }
+ // Warning: Use code() with caution on live bytecode streams. 4926272
Bytecodes::Code code() const { return Bytecodes::code_at(addr_at(0)); }
Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); }
- bool must_rewrite() const { return Bytecodes::can_rewrite(code()) && check_must_rewrite(); }
- bool is_active_breakpoint() const { return Bytecodes::is_active_breakpoint_at(bcp()); }
+ bool must_rewrite(Bytecodes::Code code) const { return Bytecodes::can_rewrite(code) && check_must_rewrite(code); }
- int one_byte_index() const { assert_index_size(1); return byte_at(1); }
- int two_byte_index() const { assert_index_size(2); return (byte_at(1) << 8) + byte_at(2); }
+ // Creation
+ inline friend Bytecode* Bytecode_at(address bcp);
- int offset() const { return (two_byte_index() << 16) >> 16; }
- address destination() const { return bcp() + offset(); }
+ // Static functions for parsing bytecodes in place.
+ int get_index_u1(Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_index_size(1, bc);
+ return *(jubyte*)addr_at(1);
+ }
+ int get_index_u2(Bytecodes::Code bc, bool is_wide = false) const {
+ assert_same_format_as(bc, is_wide); assert_index_size(2, bc, is_wide);
+ address p = addr_at(is_wide ? 2 : 1);
+ if (can_use_native_byte_order(bc, is_wide))
+ return Bytes::get_native_u2(p);
+ else return Bytes::get_Java_u2(p);
+ }
+ int get_index_u2_cpcache(Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_index_size(2, bc); assert_native_index(bc);
+ return Bytes::get_native_u2(addr_at(1)) DEBUG_ONLY(+ constantPoolOopDesc::CPCACHE_INDEX_TAG);
+ }
+ int get_index_u4(Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_index_size(4, bc);
+ assert(can_use_native_byte_order(bc), "");
+ return Bytes::get_native_u4(addr_at(1));
+ }
+ bool has_index_u4(Bytecodes::Code bc) const {
+ return bc == Bytecodes::_invokedynamic;
+ }
- // Attribute modification
- void set_code(Bytecodes::Code code);
+ int get_offset_s2(Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_offset_size(2, bc);
+ return (jshort) Bytes::get_Java_u2(addr_at(1));
+ }
+ int get_offset_s4(Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_offset_size(4, bc);
+ return (jint) Bytes::get_Java_u4(addr_at(1));
+ }
- // Creation
- inline friend Bytecode* Bytecode_at(address bcp);
+ int get_constant_u1(int offset, Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_constant_size(1, offset, bc);
+ return *(jbyte*)addr_at(offset);
+ }
+ int get_constant_u2(int offset, Bytecodes::Code bc, bool is_wide = false) const {
+ assert_same_format_as(bc, is_wide); assert_constant_size(2, offset, bc, is_wide);
+ return (jshort) Bytes::get_Java_u2(addr_at(offset));
+ }
- private:
- void assert_index_size(int required_size) const {
-#ifdef ASSERT
- int isize = instruction_size() - 1;
- if (isize == 2 && code() == Bytecodes::_iinc)
- isize = 1;
- else if (isize <= 2)
- ; // no change
- else if (code() == Bytecodes::_invokedynamic)
- isize = 4;
- else
- isize = 2;
- assert(isize = required_size, "wrong index size");
-#endif
+ // These are used locally and also from bytecode streams.
+ void assert_same_format_as(Bytecodes::Code testbc, bool is_wide = false) const NOT_DEBUG_RETURN;
+ static void assert_index_size(int required_size, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+ static void assert_offset_size(int required_size, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+ static void assert_constant_size(int required_size, int where, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+ static void assert_native_index(Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+ static bool can_use_native_byte_order(Bytecodes::Code bc, bool is_wide = false) {
+ return (!Bytes::is_Java_byte_ordering_different() || Bytecodes::native_byte_order(bc /*, is_wide*/));
}
};
inline Bytecode* Bytecode_at(address bcp) {
+ // Warning: Use with caution on live bytecode streams. 4926272
return (Bytecode*)bcp;
}
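Note: a short usage sketch of the new accessors, matching how Bytecode_field::index() uses them earlier in this diff ('bcp' is assumed to point at a rewritten getfield):

  Bytecode* b = Bytecode_at(bcp);
  int cache_index = b->get_index_u2_cpcache(Bytecodes::_getfield);  // native u2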
@@ -124,8 +132,8 @@ class LookupswitchPair: ThisRelativeObj {
int _offset;
public:
- int match() const { return java_signed_word_at(0 * jintSize); }
- int offset() const { return java_signed_word_at(1 * jintSize); }
+ int match() const { return get_Java_u4_at(0 * jintSize); }
+ int offset() const { return get_Java_u4_at(1 * jintSize); }
};
@@ -134,8 +142,8 @@ class Bytecode_lookupswitch: public Bytecode {
void verify() const PRODUCT_RETURN;
// Attributes
- int default_offset() const { return java_signed_word_at(aligned_offset(1 + 0*jintSize)); }
- int number_of_pairs() const { return java_signed_word_at(aligned_offset(1 + 1*jintSize)); }
+ int default_offset() const { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
+ int number_of_pairs() const { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
LookupswitchPair* pair_at(int i) const { assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
return (LookupswitchPair*)aligned_addr_at(1 + (1 + i)*2*jintSize); }
// Creation
@@ -154,9 +162,9 @@ class Bytecode_tableswitch: public Bytecode {
void verify() const PRODUCT_RETURN;
// Attributes
- int default_offset() const { return java_signed_word_at(aligned_offset(1 + 0*jintSize)); }
- int low_key() const { return java_signed_word_at(aligned_offset(1 + 1*jintSize)); }
- int high_key() const { return java_signed_word_at(aligned_offset(1 + 2*jintSize)); }
+ int default_offset() const { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
+ int low_key() const { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
+ int high_key() const { return get_Java_u4_at(aligned_offset(1 + 2*jintSize)); }
int dest_offset_at(int i) const;
int length() { return high_key()-low_key()+1; }
@@ -206,7 +214,6 @@ class Bytecode_invoke: public ResourceObj {
bool is_invokedynamic() const { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
bool has_receiver() const { return !is_invokestatic() && !is_invokedynamic(); }
- bool has_giant_index() const { return is_invokedynamic(); }
bool is_valid() const { return is_invokeinterface() ||
is_invokevirtual() ||
@@ -252,26 +259,6 @@ inline Bytecode_field* Bytecode_field_at(const methodOop method, address bcp) {
}
-// Abstraction for {get,put}static
-
-class Bytecode_static: public Bytecode {
- public:
- void verify() const;
-
- // Returns the result type of the send by inspecting the field ref
- BasicType result_type(methodOop method) const;
-
- // Creation
- inline friend Bytecode_static* Bytecode_static_at(const methodOop method, address bcp);
-};
-
-inline Bytecode_static* Bytecode_static_at(const methodOop method, address bcp) {
- Bytecode_static* b = (Bytecode_static*)bcp;
- debug_only(b->verify());
- return b;
-}
-
-
// Abstraction for checkcast
class Bytecode_checkcast: public Bytecode {
@@ -279,7 +266,7 @@ class Bytecode_checkcast: public Bytecode {
void verify() const { assert(Bytecodes::java_code(code()) == Bytecodes::_checkcast, "check checkcast"); }
// Returns index
- long index() const { return java_hwrd_at(1); };
+ long index() const { return get_index_u2(Bytecodes::_checkcast); };
// Creation
inline friend Bytecode_checkcast* Bytecode_checkcast_at(address bcp);
@@ -299,7 +286,7 @@ class Bytecode_instanceof: public Bytecode {
void verify() const { assert(code() == Bytecodes::_instanceof, "check instanceof"); }
// Returns index
- long index() const { return java_hwrd_at(1); };
+ long index() const { return get_index_u2(Bytecodes::_instanceof); };
// Creation
inline friend Bytecode_instanceof* Bytecode_instanceof_at(address bcp);
@@ -317,7 +304,7 @@ class Bytecode_new: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_new, "check new"); }
// Returns index
- long index() const { return java_hwrd_at(1); };
+ long index() const { return get_index_u2(Bytecodes::_new); };
// Creation
inline friend Bytecode_new* Bytecode_new_at(address bcp);
@@ -335,7 +322,7 @@ class Bytecode_multianewarray: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_multianewarray, "check new"); }
// Returns index
- long index() const { return java_hwrd_at(1); };
+ long index() const { return get_index_u2(Bytecodes::_multianewarray); };
// Creation
inline friend Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp);
@@ -353,7 +340,7 @@ class Bytecode_anewarray: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_anewarray, "check anewarray"); }
// Returns index
- long index() const { return java_hwrd_at(1); };
+ long index() const { return get_index_u2(Bytecodes::_anewarray); };
// Creation
inline friend Bytecode_anewarray* Bytecode_anewarray_at(address bcp);
diff --git a/src/share/vm/interpreter/bytecodeStream.cpp b/src/share/vm/interpreter/bytecodeStream.cpp
index a6ceb4729..b5414d5b9 100644
--- a/src/share/vm/interpreter/bytecodeStream.cpp
+++ b/src/share/vm/interpreter/bytecodeStream.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,25 @@ Bytecodes::Code RawBytecodeStream::raw_next_special(Bytecodes::Code code) {
}
}
}
- _code = code;
+ _raw_code = code;
return code;
}
+
+#ifdef ASSERT
+void BaseBytecodeStream::assert_raw_index_size(int size) const {
+ if (raw_code() == Bytecodes::_invokedynamic && is_raw()) {
+ // in raw mode, pretend indy is "bJJ__"
+ assert(size == 2, "raw invokedynamic instruction has 2-byte index only");
+ } else {
+ bytecode()->assert_index_size(size, raw_code(), is_wide());
+ }
+}
+
+void BaseBytecodeStream::assert_raw_stream(bool want_raw) const {
+ if (want_raw) {
+ assert( is_raw(), "this function only works on raw streams");
+ } else {
+ assert(!is_raw(), "this function only works on non-raw streams");
+ }
+}
+#endif //ASSERT
diff --git a/src/share/vm/interpreter/bytecodeStream.hpp b/src/share/vm/interpreter/bytecodeStream.hpp
index 6561c225c..204a3c56a 100644
--- a/src/share/vm/interpreter/bytecodeStream.hpp
+++ b/src/share/vm/interpreter/bytecodeStream.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,13 +32,13 @@
// while ((c = s.next()) >= 0) {
// ...
// }
-//
+
// A RawBytecodeStream is a simple version of BytecodeStream.
// It is used ONLY when we know the bytecodes haven't been rewritten
-// yet, such as in the rewriter or the verifier. Currently only the
-// verifier uses this class.
+// yet, such as in the rewriter or the verifier.
-class RawBytecodeStream: StackObj {
+// Here is the common base class for both RawBytecodeStream and BytecodeStream:
+class BaseBytecodeStream: StackObj {
protected:
// stream buffer
methodHandle _method; // read from method directly
@@ -49,15 +49,17 @@ class RawBytecodeStream: StackObj {
int _end_bci; // bci after the current iteration interval
// last bytecode read
- Bytecodes::Code _code;
+ Bytecodes::Code _raw_code;
bool _is_wide;
+ bool _is_raw; // false in 'cooked' BytecodeStream
- public:
// Construction
- RawBytecodeStream(methodHandle method) : _method(method) {
+ BaseBytecodeStream(methodHandle method) : _method(method) {
set_interval(0, _method->code_size());
+ _is_raw = false;
}
+ public:
// Iteration control
void set_interval(int beg_bci, int end_bci) {
// iterate over the interval [beg_bci, end_bci)
@@ -72,6 +74,46 @@ class RawBytecodeStream: StackObj {
set_interval(beg_bci, _method->code_size());
}
+ bool is_raw() const { return _is_raw; }
+
+ // Stream attributes
+ methodHandle method() const { return _method; }
+
+ int bci() const { return _bci; }
+ int next_bci() const { return _next_bci; }
+ int end_bci() const { return _end_bci; }
+
+ Bytecodes::Code raw_code() const { return _raw_code; }
+ bool is_wide() const { return _is_wide; }
+ int instruction_size() const { return (_next_bci - _bci); }
+ bool is_last_bytecode() const { return _next_bci >= _end_bci; }
+
+ address bcp() const { return method()->code_base() + _bci; }
+ Bytecode* bytecode() const { return Bytecode_at(bcp()); }
+
+ // State changes
+ void set_next_bci(int bci) { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
+
+ // Bytecode-specific attributes
+ int dest() const { return bci() + bytecode()->get_offset_s2(raw_code()); }
+ int dest_w() const { return bci() + bytecode()->get_offset_s4(raw_code()); }
+
+ // One-byte indices.
+ int get_index_u1() const { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }
+
+ protected:
+ void assert_raw_index_size(int size) const NOT_DEBUG_RETURN;
+ void assert_raw_stream(bool want_raw) const NOT_DEBUG_RETURN;
+};
+
+class RawBytecodeStream: public BaseBytecodeStream {
+ public:
+ // Construction
+ RawBytecodeStream(methodHandle method) : BaseBytecodeStream(method) {
+ _is_raw = true;
+ }
+
+ public:
// Iteration
// Use raw_next() rather than next() for faster method reference
Bytecodes::Code raw_next() {
@@ -80,7 +122,7 @@ class RawBytecodeStream: StackObj {
_bci = _next_bci;
assert(!is_last_bytecode(), "caller should check is_last_bytecode()");
- address bcp = RawBytecodeStream::bcp();
+ address bcp = this->bcp();
code = Bytecodes::code_or_bp_at(bcp);
// set next bytecode position
@@ -90,84 +132,49 @@ class RawBytecodeStream: StackObj {
&& code != Bytecodes::_lookupswitch, "can't be special bytecode");
_is_wide = false;
_next_bci += l;
- _code = code;
+ _raw_code = code;
return code;
- } else if (code == Bytecodes::_wide && _bci + 1 >= _end_bci) {
- return Bytecodes::_illegal;
} else {
return raw_next_special(code);
}
}
Bytecodes::Code raw_next_special(Bytecodes::Code code);
- // Stream attributes
- methodHandle method() const { return _method; }
-
- int bci() const { return _bci; }
- int next_bci() const { return _next_bci; }
- int end_bci() const { return _end_bci; }
-
- Bytecodes::Code code() const { return _code; }
- bool is_wide() const { return _is_wide; }
- int instruction_size() const { return (_next_bci - _bci); }
- bool is_last_bytecode() const { return _next_bci >= _end_bci; }
-
- address bcp() const { return method()->code_base() + _bci; }
- address next_bcp() { return method()->code_base() + _next_bci; }
-
- // State changes
- void set_next_bci(int bci) { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
-
- // Bytecode-specific attributes
- int dest() const { return bci() + (short)Bytes::get_Java_u2(bcp() + 1); }
- int dest_w() const { return bci() + (int )Bytes::get_Java_u4(bcp() + 1); }
-
- // Unsigned indices, widening
- int get_index() const { assert_index_size(is_wide() ? 2 : 1);
- return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; }
- int get_index_big() const { assert_index_size(2);
- return (int)Bytes::get_Java_u2(bcp() + 1); }
- int get_index_int() const { return has_giant_index() ? get_index_giant() : get_index_big(); }
- int get_index_giant() const { assert_index_size(4); return Bytes::get_native_u4(bcp() + 1); }
- int has_giant_index() const { return (code() == Bytecodes::_invokedynamic); }
+ // Unsigned indices, widening, with no swapping of bytes
+ int get_index() const { return (is_wide()) ? get_index_u2_raw(bcp() + 2) : get_index_u1(); }
+ // Get an unsigned 2-byte index, with no swapping of bytes.
+ int get_index_u2() const { assert(!is_wide(), ""); return get_index_u2_raw(bcp() + 1); }
private:
- void assert_index_size(int required_size) const {
-#ifdef ASSERT
- int isize = instruction_size() - (int)_is_wide - 1;
- if (isize == 2 && code() == Bytecodes::_iinc)
- isize = 1;
- else if (isize <= 2)
- ; // no change
- else if (has_giant_index())
- isize = 4;
- else
- isize = 2;
- assert(isize == required_size, "wrong index size");
-#endif
+ int get_index_u2_raw(address p) const {
+ assert_raw_index_size(2); assert_raw_stream(true);
+ return Bytes::get_Java_u2(p);
}
};
// In BytecodeStream, non-java bytecodes will be translated into the
// corresponding java bytecodes.
-class BytecodeStream: public RawBytecodeStream {
+class BytecodeStream: public BaseBytecodeStream {
+ Bytecodes::Code _code;
+
public:
// Construction
- BytecodeStream(methodHandle method) : RawBytecodeStream(method) { }
+ BytecodeStream(methodHandle method) : BaseBytecodeStream(method) { }
// Iteration
Bytecodes::Code next() {
- Bytecodes::Code code;
+ Bytecodes::Code raw_code, code;
// set reading position
_bci = _next_bci;
if (is_last_bytecode()) {
// indicate end of bytecode stream
- code = Bytecodes::_illegal;
+ raw_code = code = Bytecodes::_illegal;
} else {
// get bytecode
- address bcp = BytecodeStream::bcp();
- code = Bytecodes::java_code_at(bcp);
+ address bcp = this->bcp();
+ raw_code = Bytecodes::code_at(bcp);
+ code = Bytecodes::java_code(raw_code);
// set next bytecode position
//
// note that we cannot advance before having the
@@ -181,14 +188,29 @@ class BytecodeStream: public RawBytecodeStream {
_is_wide = false;
// check for special (uncommon) cases
if (code == Bytecodes::_wide) {
- code = (Bytecodes::Code)bcp[1];
+ raw_code = (Bytecodes::Code)bcp[1];
+ code = raw_code; // wide BCs are always Java-normal
_is_wide = true;
}
assert(Bytecodes::is_java_code(code), "sanity check");
}
+ _raw_code = raw_code;
_code = code;
return _code;
}
bool is_active_breakpoint() const { return Bytecodes::is_active_breakpoint_at(bcp()); }
+ Bytecodes::Code code() const { return _code; }
+
+ // Unsigned indices, widening
+ int get_index() const { return is_wide() ? bytecode()->get_index_u2(raw_code(), true) : get_index_u1(); }
+ // Get an unsigned 2-byte index, swapping the bytes if necessary.
+ int get_index_u2() const { assert_raw_stream(false);
+ return bytecode()->get_index_u2(raw_code(), false); }
+ // Get an unsigned 2-byte index in native order.
+ int get_index_u2_cpcache() const { assert_raw_stream(false);
+ return bytecode()->get_index_u2_cpcache(raw_code()); }
+ int get_index_u4() const { assert_raw_stream(false);
+ return bytecode()->get_index_u4(raw_code()); }
+ bool has_index_u4() const { return bytecode()->has_index_u4(raw_code()); }
};
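
As a usage sketch (hedged: count_invokes and its body are illustrative, not part of this patch; only the stream API declared above is assumed), a client of the "cooked" stream now reads rewritten member-reference operands through get_index_u2_cpcache(), while a RawBytecodeStream must be used before rewriting:

    static int count_invokes(methodHandle m) {
      BytecodeStream s(m);
      Bytecodes::Code c;
      int invokes = 0;
      while ((c = s.next()) >= 0) {           // next() returns _illegal (< 0) at end
        switch (c) {
        case Bytecodes::_invokevirtual:
        case Bytecodes::_invokespecial:
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokeinterface:
          invokes++;
          (void) s.get_index_u2_cpcache();    // native-order CP cache operand
          break;
        default:
          break;
        }
      }
      return invokes;
    }
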
diff --git a/src/share/vm/interpreter/bytecodeTracer.cpp b/src/share/vm/interpreter/bytecodeTracer.cpp
index dbf617bc2..5dea0d746 100644
--- a/src/share/vm/interpreter/bytecodeTracer.cpp
+++ b/src/share/vm/interpreter/bytecodeTracer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@ class BytecodePrinter: public BytecodeClosure {
// (Also, ensure that occasional false positives are benign.)
methodOop _current_method;
bool _is_wide;
+ Bytecodes::Code _code;
address _next_pc; // current decoding position
void align() { _next_pc = (address)round_to((intptr_t)_next_pc, sizeof(jint)); }
@@ -46,23 +47,26 @@ class BytecodePrinter: public BytecodeClosure {
short get_short() { short i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
int get_int() { int i=Bytes::get_Java_u4(_next_pc); _next_pc+=4; return i; }
- int get_index() { return *(address)_next_pc++; }
- int get_big_index() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
- int get_giant_index() { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
- int get_index_special() { return (is_wide()) ? get_big_index() : get_index(); }
+ int get_index_u1() { return *(address)_next_pc++; }
+ int get_index_u2() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
+ int get_index_u2_cpcache() { int i=Bytes::get_native_u2(_next_pc); _next_pc+=2; return i + constantPoolOopDesc::CPCACHE_INDEX_TAG; }
+ int get_index_u4() { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
+ int get_index_special() { return (is_wide()) ? get_index_u2() : get_index_u1(); }
methodOop method() { return _current_method; }
bool is_wide() { return _is_wide; }
+ Bytecodes::Code raw_code() { return Bytecodes::Code(_code); }
- bool check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st = tty);
+ bool check_index(int i, int& cp_index, outputStream* st = tty);
void print_constant(int i, outputStream* st = tty);
void print_field_or_method(int i, outputStream* st = tty);
- void print_attributes(Bytecodes::Code code, int bci, outputStream* st = tty);
+ void print_attributes(int bci, outputStream* st = tty);
void bytecode_epilog(int bci, outputStream* st = tty);
public:
BytecodePrinter() {
_is_wide = false;
+ _code = Bytecodes::_illegal;
}
// This method is called while executing the raw bytecodes, so none of
@@ -89,7 +93,8 @@ class BytecodePrinter: public BytecodeClosure {
} else {
code = Bytecodes::code_at(bcp);
}
- int bci = bcp - method->code_base();
+ _code = code;
+ int bci = bcp - method->code_base();
st->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
if (Verbose) {
st->print("%8d %4d " INTPTR_FORMAT " " INTPTR_FORMAT " %s",
@@ -99,10 +104,11 @@ class BytecodePrinter: public BytecodeClosure {
BytecodeCounter::counter_value(), bci, Bytecodes::name(code));
}
_next_pc = is_wide() ? bcp+2 : bcp+1;
- print_attributes(code, bci);
+ print_attributes(bci);
// Set is_wide for the next one, since the caller of this doesn't skip
// the next bytecode.
_is_wide = (code == Bytecodes::_wide);
+ _code = Bytecodes::_illegal;
}
// Used for methodOop::print_codes(). The input bcp comes from
@@ -116,6 +122,7 @@ class BytecodePrinter: public BytecodeClosure {
if (is_wide()) {
code = Bytecodes::code_at(bcp+1);
}
+ _code = code;
int bci = bcp - method->code_base();
// Print bytecode index and name
if (is_wide()) {
@@ -124,7 +131,7 @@ class BytecodePrinter: public BytecodeClosure {
st->print("%d %s", bci, Bytecodes::name(code));
}
_next_pc = is_wide() ? bcp+2 : bcp+1;
- print_attributes(code, bci, st);
+ print_attributes(bci, st);
bytecode_epilog(bci, st);
}
};
@@ -185,12 +192,13 @@ void print_oop(oop value, outputStream* st) {
}
}
-bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st) {
+bool BytecodePrinter::check_index(int i, int& cp_index, outputStream* st) {
constantPoolOop constants = method()->constants();
int ilimit = constants->length(), climit = 0;
+ Bytecodes::Code code = raw_code();
constantPoolCacheOop cache = NULL;
- if (in_cp_cache) {
+ if (Bytecodes::uses_cp_cache(code)) {
cache = constants->cache();
if (cache != NULL) {
//climit = cache->length(); // %%% private!
@@ -201,7 +209,7 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
}
}
- if (in_cp_cache && constantPoolCacheOopDesc::is_secondary_index(i)) {
+ if (cache != NULL && constantPoolCacheOopDesc::is_secondary_index(i)) {
i = constantPoolCacheOopDesc::decode_secondary_index(i);
st->print(" secondary cache[%d] of", i);
if (i >= 0 && i < climit) {
@@ -218,8 +226,6 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
}
if (cache != NULL) {
- i = Bytes::swap_u2(i);
- if (WizardMode) st->print(" (swap=%d)", i);
goto check_cache_index;
}
@@ -234,6 +240,17 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
return false;
check_cache_index:
+#ifdef ASSERT
+ {
+ const int CPCACHE_INDEX_TAG = constantPoolOopDesc::CPCACHE_INDEX_TAG;
+ if (i >= CPCACHE_INDEX_TAG && i < climit + CPCACHE_INDEX_TAG) {
+ i -= CPCACHE_INDEX_TAG;
+ } else {
+ st->print_cr(" CP[%d] missing bias?", i);
+ return false;
+ }
+ }
+#endif //ASSERT
if (i >= 0 && i < climit) {
if (cache->entry_at(i)->is_secondary_entry()) {
st->print_cr(" secondary entry?");
@@ -248,7 +265,7 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
void BytecodePrinter::print_constant(int i, outputStream* st) {
int orig_i = i;
- if (!check_index(orig_i, false, i, st)) return;
+ if (!check_index(orig_i, i, st)) return;
constantPoolOop constants = method()->constants();
constantTag tag = constants->tag_at(i);
@@ -279,7 +296,7 @@ void BytecodePrinter::print_constant(int i, outputStream* st) {
void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
int orig_i = i;
- if (!check_index(orig_i, true, i, st)) return;
+ if (!check_index(orig_i, i, st)) return;
constantPoolOop constants = method()->constants();
constantTag tag = constants->tag_at(i);
@@ -303,9 +320,9 @@ void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
}
-void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStream* st) {
+void BytecodePrinter::print_attributes(int bci, outputStream* st) {
// Show attributes of pre-rewritten codes
- code = Bytecodes::java_code(code);
+ Bytecodes::Code code = Bytecodes::java_code(raw_code());
// If the code doesn't have any fields there's nothing to print.
// note this is ==1 because the tableswitch and lookupswitch are
// zero size (for some reason) and we want to print stuff out for them.
@@ -323,12 +340,12 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
st->print_cr(" " INT32_FORMAT, get_short());
break;
case Bytecodes::_ldc:
- print_constant(get_index(), st);
+ print_constant(get_index_u1(), st);
break;
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
- print_constant(get_big_index(), st);
+ print_constant(get_index_u2(), st);
break;
case Bytecodes::_iload:
@@ -352,7 +369,7 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
break;
case Bytecodes::_newarray: {
- BasicType atype = (BasicType)get_index();
+ BasicType atype = (BasicType)get_index_u1();
const char* str = type2name(atype);
if (str == NULL || atype == T_OBJECT || atype == T_ARRAY) {
assert(false, "Unidentified basic type");
@@ -361,15 +378,15 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
}
break;
case Bytecodes::_anewarray: {
- int klass_index = get_big_index();
+ int klass_index = get_index_u2();
constantPoolOop constants = method()->constants();
symbolOop name = constants->klass_name_at(klass_index);
st->print_cr(" %s ", name->as_C_string());
}
break;
case Bytecodes::_multianewarray: {
- int klass_index = get_big_index();
- int nof_dims = get_index();
+ int klass_index = get_index_u2();
+ int nof_dims = get_index_u1();
constantPoolOop constants = method()->constants();
symbolOop name = constants->klass_name_at(klass_index);
st->print_cr(" %s %d", name->as_C_string(), nof_dims);
@@ -451,31 +468,31 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
case Bytecodes::_getstatic:
case Bytecodes::_putfield:
case Bytecodes::_getfield:
- print_field_or_method(get_big_index(), st);
+ print_field_or_method(get_index_u2_cpcache(), st);
break;
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
- print_field_or_method(get_big_index(), st);
+ print_field_or_method(get_index_u2_cpcache(), st);
break;
case Bytecodes::_invokeinterface:
- { int i = get_big_index();
- int n = get_index();
- get_index(); // ignore zero byte
+ { int i = get_index_u2_cpcache();
+ int n = get_index_u1();
+ get_byte(); // ignore zero byte
print_field_or_method(i, st);
}
break;
case Bytecodes::_invokedynamic:
- print_field_or_method(get_giant_index(), st);
+ print_field_or_method(get_index_u4(), st);
break;
case Bytecodes::_new:
case Bytecodes::_checkcast:
case Bytecodes::_instanceof:
- { int i = get_big_index();
+ { int i = get_index_u2();
constantPoolOop constants = method()->constants();
symbolOop name = constants->klass_name_at(i);
st->print_cr(" %d <%s>", i, name->as_C_string());
diff --git a/src/share/vm/interpreter/bytecodes.cpp b/src/share/vm/interpreter/bytecodes.cpp
index 1c8887170..59d9b0e80 100644
--- a/src/share/vm/interpreter/bytecodes.cpp
+++ b/src/share/vm/interpreter/bytecodes.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,14 +37,11 @@
bool Bytecodes::_is_initialized = false;
const char* Bytecodes::_name [Bytecodes::number_of_codes];
-const char* Bytecodes::_format [Bytecodes::number_of_codes];
-const char* Bytecodes::_wide_format [Bytecodes::number_of_codes];
BasicType Bytecodes::_result_type [Bytecodes::number_of_codes];
s_char Bytecodes::_depth [Bytecodes::number_of_codes];
-u_char Bytecodes::_length [Bytecodes::number_of_codes];
-bool Bytecodes::_can_trap [Bytecodes::number_of_codes];
+u_char Bytecodes::_lengths [Bytecodes::number_of_codes];
Bytecodes::Code Bytecodes::_java_code [Bytecodes::number_of_codes];
-bool Bytecodes::_can_rewrite [Bytecodes::number_of_codes];
+u_short Bytecodes::_flags [(1<<BitsPerByte)*2];
Bytecodes::Code Bytecodes::code_at(methodOop method, int bci) {
@@ -91,6 +88,7 @@ int Bytecodes::special_length_at(address bcp, address end) {
return (len > 0 && len == (int)len) ? len : -1;
}
}
+ // Note: Length functions must return <=0 for invalid bytecodes.
return 0;
}
@@ -124,15 +122,22 @@ void Bytecodes::def(Code code, const char* name, const char* format, const char*
void Bytecodes::def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code) {
assert(wide_format == NULL || format != NULL, "short form must exist if there's a wide form");
+ int len = (format != NULL ? (int) strlen(format) : 0);
+ int wlen = (wide_format != NULL ? (int) strlen(wide_format) : 0);
_name [code] = name;
- _format [code] = format;
- _wide_format [code] = wide_format;
_result_type [code] = result_type;
_depth [code] = depth;
- _can_trap [code] = can_trap;
- _length [code] = format != NULL ? (u_char)strlen(format) : 0;
+ _lengths [code] = (wlen << 4) | (len & 0xF);
_java_code [code] = java_code;
- if (java_code != code) _can_rewrite[java_code] = true;
+ int bc_flags = 0;
+ if (can_trap) bc_flags |= _bc_can_trap;
+ if (java_code != code) bc_flags |= _bc_can_rewrite;
+ _flags[(u1)code+0*(1<<BitsPerByte)] = compute_flags(format, bc_flags);
+ _flags[(u1)code+1*(1<<BitsPerByte)] = compute_flags(wide_format, bc_flags);
+ assert(is_defined(code) == (format != NULL), "");
+ assert(wide_is_defined(code) == (wide_format != NULL), "");
+ assert(length_for(code) == len, "");
+ assert(wide_length_for(code) == wlen, "");
}
@@ -140,23 +145,92 @@ void Bytecodes::def(Code code, const char* name, const char* format, const char*
//
// b: bytecode
// c: signed constant, Java byte-ordering
-// i: unsigned index , Java byte-ordering
-// j: unsigned index , native byte-ordering
-// o: branch offset , Java byte-ordering
+// i: unsigned local index, Java byte-ordering (I = native byte ordering)
+// j: unsigned CP cache index, Java byte-ordering (J = native byte ordering)
+// k: unsigned CP index, Java byte-ordering
+// o: branch offset, Java byte-ordering
// _: unused/ignored
// w: wide bytecode
//
-// Note: Right now the format strings are used for 2 purposes:
+// Note: The format strings are used for 2 purposes:
// 1. to specify the length of the bytecode
// (= number of characters in format string)
-// 2. to specify the bytecode attributes
-//
-// The bytecode attributes are currently used only for bytecode tracing
-// (see BytecodeTracer); thus if more specific format information is
-// used, one would also have to adjust the bytecode tracer.
+// 2. to derive bytecode format flags (_fmt_has_k, etc.)
//
// Note: For bytecodes with variable length, the format string is the empty string.
+int Bytecodes::compute_flags(const char* format, int more_flags) {
+ if (format == NULL) return 0; // not even more_flags
+ int flags = more_flags;
+ const char* fp = format;
+ switch (*fp) {
+ case '\0':
+ flags |= _fmt_not_simple; // but variable
+ break;
+ case 'b':
+ flags |= _fmt_not_variable; // but simple
+ ++fp; // skip 'b'
+ break;
+ case 'w':
+ flags |= _fmt_not_variable | _fmt_not_simple;
+ ++fp; // skip 'w'
+ guarantee(*fp == 'b', "wide format must start with 'wb'");
+ ++fp; // skip 'b'
+ break;
+ }
+
+ int has_nbo = 0, has_jbo = 0, has_size = 0;
+ for (;;) {
+ int this_flag = 0;
+ char fc = *fp++;
+ switch (fc) {
+ case '\0': // end of string
+ assert(flags == (jchar)flags, "change _format_flags");
+ return flags;
+
+ case '_': continue; // ignore these
+
+ case 'j': this_flag = _fmt_has_j; has_jbo = 1; break;
+ case 'k': this_flag = _fmt_has_k; has_jbo = 1; break;
+ case 'i': this_flag = _fmt_has_i; has_jbo = 1; break;
+ case 'c': this_flag = _fmt_has_c; has_jbo = 1; break;
+ case 'o': this_flag = _fmt_has_o; has_jbo = 1; break;
+
+ // uppercase versions mark native byte order (from Rewriter)
+ // actually, only the 'J' case happens currently
+ case 'J': this_flag = _fmt_has_j; has_nbo = 1; break;
+ case 'K': this_flag = _fmt_has_k; has_nbo = 1; break;
+ case 'I': this_flag = _fmt_has_i; has_nbo = 1; break;
+ case 'C': this_flag = _fmt_has_c; has_nbo = 1; break;
+ case 'O': this_flag = _fmt_has_o; has_nbo = 1; break;
+ default: guarantee(false, "bad char in format");
+ }
+
+ flags |= this_flag;
+
+ guarantee(!(has_jbo && has_nbo), "mixed byte orders in format");
+ if (has_nbo)
+ flags |= _fmt_has_nbo;
+
+ int this_size = 1;
+ if (*fp == fc) {
+ // advance beyond run of the same characters
+ this_size = 2;
+ while (*++fp == fc) this_size++;
+ switch (this_size) {
+ case 2: flags |= _fmt_has_u2; break;
+ case 4: flags |= _fmt_has_u4; break;
+ default: guarantee(false, "bad rep count in format");
+ }
+ }
+ guarantee(has_size == 0 || // no field yet
+ this_size == has_size || // same size
+ this_size < has_size && *fp == '\0', // last field can be short
+ "mixed field sizes in format");
+ has_size = this_size;
+ }
+}
+
void Bytecodes::initialize() {
if (_is_initialized) return;
assert(number_of_codes <= 256, "too many bytecodes");
@@ -191,9 +265,9 @@ void Bytecodes::initialize() {
def(_dconst_1 , "dconst_1" , "b" , NULL , T_DOUBLE , 2, false);
def(_bipush , "bipush" , "bc" , NULL , T_INT , 1, false);
def(_sipush , "sipush" , "bcc" , NULL , T_INT , 1, false);
- def(_ldc , "ldc" , "bi" , NULL , T_ILLEGAL, 1, true );
- def(_ldc_w , "ldc_w" , "bii" , NULL , T_ILLEGAL, 1, true );
- def(_ldc2_w , "ldc2_w" , "bii" , NULL , T_ILLEGAL, 2, true );
+ def(_ldc , "ldc" , "bk" , NULL , T_ILLEGAL, 1, true );
+ def(_ldc_w , "ldc_w" , "bkk" , NULL , T_ILLEGAL, 1, true );
+ def(_ldc2_w , "ldc2_w" , "bkk" , NULL , T_ILLEGAL, 2, true );
def(_iload , "iload" , "bi" , "wbii" , T_INT , 1, false);
def(_lload , "lload" , "bi" , "wbii" , T_LONG , 2, false);
def(_fload , "fload" , "bi" , "wbii" , T_FLOAT , 1, false);
@@ -351,26 +425,26 @@ void Bytecodes::initialize() {
def(_dreturn , "dreturn" , "b" , NULL , T_DOUBLE , -2, true);
def(_areturn , "areturn" , "b" , NULL , T_OBJECT , -1, true);
def(_return , "return" , "b" , NULL , T_VOID , 0, true);
- def(_getstatic , "getstatic" , "bjj" , NULL , T_ILLEGAL, 1, true );
- def(_putstatic , "putstatic" , "bjj" , NULL , T_ILLEGAL, -1, true );
- def(_getfield , "getfield" , "bjj" , NULL , T_ILLEGAL, 0, true );
- def(_putfield , "putfield" , "bjj" , NULL , T_ILLEGAL, -2, true );
- def(_invokevirtual , "invokevirtual" , "bjj" , NULL , T_ILLEGAL, -1, true);
- def(_invokespecial , "invokespecial" , "bjj" , NULL , T_ILLEGAL, -1, true);
- def(_invokestatic , "invokestatic" , "bjj" , NULL , T_ILLEGAL, 0, true);
- def(_invokeinterface , "invokeinterface" , "bjj__", NULL , T_ILLEGAL, -1, true);
- def(_invokedynamic , "invokedynamic" , "bjjjj", NULL , T_ILLEGAL, 0, true );
- def(_new , "new" , "bii" , NULL , T_OBJECT , 1, true );
+ def(_getstatic , "getstatic" , "bJJ" , NULL , T_ILLEGAL, 1, true );
+ def(_putstatic , "putstatic" , "bJJ" , NULL , T_ILLEGAL, -1, true );
+ def(_getfield , "getfield" , "bJJ" , NULL , T_ILLEGAL, 0, true );
+ def(_putfield , "putfield" , "bJJ" , NULL , T_ILLEGAL, -2, true );
+ def(_invokevirtual , "invokevirtual" , "bJJ" , NULL , T_ILLEGAL, -1, true);
+ def(_invokespecial , "invokespecial" , "bJJ" , NULL , T_ILLEGAL, -1, true);
+ def(_invokestatic , "invokestatic" , "bJJ" , NULL , T_ILLEGAL, 0, true);
+ def(_invokeinterface , "invokeinterface" , "bJJ__", NULL , T_ILLEGAL, -1, true);
+ def(_invokedynamic , "invokedynamic" , "bJJJJ", NULL , T_ILLEGAL, 0, true );
+ def(_new , "new" , "bkk" , NULL , T_OBJECT , 1, true );
def(_newarray , "newarray" , "bc" , NULL , T_OBJECT , 0, true );
- def(_anewarray , "anewarray" , "bii" , NULL , T_OBJECT , 0, true );
+ def(_anewarray , "anewarray" , "bkk" , NULL , T_OBJECT , 0, true );
def(_arraylength , "arraylength" , "b" , NULL , T_VOID , 0, true );
def(_athrow , "athrow" , "b" , NULL , T_VOID , -1, true );
- def(_checkcast , "checkcast" , "bii" , NULL , T_OBJECT , 0, true );
- def(_instanceof , "instanceof" , "bii" , NULL , T_INT , 0, true );
+ def(_checkcast , "checkcast" , "bkk" , NULL , T_OBJECT , 0, true );
+ def(_instanceof , "instanceof" , "bkk" , NULL , T_INT , 0, true );
def(_monitorenter , "monitorenter" , "b" , NULL , T_VOID , -1, true );
def(_monitorexit , "monitorexit" , "b" , NULL , T_VOID , -1, true );
def(_wide , "wide" , "" , NULL , T_VOID , 0, false);
- def(_multianewarray , "multianewarray" , "biic" , NULL , T_OBJECT , 1, true );
+ def(_multianewarray , "multianewarray" , "bkkc" , NULL , T_OBJECT , 1, true );
def(_ifnull , "ifnull" , "boo" , NULL , T_VOID , -1, false);
def(_ifnonnull , "ifnonnull" , "boo" , NULL , T_VOID , -1, false);
def(_goto_w , "goto_w" , "boooo", NULL , T_VOID , 0, false);
@@ -380,35 +454,35 @@ void Bytecodes::initialize() {
// JVM bytecodes
// bytecode bytecode name format wide f. result tp stk traps std code
- def(_fast_agetfield , "fast_agetfield" , "bjj" , NULL , T_OBJECT , 0, true , _getfield );
- def(_fast_bgetfield , "fast_bgetfield" , "bjj" , NULL , T_INT , 0, true , _getfield );
- def(_fast_cgetfield , "fast_cgetfield" , "bjj" , NULL , T_CHAR , 0, true , _getfield );
- def(_fast_dgetfield , "fast_dgetfield" , "bjj" , NULL , T_DOUBLE , 0, true , _getfield );
- def(_fast_fgetfield , "fast_fgetfield" , "bjj" , NULL , T_FLOAT , 0, true , _getfield );
- def(_fast_igetfield , "fast_igetfield" , "bjj" , NULL , T_INT , 0, true , _getfield );
- def(_fast_lgetfield , "fast_lgetfield" , "bjj" , NULL , T_LONG , 0, true , _getfield );
- def(_fast_sgetfield , "fast_sgetfield" , "bjj" , NULL , T_SHORT , 0, true , _getfield );
-
- def(_fast_aputfield , "fast_aputfield" , "bjj" , NULL , T_OBJECT , 0, true , _putfield );
- def(_fast_bputfield , "fast_bputfield" , "bjj" , NULL , T_INT , 0, true , _putfield );
- def(_fast_cputfield , "fast_cputfield" , "bjj" , NULL , T_CHAR , 0, true , _putfield );
- def(_fast_dputfield , "fast_dputfield" , "bjj" , NULL , T_DOUBLE , 0, true , _putfield );
- def(_fast_fputfield , "fast_fputfield" , "bjj" , NULL , T_FLOAT , 0, true , _putfield );
- def(_fast_iputfield , "fast_iputfield" , "bjj" , NULL , T_INT , 0, true , _putfield );
- def(_fast_lputfield , "fast_lputfield" , "bjj" , NULL , T_LONG , 0, true , _putfield );
- def(_fast_sputfield , "fast_sputfield" , "bjj" , NULL , T_SHORT , 0, true , _putfield );
+ def(_fast_agetfield , "fast_agetfield" , "bJJ" , NULL , T_OBJECT , 0, true , _getfield );
+ def(_fast_bgetfield , "fast_bgetfield" , "bJJ" , NULL , T_INT , 0, true , _getfield );
+ def(_fast_cgetfield , "fast_cgetfield" , "bJJ" , NULL , T_CHAR , 0, true , _getfield );
+ def(_fast_dgetfield , "fast_dgetfield" , "bJJ" , NULL , T_DOUBLE , 0, true , _getfield );
+ def(_fast_fgetfield , "fast_fgetfield" , "bJJ" , NULL , T_FLOAT , 0, true , _getfield );
+ def(_fast_igetfield , "fast_igetfield" , "bJJ" , NULL , T_INT , 0, true , _getfield );
+ def(_fast_lgetfield , "fast_lgetfield" , "bJJ" , NULL , T_LONG , 0, true , _getfield );
+ def(_fast_sgetfield , "fast_sgetfield" , "bJJ" , NULL , T_SHORT , 0, true , _getfield );
+
+ def(_fast_aputfield , "fast_aputfield" , "bJJ" , NULL , T_OBJECT , 0, true , _putfield );
+ def(_fast_bputfield , "fast_bputfield" , "bJJ" , NULL , T_INT , 0, true , _putfield );
+ def(_fast_cputfield , "fast_cputfield" , "bJJ" , NULL , T_CHAR , 0, true , _putfield );
+ def(_fast_dputfield , "fast_dputfield" , "bJJ" , NULL , T_DOUBLE , 0, true , _putfield );
+ def(_fast_fputfield , "fast_fputfield" , "bJJ" , NULL , T_FLOAT , 0, true , _putfield );
+ def(_fast_iputfield , "fast_iputfield" , "bJJ" , NULL , T_INT , 0, true , _putfield );
+ def(_fast_lputfield , "fast_lputfield" , "bJJ" , NULL , T_LONG , 0, true , _putfield );
+ def(_fast_sputfield , "fast_sputfield" , "bJJ" , NULL , T_SHORT , 0, true , _putfield );
def(_fast_aload_0 , "fast_aload_0" , "b" , NULL , T_OBJECT , 1, true , _aload_0 );
- def(_fast_iaccess_0 , "fast_iaccess_0" , "b_jj" , NULL , T_INT , 1, true , _aload_0 );
- def(_fast_aaccess_0 , "fast_aaccess_0" , "b_jj" , NULL , T_OBJECT , 1, true , _aload_0 );
- def(_fast_faccess_0 , "fast_faccess_0" , "b_jj" , NULL , T_OBJECT , 1, true , _aload_0 );
+ def(_fast_iaccess_0 , "fast_iaccess_0" , "b_JJ" , NULL , T_INT , 1, true , _aload_0 );
+ def(_fast_aaccess_0 , "fast_aaccess_0" , "b_JJ" , NULL , T_OBJECT , 1, true , _aload_0 );
+ def(_fast_faccess_0 , "fast_faccess_0" , "b_JJ" , NULL , T_OBJECT , 1, true , _aload_0 );
def(_fast_iload , "fast_iload" , "bi" , NULL , T_INT , 1, false, _iload);
def(_fast_iload2 , "fast_iload2" , "bi_i" , NULL , T_INT , 2, false, _iload);
def(_fast_icaload , "fast_icaload" , "bi_" , NULL , T_INT , 0, false, _iload);
// Faster method invocation.
- def(_fast_invokevfinal , "fast_invokevfinal" , "bjj" , NULL , T_ILLEGAL, -1, true, _invokevirtual );
+ def(_fast_invokevfinal , "fast_invokevfinal" , "bJJ" , NULL , T_ILLEGAL, -1, true, _invokevirtual );
def(_fast_linearswitch , "fast_linearswitch" , "" , NULL , T_VOID , -1, false, _lookupswitch );
def(_fast_binaryswitch , "fast_binaryswitch" , "" , NULL , T_VOID , -1, false, _lookupswitch );
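
A standalone sketch of the new length encoding (a plain C++ mirror of Bytecodes::def, not VM code). The short and wide format lengths now share one byte, a low and a high nibble, which is what length_for() and wide_length_for() unpack without any check(code) assert. For "bJJ", compute_flags() above yields exactly the _fmt_bJJ syndrome declared in the header (_fmt_b | _fmt_has_j | _fmt_has_u2 | _fmt_has_nbo):

    #include <cassert>
    #include <cstring>

    static unsigned char pack_lengths(const char* fmt, const char* wide_fmt) {
      int len  = fmt      ? (int) std::strlen(fmt)      : 0;
      int wlen = wide_fmt ? (int) std::strlen(wide_fmt) : 0;
      return (unsigned char)((wlen << 4) | (len & 0xF));  // as in Bytecodes::def
    }

    int main() {
      unsigned char l = pack_lengths("bi", "wbii");        // iload: "bi" / "wbii"
      assert((l & 0xF) == 2);                              // length_for
      assert((l >> 4)  == 4);                              // wide_length_for
      assert(pack_lengths("bJJ", NULL) == 3);              // getfield: no wide form
      return 0;
    }
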
diff --git a/src/share/vm/interpreter/bytecodes.hpp b/src/share/vm/interpreter/bytecodes.hpp
index 22e8919c3..3dd756df7 100644
--- a/src/share/vm/interpreter/bytecodes.hpp
+++ b/src/share/vm/interpreter/bytecodes.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -280,17 +280,43 @@ class Bytecodes: AllStatic {
number_of_codes
};
+ // Flag bits derived from format strings, can_trap, can_rewrite, etc.:
+ enum Flags {
+ // semantic flags:
+ _bc_can_trap = 1<<0, // bytecode execution can trap or block
+ _bc_can_rewrite = 1<<1, // bytecode execution has an alternate form
+
+ // format bits (determined only by the format string):
+ _fmt_has_c = 1<<2, // constant, such as sipush "bcc"
+ _fmt_has_j = 1<<3, // constant pool cache index, such as getfield "bjj"
+ _fmt_has_k = 1<<4, // constant pool index, such as ldc "bk"
+ _fmt_has_i = 1<<5, // local index, such as iload
+ _fmt_has_o = 1<<6, // offset, such as ifeq
+ _fmt_has_nbo = 1<<7, // contains native-order field(s)
+ _fmt_has_u2 = 1<<8, // contains double-byte field(s)
+ _fmt_has_u4 = 1<<9, // contains quad-byte field
+ _fmt_not_variable = 1<<10, // not of variable length (simple or wide)
+ _fmt_not_simple = 1<<11, // either wide or variable length
+ _all_fmt_bits = (_fmt_not_simple*2 - _fmt_has_c),
+
+ // Example derived format syndromes:
+ _fmt_b = _fmt_not_variable,
+ _fmt_bc = _fmt_b | _fmt_has_c,
+ _fmt_bi = _fmt_b | _fmt_has_i,
+ _fmt_bkk = _fmt_b | _fmt_has_k | _fmt_has_u2,
+ _fmt_bJJ = _fmt_b | _fmt_has_j | _fmt_has_u2 | _fmt_has_nbo,
+ _fmt_bo2 = _fmt_b | _fmt_has_o | _fmt_has_u2,
+ _fmt_bo4 = _fmt_b | _fmt_has_o | _fmt_has_u4
+ };
+
private:
static bool _is_initialized;
static const char* _name [number_of_codes];
- static const char* _format [number_of_codes];
- static const char* _wide_format [number_of_codes];
static BasicType _result_type [number_of_codes];
static s_char _depth [number_of_codes];
- static u_char _length [number_of_codes];
- static bool _can_trap [number_of_codes];
+ static u_char _lengths [number_of_codes];
static Code _java_code [number_of_codes];
- static bool _can_rewrite [number_of_codes];
+ static jchar _flags [(1<<BitsPerByte)*2]; // all second page for wide formats
static void def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap);
static void def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code);
@@ -322,24 +348,20 @@ class Bytecodes: AllStatic {
static Code non_breakpoint_code_at(address bcp, methodOop method = NULL);
// Bytecode attributes
- static bool is_defined (int code) { return 0 <= code && code < number_of_codes && _format[code] != NULL; }
- static bool wide_is_defined(int code) { return is_defined(code) && _wide_format[code] != NULL; }
+ static bool is_defined (int code) { return 0 <= code && code < number_of_codes && flags(code, false) != 0; }
+ static bool wide_is_defined(int code) { return is_defined(code) && flags(code, true) != 0; }
static const char* name (Code code) { check(code); return _name [code]; }
- static const char* format (Code code) { check(code); return _format [code]; }
- static const char* wide_format (Code code) { return _wide_format[code]; }
static BasicType result_type (Code code) { check(code); return _result_type [code]; }
static int depth (Code code) { check(code); return _depth [code]; }
- static int length_for (Code code) { return _length[code]; }
- static bool can_trap (Code code) { check(code); return _can_trap [code]; }
+ // Note: Length functions must return <=0 for invalid bytecodes.
+ // Calling check(code) in length functions would throw an unwanted assert.
+ static int length_for (Code code) { /*no check*/ return _lengths [code] & 0xF; }
+ static int wide_length_for(Code code) { /*no check*/ return _lengths [code] >> 4; }
+ static bool can_trap (Code code) { check(code); return has_all_flags(code, _bc_can_trap, false); }
static Code java_code (Code code) { check(code); return _java_code [code]; }
- static bool can_rewrite (Code code) { check(code); return _can_rewrite [code]; }
- static int wide_length_for(Code code) {
- if (!is_defined(code)) {
- return 0;
- }
- const char* wf = wide_format(code);
- return (wf == NULL) ? 0 : (int)strlen(wf);
- }
+ static bool can_rewrite (Code code) { check(code); return has_all_flags(code, _bc_can_rewrite, false); }
+ static bool native_byte_order(Code code) { check(code); return has_all_flags(code, _fmt_has_nbo, false); }
+ static bool uses_cp_cache (Code code) { check(code); return has_all_flags(code, _fmt_has_j, false); }
// if 'end' is provided, it indicates the end of the code buffer which
// should not be read past when parsing.
static int special_length_at(address bcp, address end = NULL);
@@ -355,6 +377,16 @@ class Bytecodes: AllStatic {
static bool is_zero_const (Code code) { return (code == _aconst_null || code == _iconst_0
|| code == _fconst_0 || code == _dconst_0); }
+ static int compute_flags (const char* format, int more_flags = 0); // compute the flags
+ static int flags (int code, bool is_wide) {
+ assert(code == (u_char)code, "must be a byte");
+ return _flags[code + (is_wide ? (1<<BitsPerByte) : 0)];
+ }
+ static int format_bits (Code code, bool is_wide) { return flags(code, is_wide) & _all_fmt_bits; }
+ static bool has_all_flags (Code code, int test_flags, bool is_wide) {
+ return (flags(code, is_wide) & test_flags) == test_flags;
+ }
+
// Initialization
static void initialize ();
};
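
A hedged illustration of the table-driven attribute queries (the wrapper function is hypothetical; the two Bytecodes calls are the ones declared above):

    // True for rewritten member references such as getfield ("bJJ"), whose
    // u2 operand is a CP cache index in native byte order; false for "bkk"
    // bytecodes such as ldc_w, whose operand stays a Java-order CP index.
    static bool operand_is_native_cpcache_index(Bytecodes::Code code) {
      return Bytecodes::uses_cp_cache(code) && Bytecodes::native_byte_order(code);
    }
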
diff --git a/src/share/vm/interpreter/interpreter.cpp b/src/share/vm/interpreter/interpreter.cpp
index eb398dd7d..b13633373 100644
--- a/src/share/vm/interpreter/interpreter.cpp
+++ b/src/share/vm/interpreter/interpreter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -226,8 +226,9 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
// not yet been executed (in Java semantics, not in actual operation).
bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) {
address bcp = method->bcp_from(bci);
+ Bytecodes::Code code = Bytecodes::code_at(bcp, method());
- if (!Bytecode_at(bcp)->must_rewrite()) {
+ if (!Bytecode_at(bcp)->must_rewrite(code)) {
// might have been reached
return false;
}
diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp
index ace796404..3c7009e7e 100644
--- a/src/share/vm/interpreter/interpreterRuntime.cpp
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp
@@ -63,7 +63,7 @@ void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread *thread) {
IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
// access constant pool
constantPoolOop pool = method(thread)->constants();
- int index = wide ? two_byte_index(thread) : one_byte_index(thread);
+ int index = wide ? get_index_u2(thread, Bytecodes::_ldc_w) : get_index_u1(thread, Bytecodes::_ldc);
constantTag tag = pool->tag_at(index);
if (tag.is_unresolved_klass() || tag.is_klass()) {
@@ -135,7 +135,7 @@ IRT_END
IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
// We may want to pass in more arguments - could make this slightly faster
constantPoolOop constants = method(thread)->constants();
- int i = two_byte_index(thread);
+ int i = get_index_u2(thread, Bytecodes::_multianewarray);
klassOop klass = constants->klass_at(i, CHECK);
int nof_dims = number_of_dimensions(thread);
assert(oop(klass)->is_klass(), "not a class");
@@ -169,7 +169,7 @@ IRT_END
// Quicken instance-of and check-cast bytecodes
IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
// Force resolving; quicken the bytecode
- int which = two_byte_index(thread);
+ int which = get_index_u2(thread, Bytecodes::_checkcast);
constantPoolOop cpool = method(thread)->constants();
// We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
// program we might have seen an unquick'd bytecode in the interpreter but have another
@@ -463,7 +463,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
{
JvmtiHideSingleStepping jhss(thread);
- LinkResolver::resolve_field(info, pool, two_byte_index(thread),
+ LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
bytecode, false, CHECK);
} // end JvmtiHideSingleStepping
@@ -634,7 +634,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
{
JvmtiHideSingleStepping jhss(thread);
LinkResolver::resolve_invoke(info, receiver, pool,
- two_byte_index(thread), bytecode, CHECK);
+ get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
int retry_count = 0;
while (info.resolved_method()->is_old()) {
@@ -645,7 +645,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
"Could not resolve to latest version of redefined method");
// method is redefined in the middle of resolve so re-try.
LinkResolver::resolve_invoke(info, receiver, pool,
- two_byte_index(thread), bytecode, CHECK);
+ get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
}
}
} // end JvmtiHideSingleStepping
@@ -704,7 +704,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
caller_bci = caller_method->bci_from(caller_bcp);
site_index = Bytes::get_native_u4(caller_bcp+1);
}
- assert(site_index == four_byte_index(thread), "");
+ assert(site_index == InterpreterRuntime::bytecode(thread)->get_index_u4(bytecode), "");
assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format");
// there is a second CPC entry that is of interest; it caches signature info:
int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
diff --git a/src/share/vm/interpreter/interpreterRuntime.hpp b/src/share/vm/interpreter/interpreterRuntime.hpp
index 7bd284472..3d36a4524 100644
--- a/src/share/vm/interpreter/interpreterRuntime.hpp
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,9 +40,13 @@ class InterpreterRuntime: AllStatic {
return Bytecodes::code_at(bcp(thread), method(thread));
}
static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
- static int one_byte_index(JavaThread *thread) { return bcp(thread)[1]; }
- static int two_byte_index(JavaThread *thread) { return Bytes::get_Java_u2(bcp(thread) + 1); }
- static int four_byte_index(JavaThread *thread) { return Bytes::get_native_u4(bcp(thread) + 1); }
+ static Bytecode* bytecode(JavaThread *thread) { return Bytecode_at(bcp(thread)); }
+ static int get_index_u1(JavaThread *thread, Bytecodes::Code bc)
+ { return bytecode(thread)->get_index_u1(bc); }
+ static int get_index_u2(JavaThread *thread, Bytecodes::Code bc)
+ { return bytecode(thread)->get_index_u2(bc); }
+ static int get_index_u2_cpcache(JavaThread *thread, Bytecodes::Code bc)
+ { return bytecode(thread)->get_index_u2_cpcache(bc); }
static int number_of_dimensions(JavaThread *thread) { return bcp(thread)[3]; }
static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i) { return method(thread)->constants()->cache()->entry_at(i); }
diff --git a/src/share/vm/interpreter/rewriter.cpp b/src/share/vm/interpreter/rewriter.cpp
index 5cca8eb8d..815bb22af 100644
--- a/src/share/vm/interpreter/rewriter.cpp
+++ b/src/share/vm/interpreter/rewriter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,16 +103,15 @@ void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
// Rewrite a classfile-order CP index into a native-order CPC index.
-int Rewriter::rewrite_member_reference(address bcp, int offset) {
+void Rewriter::rewrite_member_reference(address bcp, int offset) {
address p = bcp + offset;
int cp_index = Bytes::get_Java_u2(p);
int cache_index = cp_entry_to_cp_cache(cp_index);
Bytes::put_native_u2(p, cache_index);
- return cp_index;
}
-void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) {
+void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
address p = bcp + offset;
assert(p[-1] == Bytecodes::_invokedynamic, "");
int cp_index = Bytes::get_Java_u2(p);
@@ -178,7 +177,7 @@ void Rewriter::scan_method(methodOop method) {
case Bytecodes::_lookupswitch : {
#ifndef CC_INTERP
Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
- bc->set_code(
+ (*bcp) = (
bc->number_of_pairs() < BinarySwitchThreshold
? Bytecodes::_fast_linearswitch
: Bytecodes::_fast_binaryswitch
@@ -197,7 +196,7 @@ void Rewriter::scan_method(methodOop method) {
rewrite_member_reference(bcp, prefix_length+1);
break;
case Bytecodes::_invokedynamic:
- rewrite_invokedynamic(bcp, prefix_length+1, int(sizeof"@@@@DELETE ME"));
+ rewrite_invokedynamic(bcp, prefix_length+1);
break;
case Bytecodes::_jsr : // fall through
case Bytecodes::_jsr_w : nof_jsrs++; break;
@@ -308,5 +307,19 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArray
// Set up method entry points for compiler and interpreter.
m->link_method(m, CHECK);
+
+#ifdef ASSERT
+ if (StressMethodComparator) {
+ static int nmc = 0;
+ for (int j = i; j >= 0 && j >= i-4; j--) {
+ if ((++nmc % 1000) == 0) tty->print_cr("Have run MethodComparator %d times...", nmc);
+ bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
+ if (j == i && !z) {
+ tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
+ assert(z, "method must compare equal to itself");
+ }
+ }
+ }
+#endif //ASSERT
}
}
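
A standalone restatement of the two-byte rewrite (assumptions: the Bytes helpers are re-implemented locally and cp_entry_to_cp_cache() is replaced by a stand-in; only the byte-order behavior is the point). The rewriter reads a big-endian ("Java order") CP index and stores the CP cache index back in native order, which is why the affected format strings become "bJJ":

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static uint16_t get_Java_u2(const uint8_t* p) {      // big-endian read
      return (uint16_t)((p[0] << 8) | p[1]);
    }
    static void put_native_u2(uint8_t* p, uint16_t v) {  // host-order store
      std::memcpy(p, &v, sizeof v);
    }

    int main() {
      uint8_t operand[2] = { 0x00, 0x2A };               // CP index 42, Java order
      uint16_t cp_index    = get_Java_u2(operand);
      uint16_t cache_index = (uint16_t)(cp_index + 100); // stand-in mapping
      put_native_u2(operand, cache_index);
      uint16_t back;
      std::memcpy(&back, operand, sizeof back);
      assert(cp_index == 42 && back == cache_index);     // readable without swapping
      return 0;
    }
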
diff --git a/src/share/vm/interpreter/rewriter.hpp b/src/share/vm/interpreter/rewriter.hpp
index 68cc9d76c..0135f7636 100644
--- a/src/share/vm/interpreter/rewriter.hpp
+++ b/src/share/vm/interpreter/rewriter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,8 +64,8 @@ class Rewriter: public StackObj {
void scan_method(methodOop m);
methodHandle rewrite_jsrs(methodHandle m, TRAPS);
void rewrite_Object_init(methodHandle m, TRAPS);
- int rewrite_member_reference(address bcp, int offset);
- void rewrite_invokedynamic(address bcp, int offset, int cp_index);
+ void rewrite_member_reference(address bcp, int offset);
+ void rewrite_invokedynamic(address bcp, int offset);
public:
// Driver routine:
diff --git a/src/share/vm/interpreter/templateTable.cpp b/src/share/vm/interpreter/templateTable.cpp
index c392eda77..677589795 100644
--- a/src/share/vm/interpreter/templateTable.cpp
+++ b/src/share/vm/interpreter/templateTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -434,15 +434,15 @@ void TemplateTable::initialize() {
def(Bytecodes::_dreturn , ____|disp|clvm|____, dtos, dtos, _return , dtos );
def(Bytecodes::_areturn , ____|disp|clvm|____, atos, atos, _return , atos );
def(Bytecodes::_return , ____|disp|clvm|____, vtos, vtos, _return , vtos );
- def(Bytecodes::_getstatic , ubcp|____|clvm|____, vtos, vtos, getstatic , 1 );
- def(Bytecodes::_putstatic , ubcp|____|clvm|____, vtos, vtos, putstatic , 2 );
- def(Bytecodes::_getfield , ubcp|____|clvm|____, vtos, vtos, getfield , 1 );
- def(Bytecodes::_putfield , ubcp|____|clvm|____, vtos, vtos, putfield , 2 );
- def(Bytecodes::_invokevirtual , ubcp|disp|clvm|____, vtos, vtos, invokevirtual , 2 );
- def(Bytecodes::_invokespecial , ubcp|disp|clvm|____, vtos, vtos, invokespecial , 1 );
- def(Bytecodes::_invokestatic , ubcp|disp|clvm|____, vtos, vtos, invokestatic , 1 );
- def(Bytecodes::_invokeinterface , ubcp|disp|clvm|____, vtos, vtos, invokeinterface , 1 );
- def(Bytecodes::_invokedynamic , ubcp|disp|clvm|____, vtos, vtos, invokedynamic , 1 );
+ def(Bytecodes::_getstatic , ubcp|____|clvm|____, vtos, vtos, getstatic , f1_byte );
+ def(Bytecodes::_putstatic , ubcp|____|clvm|____, vtos, vtos, putstatic , f2_byte );
+ def(Bytecodes::_getfield , ubcp|____|clvm|____, vtos, vtos, getfield , f1_byte );
+ def(Bytecodes::_putfield , ubcp|____|clvm|____, vtos, vtos, putfield , f2_byte );
+ def(Bytecodes::_invokevirtual , ubcp|disp|clvm|____, vtos, vtos, invokevirtual , f2_byte );
+ def(Bytecodes::_invokespecial , ubcp|disp|clvm|____, vtos, vtos, invokespecial , f1_byte );
+ def(Bytecodes::_invokestatic , ubcp|disp|clvm|____, vtos, vtos, invokestatic , f1_byte );
+ def(Bytecodes::_invokeinterface , ubcp|disp|clvm|____, vtos, vtos, invokeinterface , f1_byte );
+ def(Bytecodes::_invokedynamic , ubcp|disp|clvm|____, vtos, vtos, invokedynamic , f1_oop );
def(Bytecodes::_new , ubcp|____|clvm|____, vtos, atos, _new , _ );
def(Bytecodes::_newarray , ubcp|____|clvm|____, itos, atos, newarray , _ );
def(Bytecodes::_anewarray , ubcp|____|clvm|____, itos, atos, anewarray , _ );
@@ -502,7 +502,7 @@ void TemplateTable::initialize() {
def(Bytecodes::_fast_iload2 , ubcp|____|____|____, vtos, itos, fast_iload2 , _ );
def(Bytecodes::_fast_icaload , ubcp|____|____|____, vtos, itos, fast_icaload , _ );
- def(Bytecodes::_fast_invokevfinal , ubcp|disp|clvm|____, vtos, vtos, fast_invokevfinal , 2 );
+ def(Bytecodes::_fast_invokevfinal , ubcp|disp|clvm|____, vtos, vtos, fast_invokevfinal , f2_byte );
def(Bytecodes::_fast_linearswitch , ubcp|disp|____|____, itos, vtos, fast_linearswitch , _ );
def(Bytecodes::_fast_binaryswitch , ubcp|disp|____|____, itos, vtos, fast_binaryswitch , _ );
diff --git a/src/share/vm/interpreter/templateTable.hpp b/src/share/vm/interpreter/templateTable.hpp
index 9a455c076..16145d633 100644
--- a/src/share/vm/interpreter/templateTable.hpp
+++ b/src/share/vm/interpreter/templateTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,6 +73,7 @@ class TemplateTable: AllStatic {
public:
enum Operation { add, sub, mul, div, rem, _and, _or, _xor, shl, shr, ushr };
enum Condition { equal, not_equal, less, less_equal, greater, greater_equal };
+ enum CacheByte { f1_byte = 1, f2_byte = 2, f1_oop = 0x11 }; // byte_no codes
private:
static bool _is_initialized; // true if TemplateTable has been initialized
@@ -244,13 +245,18 @@ class TemplateTable: AllStatic {
static void _return(TosState state);
- static void resolve_cache_and_index(int byte_no, Register cache, Register index);
+ static void resolve_cache_and_index(int byte_no, // one of 1,2,11
+ Register result , // either noreg or output for f1/f2
+ Register cache, // output for CP cache
+ Register index, // output for CP index
+ size_t index_size); // one of 1,2,4
static void load_invoke_cp_cache_entry(int byte_no,
Register method,
Register itable_index,
Register flags,
- bool is_invokevirtual = false,
- bool is_virtual_final = false);
+ bool is_invokevirtual,
+ bool is_virtual_final,
+ bool is_invokedynamic);
static void load_field_cp_cache_entry(Register obj,
Register cache,
Register index,
diff --git a/src/share/vm/memory/iterator.cpp b/src/share/vm/memory/iterator.cpp
index 52ff5a393..683a1e3ab 100644
--- a/src/share/vm/memory/iterator.cpp
+++ b/src/share/vm/memory/iterator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,8 +58,8 @@ MarkingCodeBlobClosure::MarkScope::~MarkScope() {
}
void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
- if (!cb->is_nmethod()) return;
- nmethod* nm = (nmethod*) cb;
+ nmethod* nm = cb->as_nmethod_or_null();
+ if (nm == NULL) return;
if (!nm->test_set_oops_do_mark()) {
NOT_PRODUCT(if (TraceScavenge) nm->print_on(tty, "oops_do, 1st visit\n"));
do_newly_marked_nmethod(nm);
@@ -74,11 +74,14 @@ void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
if (!_do_marking) {
- NOT_PRODUCT(if (TraceScavenge && Verbose && cb->is_nmethod()) ((nmethod*)cb)->print_on(tty, "oops_do, unmarked visit\n"));
+ nmethod* nm = cb->as_nmethod_or_null();
+ NOT_PRODUCT(if (TraceScavenge && Verbose && nm != NULL) nm->print_on(tty, "oops_do, unmarked visit\n"));
// This assert won't work, since there are lots of mini-passes
// (mostly in debug mode) that co-exist with marking phases.
//assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
- cb->oops_do(_cl);
+ if (nm != NULL) {
+ nm->oops_do(_cl);
+ }
} else {
MarkingCodeBlobClosure::do_code_blob(cb);
}
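
The as_nmethod_or_null() accessor folds the old is_nmethod()-then-cast pair into one checked downcast. A minimal sketch of the pattern (hypothetical class shapes; the real CodeBlob/nmethod hierarchy is richer):

    class nmethod;

    class CodeBlob {
     public:
      virtual ~CodeBlob() {}
      virtual bool is_nmethod() const { return false; }
      nmethod* as_nmethod_or_null();   // NULL when the blob is not an nmethod
    };

    class nmethod : public CodeBlob {
     public:
      virtual bool is_nmethod() const { return true; }
    };

    inline nmethod* CodeBlob::as_nmethod_or_null() {
      return is_nmethod() ? (nmethod*) this : 0;  // one test, one cast
    }
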
diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
index 3dc236199..f20d82c9b 100644
--- a/src/share/vm/memory/space.cpp
+++ b/src/share/vm/memory/space.cpp
@@ -861,9 +861,9 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
}
size = align_object_size(size);
- const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
- if (size >= min_int_array_size) {
- size_t length = (size - min_int_array_size) * (HeapWordSize / sizeof(jint));
+ const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
+ if (size >= (size_t)align_object_size(array_header_size)) {
+ size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
// allocate uninitialized int array
typeArrayOop t = (typeArrayOop) allocate(size);
assert(t != NULL, "allocation should succeed");
@@ -871,7 +871,7 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
t->set_klass(Universe::intArrayKlassObj());
t->set_length((int)length);
} else {
- assert((int) size == instanceOopDesc::header_size(),
+ assert(size == CollectedHeap::min_fill_size(),
"size for smallest fake object doesn't match");
instanceOop obj = (instanceOop) allocate(size);
obj->set_mark(markOopDesc::prototype());
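
A quick arithmetic check of the filler sizing above (assumptions: a 64-bit heap word, HeapWordSize == 8 and sizeof(jint) == 4, so two array elements per word, and a 2-word int-array header; plain C++ stand-in, not VM code):

    #include <cassert>

    int main() {
      const int HeapWordSize = 8, jint_size = 4;  // assumed 64-bit layout
      int size   = 10;                            // filler size in heap words
      int header = 2;                             // assumed array header, in words
      int length = (size - header) * (HeapWordSize / jint_size);
      assert(length == 16);                       // 8 words of payload = 16 jints
      return 0;
    }
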
diff --git a/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp b/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp
index 1a6a4d1be..712abe437 100644
--- a/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp
+++ b/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp
@@ -31,7 +31,7 @@ inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) {
// Skip mangling the space corresponding to the object header to
// ensure that the returned space is not considered parsable by
// any concurrent GC thread.
- size_t hdr_size = CollectedHeap::min_fill_size();
+ size_t hdr_size = oopDesc::header_size();
Copy::fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal);
#endif // ASSERT
// This addition is safe because we know that top is
diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp
index a5762c023..f0595761a 100644
--- a/src/share/vm/memory/universe.cpp
+++ b/src/share/vm/memory/universe.cpp
@@ -748,7 +748,7 @@ jint universe_init() {
// 4Gb
static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
-static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;
+// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
size_t base = 0;
@@ -1261,7 +1261,7 @@ static void calculate_verify_data(uintptr_t verify_data[2],
// decide which low-order bits we require to be clear:
size_t alignSize = MinObjAlignmentInBytes;
- size_t min_object_size = oopDesc::header_size();
+ size_t min_object_size = CollectedHeap::min_fill_size();
// make an inclusive limit:
uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
diff --git a/src/share/vm/oops/arrayOop.hpp b/src/share/vm/oops/arrayOop.hpp
index b0a8530ec..c1ad41474 100644
--- a/src/share/vm/oops/arrayOop.hpp
+++ b/src/share/vm/oops/arrayOop.hpp
@@ -92,7 +92,7 @@ class arrayOopDesc : public oopDesc {
static int header_size(BasicType type) {
size_t typesize_in_bytes = header_size_in_bytes();
return (int)(Universe::element_type_should_be_aligned(type)
- ? align_object_size(typesize_in_bytes/HeapWordSize)
+ ? align_object_offset(typesize_in_bytes/HeapWordSize)
: typesize_in_bytes/HeapWordSize);
}
diff --git a/src/share/vm/oops/constantPoolKlass.cpp b/src/share/vm/oops/constantPoolKlass.cpp
index c13d07692..87c5ed3fe 100644
--- a/src/share/vm/oops/constantPoolKlass.cpp
+++ b/src/share/vm/oops/constantPoolKlass.cpp
@@ -310,15 +310,12 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
Klass::oop_print_on(obj, st);
constantPoolOop cp = constantPoolOop(obj);
if (cp->flags() != 0) {
- st->print(" - flags : 0x%x", cp->flags());
+ st->print(" - flags: 0x%x", cp->flags());
if (cp->has_pseudo_string()) st->print(" has_pseudo_string");
if (cp->has_invokedynamic()) st->print(" has_invokedynamic");
st->cr();
}
-
- // Temp. remove cache so we can do lookups with original indicies.
- constantPoolCacheHandle cache (THREAD, cp->cache());
- cp->set_cache(NULL);
+ st->print_cr(" - cache: " INTPTR_FORMAT, cp->cache());
for (int index = 1; index < cp->length(); index++) { // Index 0 is unused
st->print(" - %3d : ", index);
@@ -334,8 +331,8 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
case JVM_CONSTANT_Fieldref :
case JVM_CONSTANT_Methodref :
case JVM_CONSTANT_InterfaceMethodref :
- st->print("klass_index=%d", cp->klass_ref_index_at(index));
- st->print(" name_and_type_index=%d", cp->name_and_type_ref_index_at(index));
+ st->print("klass_index=%d", cp->uncached_klass_ref_index_at(index));
+ st->print(" name_and_type_index=%d", cp->uncached_name_and_type_ref_index_at(index));
break;
case JVM_CONSTANT_UnresolvedString :
case JVM_CONSTANT_String :
@@ -382,9 +379,6 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
st->cr();
}
st->cr();
-
- // Restore cache
- cp->set_cache(cache());
}
#endif
@@ -398,6 +392,9 @@ void constantPoolKlass::oop_print_value_on(oop obj, outputStream* st) {
cp->print_address_on(st);
st->print(" for ");
cp->pool_holder()->print_value_on(st);
+ if (cp->cache() != NULL) {
+ st->print(" cache=" PTR_FORMAT, cp->cache());
+ }
}
const char* constantPoolKlass::internal_name() const {
diff --git a/src/share/vm/oops/constantPoolOop.cpp b/src/share/vm/oops/constantPoolOop.cpp
index ddd97f4b6..11b10e3d7 100644
--- a/src/share/vm/oops/constantPoolOop.cpp
+++ b/src/share/vm/oops/constantPoolOop.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -297,11 +297,9 @@ int constantPoolOopDesc::impl_klass_ref_index_at(int which, bool uncached) {
int constantPoolOopDesc::remap_instruction_operand_from_cache(int operand) {
- // Operand was fetched by a stream using get_Java_u2, yet was stored
- // by Rewriter::rewrite_member_reference in native order.
- // So now we have to fix the damage by swapping back to native order.
- assert((int)(u2)operand == operand, "clean u2");
- int cpc_index = Bytes::swap_u2(operand);
+ int cpc_index = operand;
+ DEBUG_ONLY(cpc_index -= CPCACHE_INDEX_TAG);
+ assert((int)(u2)cpc_index == cpc_index, "clean u2");
int member_index = cache()->entry_at(cpc_index)->constant_pool_index();
return member_index;
}
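
A minimal sketch of the debug-only biasing scheme, with hypothetical helper names; the point is that a raw constant-pool index handed in where a cache index is expected now fails the u2 assert instead of silently indexing the cache:

    #include <cassert>
    #include <cstdint>

    #ifndef NDEBUG
    const int kCpCacheIndexTag = 0x10000;  // mirrors CPCACHE_INDEX_TAG
    #else
    const int kCpCacheIndexTag = 0;        // no bias in product builds
    #endif

    int tag_cpcache_index(int raw)      { return raw + kCpCacheIndexTag; }
    int untag_cpcache_index(int tagged) {
      int raw = tagged - kCpCacheIndexTag;
      // An unbiased CP index, e.g. 42, becomes 42 - 0x10000 here and trips
      // the "clean u2" check, just like the assert in the hunk above.
      assert(raw == (int)(uint16_t)raw && "clean u2");
      return raw;
    }
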
diff --git a/src/share/vm/oops/constantPoolOop.hpp b/src/share/vm/oops/constantPoolOop.hpp
index 53bf61e1d..8609d7ace 100644
--- a/src/share/vm/oops/constantPoolOop.hpp
+++ b/src/share/vm/oops/constantPoolOop.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -434,6 +434,10 @@ class constantPoolOopDesc : public oopDesc {
// Debugging
const char* printable_name_at(int which) PRODUCT_RETURN0;
+#ifdef ASSERT
+ enum { CPCACHE_INDEX_TAG = 0x10000 }; // helps keep CP cache indices distinct from CP indices
+#endif //ASSERT
+
private:
symbolOop impl_name_ref_at(int which, bool uncached);
@@ -441,7 +445,7 @@ class constantPoolOopDesc : public oopDesc {
int impl_klass_ref_index_at(int which, bool uncached);
int impl_name_and_type_ref_index_at(int which, bool uncached);
- int remap_instruction_operand_from_cache(int operand);
+ int remap_instruction_operand_from_cache(int operand); // operand must be biased by CPCACHE_INDEX_TAG
// Used while constructing constant pool (only by ClassFileParser)
jint klass_index_at(int which) {
diff --git a/src/share/vm/oops/generateOopMap.cpp b/src/share/vm/oops/generateOopMap.cpp
index fea2a0a4c..952e2661e 100644
--- a/src/share/vm/oops/generateOopMap.cpp
+++ b/src/share/vm/oops/generateOopMap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1254,7 +1254,7 @@ void GenerateOopMap::print_current_state(outputStream *os,
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
- int idx = currentBC->get_index_int();
+ int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2();
constantPoolOop cp = method()->constants();
int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx);
int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx);
@@ -1286,7 +1286,7 @@ void GenerateOopMap::print_current_state(outputStream *os,
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
- int idx = currentBC->get_index_int();
+ int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2();
constantPoolOop cp = method()->constants();
int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx);
int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx);
@@ -1356,8 +1356,8 @@ void GenerateOopMap::interp1(BytecodeStream *itr) {
case Bytecodes::_ldc2_w: ppush(vvCTS); break;
- case Bytecodes::_ldc: do_ldc(itr->get_index(), itr->bci()); break;
- case Bytecodes::_ldc_w: do_ldc(itr->get_index_big(), itr->bci());break;
+ case Bytecodes::_ldc: do_ldc(itr->get_index(), itr->bci()); break;
+ case Bytecodes::_ldc_w: do_ldc(itr->get_index_u2(), itr->bci()); break;
case Bytecodes::_iload:
case Bytecodes::_fload: ppload(vCTS, itr->get_index()); break;
@@ -1550,17 +1550,17 @@ void GenerateOopMap::interp1(BytecodeStream *itr) {
case Bytecodes::_jsr_w: do_jsr(itr->dest_w()); break;
case Bytecodes::_getstatic: do_field(true, true,
- itr->get_index_big(),
+ itr->get_index_u2_cpcache(),
itr->bci()); break;
- case Bytecodes::_putstatic: do_field(false, true, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_getfield: do_field(true, false, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_putfield: do_field(false, false, itr->get_index_big(), itr->bci()); break;
+ case Bytecodes::_putstatic: do_field(false, true, itr->get_index_u2_cpcache(), itr->bci()); break;
+ case Bytecodes::_getfield: do_field(true, false, itr->get_index_u2_cpcache(), itr->bci()); break;
+ case Bytecodes::_putfield: do_field(false, false, itr->get_index_u2_cpcache(), itr->bci()); break;
case Bytecodes::_invokevirtual:
- case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_invokedynamic: do_method(true, false, itr->get_index_int(), itr->bci()); break;
- case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_big(), itr->bci()); break;
+ case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_u2_cpcache(), itr->bci()); break;
+ case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_u2_cpcache(), itr->bci()); break;
+ case Bytecodes::_invokedynamic: do_method(true, false, itr->get_index_u4(), itr->bci()); break;
+ case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_u2_cpcache(), itr->bci()); break;
case Bytecodes::_newarray:
case Bytecodes::_anewarray: pp_new_ref(vCTS, itr->bci()); break;
case Bytecodes::_checkcast: do_checkcast(); break;
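
The renamed accessors encode byte order as well as width: plain constant-pool indices remain in Java (big-endian) order in the bytecode stream, while rewritten constant-pool-cache indices are stored in native order, as the removed comment in constantPoolOop.cpp above notes. Hypothetical readers sketching the two conventions:

    #include <cstdint>
    #include <cstring>

    uint16_t read_java_u2(const uint8_t* p) {    // e.g. get_index_u2 for ldc_w
      return (uint16_t)((p[0] << 8) | p[1]);     // big-endian, per class file order
    }
    uint16_t read_native_u2(const uint8_t* p) {  // e.g. get_index_u2_cpcache
      uint16_t v;
      std::memcpy(&v, p, sizeof v);              // native order, written by the rewriter
      return v;
    }
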
diff --git a/src/share/vm/oops/methodKlass.cpp b/src/share/vm/oops/methodKlass.cpp
index a664c4e8f..f0ba4ad84 100644
--- a/src/share/vm/oops/methodKlass.cpp
+++ b/src/share/vm/oops/methodKlass.cpp
@@ -237,7 +237,7 @@ void methodKlass::oop_print_on(oop obj, outputStream* st) {
Klass::oop_print_on(obj, st);
methodOop m = methodOop(obj);
// get the effect of PrintOopAddress, always, for methods:
- st->print (" - this oop: "INTPTR_FORMAT, (intptr_t)m);
+ st->print_cr(" - this oop: "INTPTR_FORMAT, (intptr_t)m);
st->print (" - method holder: "); m->method_holder()->print_value_on(st); st->cr();
st->print (" - constants: "INTPTR_FORMAT" ", (address)m->constants());
m->constants()->print_value_on(st); st->cr();
diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
index 63a03d323..952802c78 100644
--- a/src/share/vm/oops/oop.hpp
+++ b/src/share/vm/oops/oop.hpp
@@ -149,10 +149,6 @@ class oopDesc {
// Need this as public for garbage collection.
template <class T> T* obj_field_addr(int offset) const;
- // Oop encoding heap max
- static const uint64_t OopEncodingHeapMax =
- (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
-
static bool is_null(oop obj);
static bool is_null(narrowOop obj);
diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
index 3336fb20a..c840f46f9 100644
--- a/src/share/vm/oops/oop.inline.hpp
+++ b/src/share/vm/oops/oop.inline.hpp
@@ -146,8 +146,13 @@ inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
// offset from the heap base. Saving the check for null can save instructions
// in inner GC loops so these are separated.
+inline bool check_obj_alignment(oop obj) {
+ return (intptr_t)obj % MinObjAlignmentInBytes == 0;
+}
+
inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
assert(!is_null(v), "oop value can never be zero");
+ assert(check_obj_alignment(v), "Address not aligned");
assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
@@ -167,7 +172,9 @@ inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
assert(!is_null(v), "narrow oop value can never be zero");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
- return (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+ oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+ assert(check_obj_alignment(result), "Address not aligned");
+ return result;
}
inline oop oopDesc::decode_heap_oop(narrowOop v) {
@@ -522,10 +529,6 @@ inline bool oopDesc::has_bias_pattern() const {
return mark()->has_bias_pattern();
}
-inline bool check_obj_alignment(oop obj) {
- return (intptr_t)obj % MinObjAlignmentInBytes == 0;
-}
-
// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
@@ -600,6 +603,8 @@ inline bool oopDesc::is_forwarded() const {
// Used by scavengers
inline void oopDesc::forward_to(oop p) {
+ assert(check_obj_alignment(p),
+ "forwarding to something not aligned");
assert(Universe::heap()->is_in_reserved(p),
"forwarding to something not in heap");
markOop m = markOopDesc::encode_pointer_as_mark(p);
@@ -609,6 +614,8 @@ inline void oopDesc::forward_to(oop p) {
// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
+ assert(check_obj_alignment(p),
+ "forwarding to something not aligned");
assert(Universe::heap()->is_in_reserved(p),
"forwarding to something not in heap");
markOop m = markOopDesc::encode_pointer_as_mark(p);
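
A standalone sketch of the encode/decode arithmetic these new asserts guard, assuming a zero heap base and a 3-bit shift (both illustrative):

    #include <cassert>
    #include <cstdint>

    const int       kShift = 3;  // assumed: log2 of 8-byte object alignment
    const uintptr_t kBase  = 0;  // assumed: zero-based compressed oops

    uint32_t encode(uintptr_t addr) {
      assert(addr % (uintptr_t(1) << kShift) == 0 && "address not aligned");
      return (uint32_t)((addr - kBase) >> kShift);
    }
    uintptr_t decode(uint32_t v) {
      uintptr_t addr = kBase + ((uintptr_t)v << kShift);
      assert(addr % (uintptr_t(1) << kShift) == 0 && "address not aligned");
      return addr;
    }
    // decode(encode(a)) == a only when a is alignment-clean, which is exactly
    // what the new check_obj_alignment asserts enforce on both paths.
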
diff --git a/src/share/vm/opto/bytecodeInfo.cpp b/src/share/vm/opto/bytecodeInfo.cpp
index cba1aa453..b4178384b 100644
--- a/src/share/vm/opto/bytecodeInfo.cpp
+++ b/src/share/vm/opto/bytecodeInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -188,8 +188,8 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* calle
return NULL;
}
- // Always inline MethodHandle methods.
- if (callee_method->is_method_handle_invoke())
+ // Always inline MethodHandle methods and generated MethodHandle adapters.
+ if (callee_method->is_method_handle_invoke() || callee_method->is_method_handle_adapter())
return NULL;
// First check all inlining restrictions which are required for correctness
@@ -340,7 +340,7 @@ bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* call
Bytecodes::Code call_bc = iter.cur_bc();
// An invokedynamic instruction does not have a klass.
if (call_bc != Bytecodes::_invokedynamic) {
- int index = iter.get_index_int();
+ int index = iter.get_index_u2_cpcache();
if (!caller_method->is_klass_loaded(index, true)) {
return false;
}
diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
index c86237006..3152ef2e9 100644
--- a/src/share/vm/opto/compile.cpp
+++ b/src/share/vm/opto/compile.cpp
@@ -2176,14 +2176,14 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
#ifdef _LP64
case Op_CastPP:
- if (n->in(1)->is_DecodeN() && Universe::narrow_oop_use_implicit_null_checks()) {
+ if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
Compile* C = Compile::current();
Node* in1 = n->in(1);
const Type* t = n->bottom_type();
Node* new_in1 = in1->clone();
new_in1->as_DecodeN()->set_type(t);
- if (!Matcher::clone_shift_expressions) {
+ if (!Matcher::narrow_oop_use_complex_address()) {
//
// x86, ARM and friends can handle 2 adds in addressing mode
// and Matcher can fold a DecodeN node into address by using
@@ -2231,8 +2231,12 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
new_in2 = in2->in(1);
} else if (in2->Opcode() == Op_ConP) {
const Type* t = in2->bottom_type();
- if (t == TypePtr::NULL_PTR && Universe::narrow_oop_use_implicit_null_checks()) {
- new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
+ if (t == TypePtr::NULL_PTR) {
+        // Don't convert a CmpP null check into CmpN if the compressed
+        // oops implicit null check is not generated.
+        // This allows a normal oop implicit null check to be generated.
+ if (Matcher::gen_narrow_oop_implicit_null_checks())
+ new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
//
// This transformation together with CastPP transformation above
      // will generate code for implicit NULL checks for compressed oops.
@@ -2289,9 +2293,9 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
case Op_DecodeN:
assert(!n->in(1)->is_EncodeP(), "should be optimized out");
- // DecodeN could be pinned on Sparc where it can't be fold into
+    // DecodeN could be pinned when it can't be folded into
// an address expression, see the code for Op_CastPP above.
- assert(n->in(0) == NULL || !Matcher::clone_shift_expressions, "no control except on sparc");
+ assert(n->in(0) == NULL || !Matcher::narrow_oop_use_complex_address(), "no control");
break;
case Op_EncodeP: {
@@ -2496,6 +2500,10 @@ static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Re
}
}
+ // Skip next transformation if compressed oops are not used.
+ if (!UseCompressedOops || !Matcher::gen_narrow_oop_implicit_null_checks())
+ return;
+
// Go over safepoints nodes to skip DecodeN nodes for debug edges.
// It could be done for an uncommon traps or any safepoints/calls
// if the DecodeN node is referenced only in a debug info.
diff --git a/src/share/vm/opto/connode.cpp b/src/share/vm/opto/connode.cpp
index 996d2fc57..3fd660a9f 100644
--- a/src/share/vm/opto/connode.cpp
+++ b/src/share/vm/opto/connode.cpp
@@ -437,7 +437,7 @@ Node *ConstraintCastNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
// If not converting int->oop, throw away cast after constant propagation
Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
const Type *t = ccp->type(in(1));
- if (!t->isa_oop_ptr() || (in(1)->is_DecodeN() && Universe::narrow_oop_use_implicit_null_checks())) {
+ if (!t->isa_oop_ptr() || (in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks())) {
return NULL; // do not transform raw pointers or narrow oops
}
return ConstraintCastNode::Ideal_DU_postCCP(ccp);
diff --git a/src/share/vm/opto/lcm.cpp b/src/share/vm/opto/lcm.cpp
index 6199c9b4e..0afde9016 100644
--- a/src/share/vm/opto/lcm.cpp
+++ b/src/share/vm/opto/lcm.cpp
@@ -32,7 +32,8 @@
// with suitable memory ops nearby. Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
// The proj is the control projection for the not-null case.
-// The val is the pointer being checked for nullness.
+// The val is the pointer being checked for nullness, or a
+// decodeHeapOop_not_null node if it did not fold into the address.
void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
  // Assume that if a null check is needed for a 0 offset then it is always needed
// Intel solaris doesn't support any null checks yet and no
@@ -96,6 +97,13 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
}
}
+  // Check for a decodeHeapOop_not_null node that did not fold into the address
+ bool is_decoden = ((intptr_t)val) & 1;
+ val = (Node*)(((intptr_t)val) & ~1);
+
+ assert(!is_decoden || (val->in(0) == NULL) && val->is_Mach() &&
+ (val->as_Mach()->ideal_Opcode() == Op_DecodeN), "sanity");
+
  // Search the successor block for a load or store whose base value is also
// the tested value. There may be several.
Node_List *out = new Node_List(Thread::current()->resource_area());
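
The is_decoden flag travels in bit 0 of the Node pointer itself, which is safe because node pointers are at least word-aligned. A minimal sketch of the tagging convention, with hypothetical helper names:

    #include <cstdint>

    struct Node;  // opaque here; any at-least-2-byte-aligned pointer works

    inline Node* tag(Node* n)       { return (Node*)((intptr_t)n |  1); }
    inline Node* untag(Node* n)     { return (Node*)((intptr_t)n & ~1); }
    inline bool  is_tagged(Node* n) { return ((intptr_t)n & 1) != 0; }
    // collect_null_checks() sets the bit on the DecodeN value;
    // implicit_null_check() and validate_null_checks() strip it before use.
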
@@ -148,7 +156,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
if( !mach->needs_anti_dependence_check() )
      continue;              // Not a memory op; skip it
{
- // Check that value is used in memory address.
+      // Check that the value is used in a memory address in
+      // instructions with an embedded load (CmpP val1,(val2+off)).
Node* base;
Node* index;
const MachOper* oper = mach->memory_inputs(base, index);
@@ -213,7 +222,11 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
uint vidx = 0; // Capture index of value into memop
uint j;
for( j = mach->req()-1; j > 0; j-- ) {
- if( mach->in(j) == val ) vidx = j;
+ if( mach->in(j) == val ) {
+ vidx = j;
+        // Ignore a DecodeN val, which can be hoisted to where it is needed.
+ if( is_decoden ) continue;
+ }
// Block of memory-op input
Block *inb = cfg->_bbs[mach->in(j)->_idx];
Block *b = this; // Start from nul check
@@ -270,6 +283,26 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
extern int implicit_null_checks;
implicit_null_checks++;
+ if( is_decoden ) {
+ // Check if we need to hoist decodeHeapOop_not_null first.
+ Block *valb = cfg->_bbs[val->_idx];
+ if( this != valb && this->_dom_depth < valb->_dom_depth ) {
+ // Hoist it up to the end of the test block.
+ valb->find_remove(val);
+ this->add_inst(val);
+ cfg->_bbs.map(val->_idx,this);
+ // DecodeN on x86 may kill flags. Check for flag-killing projections
+ // that also need to be hoisted.
+ for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
+ Node* n = val->fast_out(j);
+ if( n->Opcode() == Op_MachProj ) {
+ cfg->_bbs[n->_idx]->find_remove(n);
+ this->add_inst(n);
+ cfg->_bbs.map(n->_idx,this);
+ }
+ }
+ }
+ }
// Hoist the memory candidate up to the end of the test block.
Block *old_block = cfg->_bbs[best->_idx];
old_block->find_remove(best);
diff --git a/src/share/vm/opto/matcher.cpp b/src/share/vm/opto/matcher.cpp
index eb59720ff..78931baa4 100644
--- a/src/share/vm/opto/matcher.cpp
+++ b/src/share/vm/opto/matcher.cpp
@@ -1334,7 +1334,7 @@ static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool s
if( j == max_scan ) // No post-domination before scan end?
return true; // Then break the match tree up
}
- if (m->is_DecodeN() && Matcher::clone_shift_expressions) {
+ if (m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) {
// These are commonly used in address expressions and can
// efficiently fold into them on X64 in some cases.
return false;
@@ -2110,8 +2110,8 @@ void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
_null_check_tests.push(proj);
Node* val = cmp->in(1);
#ifdef _LP64
- if (UseCompressedOops && !Matcher::clone_shift_expressions &&
- val->bottom_type()->isa_narrowoop()) {
+ if (val->bottom_type()->isa_narrowoop() &&
+ !Matcher::narrow_oop_use_complex_address()) {
//
// Look for DecodeN node which should be pinned to orig_proj.
// On platforms (Sparc) which can not handle 2 adds
@@ -2127,6 +2127,9 @@ void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
if (d->is_DecodeN() && d->in(1) == val) {
val = d;
val->set_req(0, NULL); // Unpin now.
+          // Mark this as a special case to distinguish it from
+          // the regular case: CmpP(DecodeN, NULL).
+ val = (Node*)(((intptr_t)val) | 1);
break;
}
}
@@ -2146,9 +2149,21 @@ void Matcher::validate_null_checks( ) {
for( uint i=0; i < cnt; i+=2 ) {
Node *test = _null_check_tests[i];
Node *val = _null_check_tests[i+1];
+ bool is_decoden = ((intptr_t)val) & 1;
+ val = (Node*)(((intptr_t)val) & ~1);
if (has_new_node(val)) {
+ Node* new_val = new_node(val);
+ if (is_decoden) {
+ assert(val->is_DecodeN() && val->in(0) == NULL, "sanity");
+ // Note: new_val may have a control edge if
+ // the original ideal node DecodeN was matched before
+ // it was unpinned in Matcher::collect_null_checks().
+ // Unpin the mach node and mark it.
+ new_val->set_req(0, NULL);
+ new_val = (Node*)(((intptr_t)new_val) | 1);
+ }
// Is a match-tree root, so replace with the matched value
- _null_check_tests.map(i+1, new_node(val));
+ _null_check_tests.map(i+1, new_val);
} else {
// Yank from candidate list
_null_check_tests.map(i+1,_null_check_tests[--cnt]);
diff --git a/src/share/vm/opto/matcher.hpp b/src/share/vm/opto/matcher.hpp
index e4bcf567d..0badb1366 100644
--- a/src/share/vm/opto/matcher.hpp
+++ b/src/share/vm/opto/matcher.hpp
@@ -352,6 +352,38 @@ public:
// registers? True for Intel but false for most RISCs
static const bool clone_shift_expressions;
+ static bool narrow_oop_use_complex_address();
+
+  // Generate an implicit null check for narrow oops if it can fold
+  // into the address expression (x64).
+  //
+  // [R12 + narrow_oop_reg<<3 + offset] // fold into address expression
+  // NullCheck narrow_oop_reg
+  //
+  // When narrow oops can't fold into the address expression (SPARC) and
+  // the base is not null, use decode_not_null and a normal implicit null
+  // check. Note: the decode_not_null node can be used here since it is
+  // referenced only on the non-null path, but it requires special
+  // handling; see collect_null_checks():
+  //
+  // decode_not_null narrow_oop_reg, oop_reg // 'shift' and 'add base'
+  // [oop_reg + offset]
+  // NullCheck oop_reg
+  //
+  // With a zero base, and when narrow oops cannot fold into the address
+  // expression, use a normal implicit null check since only a shift
+  // is needed to decode the narrow oop.
+ //
+ // decode narrow_oop_reg, oop_reg // only 'shift'
+ // [oop_reg + offset]
+ // NullCheck oop_reg
+ //
+ inline static bool gen_narrow_oop_implicit_null_checks() {
+ return Universe::narrow_oop_use_implicit_null_checks() &&
+ (narrow_oop_use_complex_address() ||
+ Universe::narrow_oop_base() != NULL);
+ }
+
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
diff --git a/src/share/vm/opto/parse2.cpp b/src/share/vm/opto/parse2.cpp
index 7ed0ae39a..e212e0d7e 100644
--- a/src/share/vm/opto/parse2.cpp
+++ b/src/share/vm/opto/parse2.cpp
@@ -1317,8 +1317,8 @@ void Parse::do_one_bytecode() {
case Bytecodes::_iconst_3: push(intcon( 3)); break;
case Bytecodes::_iconst_4: push(intcon( 4)); break;
case Bytecodes::_iconst_5: push(intcon( 5)); break;
- case Bytecodes::_bipush: push(intcon( iter().get_byte())); break;
- case Bytecodes::_sipush: push(intcon( iter().get_short())); break;
+ case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break;
+ case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break;
case Bytecodes::_aconst_null: push(null()); break;
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
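
The new get_constant_u1/get_constant_u2 accessors return sign-extended operands, which is what bipush and sipush require. A sketch of the assumed behavior:

    #include <cstdint>

    // bcp points at the opcode; the operand bytes follow it.
    int32_t get_constant_u1_sketch(const uint8_t* bcp) {  // bipush
      return (int8_t)bcp[1];                              // sign-extended byte
    }
    int32_t get_constant_u2_sketch(const uint8_t* bcp) {  // sipush
      return (int16_t)(uint16_t)((bcp[1] << 8) | bcp[2]); // sign-extended short
    }
    // bipush 0xFF pushes -1, not 255: both operands are signed per the JVM spec.
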
diff --git a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp
index b2d69373d..e8ac72d4a 100644
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp
+++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -638,7 +638,7 @@ void JvmtiClassFileReconstituter::copy_bytecodes(methodHandle mh,
// length of bytecode (mnemonic + operands)
address bcp = bs.bcp();
- int len = bs.next_bcp() - bcp;
+ int len = bs.instruction_size();
assert(len > 0, "length must be > 0");
// copy the bytecodes
diff --git a/src/share/vm/prims/jvmtiExport.cpp b/src/share/vm/prims/jvmtiExport.cpp
index 07d4bf01a..a4c2828da 100644
--- a/src/share/vm/prims/jvmtiExport.cpp
+++ b/src/share/vm/prims/jvmtiExport.cpp
@@ -726,6 +726,32 @@ GrowableArray<jmethodID>* JvmtiExport::_pending_compiled_method_unload_method_id
GrowableArray<const void *>* JvmtiExport::_pending_compiled_method_unload_code_begins;
JavaThread* JvmtiExport::_current_poster;
+void JvmtiExport::post_compiled_method_unload_internal(JavaThread* self, jmethodID method, const void *code_begin) {
+ EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
+ ("JVMTI [%s] method compile unload event triggered",
+ JvmtiTrace::safe_get_thread_name(self)));
+
+ // post the event for each environment that has this event enabled.
+ JvmtiEnvIterator it;
+ for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+ if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_UNLOAD)) {
+
+ EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
+ ("JVMTI [%s] class compile method unload event sent jmethodID " PTR_FORMAT,
+ JvmtiTrace::safe_get_thread_name(self), method));
+
+ ResourceMark rm(self);
+
+ JvmtiEventMark jem(self);
+ JvmtiJavaThreadEventTransition jet(self);
+ jvmtiEventCompiledMethodUnload callback = env->callbacks()->CompiledMethodUnload;
+ if (callback != NULL) {
+ (*callback)(env->jvmti_external(), method, code_begin);
+ }
+ }
+ }
+}
+
// post any pending CompiledMethodUnload events
void JvmtiExport::post_pending_compiled_method_unload_events() {
@@ -788,26 +814,7 @@ void JvmtiExport::post_pending_compiled_method_unload_events() {
// flag, cleanup _current_poster to indicate that no thread is now servicing the
// pending events list, and finally notify any thread that might be waiting.
for (;;) {
- EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
- ("JVMTI [%s] method compile unload event triggered",
- JvmtiTrace::safe_get_thread_name(self)));
-
- // post the event for each environment that has this event enabled.
- JvmtiEnvIterator it;
- for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
- if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_UNLOAD)) {
- EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
- ("JVMTI [%s] class compile method unload event sent jmethodID " PTR_FORMAT,
- JvmtiTrace::safe_get_thread_name(self), method));
-
- JvmtiEventMark jem(self);
- JvmtiJavaThreadEventTransition jet(self);
- jvmtiEventCompiledMethodUnload callback = env->callbacks()->CompiledMethodUnload;
- if (callback != NULL) {
- (*callback)(env->jvmti_external(), method, code_begin);
- }
- }
- }
+ post_compiled_method_unload_internal(self, method, code_begin);
// event posted, now re-grab monitor and get the next event
// If there's no next event then we are done. If this is the first
@@ -1864,17 +1871,25 @@ void JvmtiExport::post_compiled_method_load(JvmtiEnv* env, const jmethodID metho
}
// used at a safepoint to post a CompiledMethodUnload event
-void JvmtiExport::post_compiled_method_unload_at_safepoint(jmethodID mid, const void *code_begin) {
- assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
-
- // create list lazily
- if (_pending_compiled_method_unload_method_ids == NULL) {
- _pending_compiled_method_unload_method_ids = new (ResourceObj::C_HEAP) GrowableArray<jmethodID>(10,true);
- _pending_compiled_method_unload_code_begins = new (ResourceObj::C_HEAP) GrowableArray<const void *>(10,true);
+void JvmtiExport::post_compiled_method_unload(jmethodID mid, const void *code_begin) {
+ if (SafepointSynchronize::is_at_safepoint()) {
+    // Class unloading can cause nmethod unloading, which is reported
+    // by the VMThread; such events must be batched for later processing.
+ if (_pending_compiled_method_unload_method_ids == NULL) {
+ // create list lazily
+ _pending_compiled_method_unload_method_ids = new (ResourceObj::C_HEAP) GrowableArray<jmethodID>(10,true);
+ _pending_compiled_method_unload_code_begins = new (ResourceObj::C_HEAP) GrowableArray<const void *>(10,true);
+ }
+ _pending_compiled_method_unload_method_ids->append(mid);
+ _pending_compiled_method_unload_code_begins->append(code_begin);
+ _have_pending_compiled_method_unload_events = true;
+ } else {
+ // Unloading caused by the sweeper can be reported synchronously.
+ if (have_pending_compiled_method_unload_events()) {
+ post_pending_compiled_method_unload_events();
+ }
+ post_compiled_method_unload_internal(JavaThread::current(), mid, code_begin);
}
- _pending_compiled_method_unload_method_ids->append(mid);
- _pending_compiled_method_unload_code_begins->append(code_begin);
- _have_pending_compiled_method_unload_events = true;
}
void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) {
diff --git a/src/share/vm/prims/jvmtiExport.hpp b/src/share/vm/prims/jvmtiExport.hpp
index 31c5571ee..57d1fb530 100644
--- a/src/share/vm/prims/jvmtiExport.hpp
+++ b/src/share/vm/prims/jvmtiExport.hpp
@@ -144,6 +144,9 @@ class JvmtiExport : public AllStatic {
// posts any pending CompiledMethodUnload events.
static void post_pending_compiled_method_unload_events();
+ // Perform the actual notification to interested JvmtiEnvs.
+ static void post_compiled_method_unload_internal(JavaThread* self, jmethodID mid, const void* code_begin);
+
// posts a DynamicCodeGenerated event (internal/private implementation).
// The public post_dynamic_code_generated* functions make use of the
// internal implementation.
@@ -299,8 +302,8 @@ class JvmtiExport : public AllStatic {
static void post_compiled_method_load(nmethod *nm) KERNEL_RETURN;
static void post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) KERNEL_RETURN;
- // used at a safepoint to post a CompiledMethodUnload event
- static void post_compiled_method_unload_at_safepoint(jmethodID mid, const void *code_begin) KERNEL_RETURN;
+ // used to post a CompiledMethodUnload event
+ static void post_compiled_method_unload(jmethodID mid, const void *code_begin) KERNEL_RETURN;
  // similar to post_dynamic_code_generated except that it can be used to
// post a DynamicCodeGenerated event while holding locks in the VM. Any event
diff --git a/src/share/vm/prims/methodComparator.cpp b/src/share/vm/prims/methodComparator.cpp
index 4b198f95b..9190d5a83 100644
--- a/src/share/vm/prims/methodComparator.cpp
+++ b/src/share/vm/prims/methodComparator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -130,8 +130,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_multianewarray : // fall through
case Bytecodes::_checkcast : // fall through
case Bytecodes::_instanceof : {
- u2 cpi_old = _s_old->get_index_big();
- u2 cpi_new = _s_new->get_index_big();
+ u2 cpi_old = _s_old->get_index_u2();
+ u2 cpi_new = _s_new->get_index_u2();
if ((_old_cp->klass_at_noresolve(cpi_old) != _new_cp->klass_at_noresolve(cpi_new)))
return false;
if (c_old == Bytecodes::_multianewarray &&
@@ -147,9 +147,10 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_invokevirtual : // fall through
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
+ case Bytecodes::_invokedynamic : // fall through
case Bytecodes::_invokeinterface : {
- u2 cpci_old = _s_old->get_index_int();
- u2 cpci_new = _s_new->get_index_int();
+ int cpci_old = _s_old->has_index_u4() ? _s_old->get_index_u4() : _s_old->get_index_u2_cpcache();
+ int cpci_new = _s_new->has_index_u4() ? _s_new->get_index_u4() : _s_new->get_index_u2_cpcache();
// Check if the names of classes, field/method names and signatures at these indexes
// are the same. Indices which are really into constantpool cache (rather than constant
// pool itself) are accepted by the constantpool query routines below.
@@ -162,14 +163,10 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_ldc : // fall through
case Bytecodes::_ldc_w : {
- u2 cpi_old, cpi_new;
- if (c_old == Bytecodes::_ldc) {
- cpi_old = _s_old->bcp()[1];
- cpi_new = _s_new->bcp()[1];
- } else {
- cpi_old = _s_old->get_index_big();
- cpi_new = _s_new->get_index_big();
- }
+ Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method()(), _s_old->bcp());
+ Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method()(), _s_new->bcp());
+ int cpi_old = ldc_old->index();
+ int cpi_new = ldc_new->index();
constantTag tag_old = _old_cp->tag_at(cpi_old);
constantTag tag_new = _new_cp->tag_at(cpi_new);
if (tag_old.is_int() || tag_old.is_float()) {
@@ -179,7 +176,9 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
if (_old_cp->int_at(cpi_old) != _new_cp->int_at(cpi_new))
return false;
} else {
- if (_old_cp->float_at(cpi_old) != _new_cp->float_at(cpi_new))
+ // Use jint_cast to compare the bits rather than numerical values.
+ // This makes a difference for NaN constants.
+ if (jint_cast(_old_cp->float_at(cpi_old)) != jint_cast(_new_cp->float_at(cpi_new)))
return false;
}
} else if (tag_old.is_string() || tag_old.is_unresolved_string()) {
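
Why bit comparison instead of ==: a NaN constant never compares numerically equal, even to itself, so the old code would flag identical NaN constants as a difference. A self-contained illustration, with bits_of playing the role of jint_cast:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    int32_t bits_of(float f) {  // reinterpret the bits, like jint_cast
      int32_t b;
      std::memcpy(&b, &f, sizeof b);
      return b;
    }

    int main() {
      float a = std::nanf(""), b = std::nanf("");
      assert(!(a == b));                 // numeric compare: NaN never equals NaN
      assert(bits_of(a) == bits_of(b));  // bit compare: identical constants match
    }
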
@@ -199,8 +198,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
}
case Bytecodes::_ldc2_w : {
- u2 cpi_old = _s_old->get_index_big();
- u2 cpi_new = _s_new->get_index_big();
+ u2 cpi_old = _s_old->get_index_u2();
+ u2 cpi_new = _s_new->get_index_u2();
constantTag tag_old = _old_cp->tag_at(cpi_old);
constantTag tag_new = _new_cp->tag_at(cpi_new);
if (tag_old.value() != tag_new.value())
@@ -209,7 +208,9 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
if (_old_cp->long_at(cpi_old) != _new_cp->long_at(cpi_new))
return false;
} else {
- if (_old_cp->double_at(cpi_old) != _new_cp->double_at(cpi_new))
+ // Use jlong_cast to compare the bits rather than numerical values.
+ // This makes a difference for NaN constants.
+ if (jlong_cast(_old_cp->double_at(cpi_old)) != jlong_cast(_new_cp->double_at(cpi_new)))
return false;
}
break;
@@ -221,7 +222,7 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
break;
case Bytecodes::_sipush :
- if (_s_old->get_index_big() != _s_new->get_index_big())
+ if (_s_old->get_index_u2() != _s_new->get_index_u2())
return false;
break;
@@ -260,8 +261,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_ifnonnull : // fall through
case Bytecodes::_ifnull : // fall through
case Bytecodes::_jsr : {
- short old_ofs = (short) _s_old->get_index_big();
- short new_ofs = (short) _s_new->get_index_big();
+ int old_ofs = _s_old->bytecode()->get_offset_s2(c_old);
+ int new_ofs = _s_new->bytecode()->get_offset_s2(c_new);
if (_switchable_test) {
int old_dest = _s_old->bci() + old_ofs;
int new_dest = _s_new->bci() + new_ofs;
@@ -285,9 +286,11 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
if (_s_old->is_wide() != _s_new->is_wide())
return false;
if (! _s_old->is_wide()) {
- if (_s_old->get_index_big() != _s_new->get_index_big())
+ // We could use get_index_u1 and get_constant_u1, but it's simpler to grab both bytes at once:
+ if (Bytes::get_Java_u2(_s_old->bcp() + 1) != Bytes::get_Java_u2(_s_new->bcp() + 1))
return false;
} else {
+ // We could use get_index_u2 and get_constant_u2, but it's simpler to grab all four bytes at once:
if (Bytes::get_Java_u4(_s_old->bcp() + 1) != Bytes::get_Java_u4(_s_new->bcp() + 1))
return false;
}
@@ -295,8 +298,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_goto_w : // fall through
case Bytecodes::_jsr_w : {
- int old_ofs = (int) Bytes::get_Java_u4(_s_old->bcp() + 1);
- int new_ofs = (int) Bytes::get_Java_u4(_s_new->bcp() + 1);
+ int old_ofs = _s_old->bytecode()->get_offset_s4(c_old);
+ int new_ofs = _s_new->bytecode()->get_offset_s4(c_new);
if (_switchable_test) {
int old_dest = _s_old->bci() + old_ofs;
int new_dest = _s_new->bci() + new_ofs;
@@ -357,8 +360,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
}
}
} else { // !_switchable_test, can use fast rough compare
- int len_old = _s_old->next_bcp() - _s_old->bcp();
- int len_new = _s_new->next_bcp() - _s_new->bcp();
+ int len_old = _s_old->instruction_size();
+ int len_new = _s_new->instruction_size();
if (len_old != len_new)
return false;
if (memcmp(_s_old->bcp(), _s_new->bcp(), len_old) != 0)
diff --git a/src/share/vm/prims/methodHandleWalk.cpp b/src/share/vm/prims/methodHandleWalk.cpp
index d4f9ab3b0..f41f63a16 100644
--- a/src/share/vm/prims/methodHandleWalk.cpp
+++ b/src/share/vm/prims/methodHandleWalk.cpp
@@ -732,7 +732,7 @@ void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
case Bytecodes::_dreturn:
case Bytecodes::_areturn:
case Bytecodes::_return:
- assert(strcmp(Bytecodes::format(op), "b") == 0, "wrong bytecode format");
+ assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_b, "wrong bytecode format");
_bytecode.push(op);
break;
@@ -748,7 +748,7 @@ void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
case Bytecodes::_fstore:
case Bytecodes::_dstore:
case Bytecodes::_astore:
- assert(strcmp(Bytecodes::format(op), "bi") == 0, "wrong bytecode format");
+ assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bi, "wrong bytecode format");
assert((char) index == index, "index does not fit in 8-bit");
_bytecode.push(op);
_bytecode.push(index);
@@ -757,18 +757,18 @@ void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
// bii
case Bytecodes::_ldc2_w:
case Bytecodes::_checkcast:
- assert(strcmp(Bytecodes::format(op), "bii") == 0, "wrong bytecode format");
+ assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format");
assert((short) index == index, "index does not fit in 16-bit");
_bytecode.push(op);
_bytecode.push(index >> 8);
_bytecode.push(index);
break;
- // bjj
+ // bJJ
case Bytecodes::_invokestatic:
case Bytecodes::_invokespecial:
case Bytecodes::_invokevirtual:
- assert(strcmp(Bytecodes::format(op), "bjj") == 0, "wrong bytecode format");
+ assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format");
assert((short) index == index, "index does not fit in 16-bit");
_bytecode.push(op);
_bytecode.push(index >> 8);
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index 6f42fb8ef..4d2f6762e 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -1211,8 +1211,44 @@ void Arguments::set_cms_and_parnew_gc_flags() {
}
#endif // KERNEL
+void set_object_alignment() {
+ // Object alignment.
+ assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
+ MinObjAlignmentInBytes = ObjectAlignmentInBytes;
+ assert(MinObjAlignmentInBytes >= HeapWordsPerLong * HeapWordSize, "ObjectAlignmentInBytes value is too small");
+ MinObjAlignment = MinObjAlignmentInBytes / HeapWordSize;
+ assert(MinObjAlignmentInBytes == MinObjAlignment * HeapWordSize, "ObjectAlignmentInBytes value is incorrect");
+ MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
+
+ LogMinObjAlignmentInBytes = exact_log2(ObjectAlignmentInBytes);
+ LogMinObjAlignment = LogMinObjAlignmentInBytes - LogHeapWordSize;
+
+ // Oop encoding heap max
+ OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
+
+#ifndef KERNEL
+ // Set CMS global values
+ CompactibleFreeListSpace::set_cms_values();
+#endif // KERNEL
+}
+
+bool verify_object_alignment() {
+ // Object alignment.
+ if (!is_power_of_2(ObjectAlignmentInBytes)) {
+ jio_fprintf(defaultStream::error_stream(),
+                "error: ObjectAlignmentInBytes=%d must be a power of 2", (int)ObjectAlignmentInBytes);
+ return false;
+ }
+ if ((int)ObjectAlignmentInBytes < BytesPerLong) {
+ jio_fprintf(defaultStream::error_stream(),
+                "error: ObjectAlignmentInBytes=%d must be greater than or equal to %d", (int)ObjectAlignmentInBytes, BytesPerLong);
+ return false;
+ }
+ return true;
+}
+
inline uintx max_heap_for_compressed_oops() {
- LP64_ONLY(return oopDesc::OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
+ LP64_ONLY(return OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
NOT_LP64(ShouldNotReachHere(); return 0);
}
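
A sketch of the values set_object_alignment() derives, assuming 8-byte heap words (64-bit); derive() and its field names are hypothetical:

    #include <cassert>
    #include <cstdint>

    struct AlignmentGlobals {
      int      minObjAlignment, minObjAlignmentInBytes, mask;
      int      logMinObjAlignmentInBytes, logMinObjAlignment;
      uint64_t oopEncodingHeapMax;
    };

    AlignmentGlobals derive(int alignBytes) {
      const int heapWordSize = 8, logHeapWordSize = 3;  // assumed 64-bit layout
      assert(alignBytes > 0 && (alignBytes & (alignBytes - 1)) == 0);
      AlignmentGlobals g;
      g.minObjAlignmentInBytes = alignBytes;
      g.minObjAlignment        = alignBytes / heapWordSize;
      g.mask                   = alignBytes - 1;
      int log = 0;
      while ((1 << (log + 1)) <= alignBytes) log++;     // exact_log2
      g.logMinObjAlignmentInBytes = log;
      g.logMinObjAlignment        = log - logHeapWordSize;
      g.oopEncodingHeapMax = (uint64_t(UINT32_MAX) + 1) << log;
      return g;
    }
    // derive(16): 2-word alignment, shift 4, compressed-oop heap max 64 GB.
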
@@ -1776,6 +1812,8 @@ bool Arguments::check_vm_args_consistency() {
status = status && verify_interval(TLABWasteTargetPercent,
1, 100, "TLABWasteTargetPercent");
+ status = status && verify_object_alignment();
+
return status;
}
@@ -2848,6 +2886,9 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
UseCompressedOops = false;
#endif
+ // Set object alignment values.
+ set_object_alignment();
+
#ifdef SERIALGC
force_serial_gc();
#endif // SERIALGC
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index f6726a929..0a701e534 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -321,6 +321,9 @@ class CommandLineFlags {
diagnostic(bool, PrintCompressedOopsMode, false, \
"Print compressed oops base address and encoding mode") \
\
+ lp64_product(intx, ObjectAlignmentInBytes, 8, \
+ "Default object alignment in bytes, 8 is minimum") \
+ \
/* UseMembar is theoretically a temp flag used for memory barrier \
* removal testing. It was supposed to be removed before FCS but has \
* been re-added (see 6401008) */ \
@@ -1117,6 +1120,9 @@ class CommandLineFlags {
product(intx, TraceRedefineClasses, 0, \
"Trace level for JVMTI RedefineClasses") \
\
+ develop(bool, StressMethodComparator, false, \
+ "run the MethodComparator on all loaded methods") \
+ \
/* change to false by default sometime after Mustang */ \
product(bool, VerifyMergedCPBytecodes, true, \
"Verify bytecodes after RedefineClasses constant pool merging") \
diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp
index 676ad8614..489063a4b 100644
--- a/src/share/vm/runtime/sharedRuntime.cpp
+++ b/src/share/vm/runtime/sharedRuntime.cpp
@@ -1435,7 +1435,7 @@ IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, addr
// for the rest of its life! Just another racing bug in the life of
// fixup_callers_callsite ...
//
- RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
+ RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
iter.next();
assert(iter.has_current(), "must have a reloc at java call site");
relocInfo::relocType typ = iter.reloc()->type();
@@ -2055,11 +2055,11 @@ class AdapterHandlerTableIterator : public StackObj {
void scan() {
while (_index < _table->table_size()) {
AdapterHandlerEntry* a = _table->bucket(_index);
+ _index++;
if (a != NULL) {
_current = a;
return;
}
- _index++;
}
}
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index cae43b319..05ccdb991 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -607,8 +607,6 @@ static inline uint64_t cast_uint64_t(size_t x)
nonstatic_field(CodeBlob, _instructions_offset, int) \
nonstatic_field(CodeBlob, _frame_complete_offset, int) \
nonstatic_field(CodeBlob, _data_offset, int) \
- nonstatic_field(CodeBlob, _oops_offset, int) \
- nonstatic_field(CodeBlob, _oops_length, int) \
nonstatic_field(CodeBlob, _frame_size, int) \
nonstatic_field(CodeBlob, _oop_maps, OopMapSet*) \
\
@@ -626,6 +624,8 @@ static inline uint64_t cast_uint64_t(size_t x)
nonstatic_field(nmethod, _deoptimize_offset, int) \
nonstatic_field(nmethod, _orig_pc_offset, int) \
nonstatic_field(nmethod, _stub_offset, int) \
+ nonstatic_field(nmethod, _consts_offset, int) \
+ nonstatic_field(nmethod, _oops_offset, int) \
nonstatic_field(nmethod, _scopes_data_offset, int) \
nonstatic_field(nmethod, _scopes_pcs_offset, int) \
nonstatic_field(nmethod, _dependencies_offset, int) \
@@ -1328,14 +1328,6 @@ static inline uint64_t cast_uint64_t(size_t x)
declare_constant(LogBytesPerWord) \
declare_constant(BytesPerLong) \
\
- /********************/ \
- /* Object alignment */ \
- /********************/ \
- \
- declare_constant(MinObjAlignment) \
- declare_constant(MinObjAlignmentInBytes) \
- declare_constant(LogMinObjAlignmentInBytes) \
- \
/********************************************/ \
/* Generation and Space Hierarchy Constants */ \
/********************************************/ \
diff --git a/src/share/vm/utilities/copy.hpp b/src/share/vm/utilities/copy.hpp
index ce26c86bf..99966163f 100644
--- a/src/share/vm/utilities/copy.hpp
+++ b/src/share/vm/utilities/copy.hpp
@@ -51,7 +51,7 @@ extern "C" {
class Copy : AllStatic {
public:
// Block copy methods have four attributes. We don't define all possibilities.
- // alignment: aligned according to minimum Java object alignment (MinObjAlignment)
+ // alignment: aligned to BytesPerLong
// arrayof: arraycopy operation with both operands aligned on the same
// boundary as the first element of an array of the copy unit.
// This is currently a HeapWord boundary on all platforms, except
@@ -70,7 +70,7 @@ class Copy : AllStatic {
// [ '_atomic' ]
//
// Except in the arrayof case, whatever the alignment is, we assume we can copy
- // whole alignment units. E.g., if MinObjAlignment is 2x word alignment, an odd
+ // whole alignment units. E.g., if BytesPerLong is 2x word alignment, an odd
// count may copy an extra word. In the arrayof case, we are allowed to copy
// only the number of copy units specified.
@@ -305,17 +305,17 @@ class Copy : AllStatic {
}
static void assert_params_aligned(HeapWord* from, HeapWord* to) {
#ifdef ASSERT
- if (mask_bits((uintptr_t)from, MinObjAlignmentInBytes-1) != 0)
- basic_fatal("not object aligned");
- if (mask_bits((uintptr_t)to, MinObjAlignmentInBytes-1) != 0)
- basic_fatal("not object aligned");
+ if (mask_bits((uintptr_t)from, BytesPerLong-1) != 0)
+ basic_fatal("not long aligned");
+ if (mask_bits((uintptr_t)to, BytesPerLong-1) != 0)
+ basic_fatal("not long aligned");
#endif
}
static void assert_params_aligned(HeapWord* to) {
#ifdef ASSERT
- if (mask_bits((uintptr_t)to, MinObjAlignmentInBytes-1) != 0)
- basic_fatal("not object aligned");
+ if (mask_bits((uintptr_t)to, BytesPerLong-1) != 0)
+ basic_fatal("not long aligned");
#endif
}
diff --git a/src/share/vm/utilities/globalDefinitions.cpp b/src/share/vm/utilities/globalDefinitions.cpp
index d25f56613..1f03a88fd 100644
--- a/src/share/vm/utilities/globalDefinitions.cpp
+++ b/src/share/vm/utilities/globalDefinitions.cpp
@@ -34,6 +34,18 @@ int LogBitsPerHeapOop = 0;
int BytesPerHeapOop = 0;
int BitsPerHeapOop = 0;
+// Object alignment, in units of HeapWords.
+// Defaults are -1 so things will break badly if incorrectly initialized.
+int MinObjAlignment = -1;
+int MinObjAlignmentInBytes = -1;
+int MinObjAlignmentInBytesMask = 0;
+
+int LogMinObjAlignment = -1;
+int LogMinObjAlignmentInBytes = -1;
+
+// Oop encoding heap max
+uint64_t OopEncodingHeapMax = 0;
+
void basic_fatal(const char* msg) {
fatal(msg);
}
diff --git a/src/share/vm/utilities/globalDefinitions.hpp b/src/share/vm/utilities/globalDefinitions.hpp
index 291c42dd5..a5a8ae403 100644
--- a/src/share/vm/utilities/globalDefinitions.hpp
+++ b/src/share/vm/utilities/globalDefinitions.hpp
@@ -73,6 +73,9 @@ extern int LogBitsPerHeapOop;
extern int BytesPerHeapOop;
extern int BitsPerHeapOop;
+// Oop encoding heap max
+extern uint64_t OopEncodingHeapMax;
+
const int BitsPerJavaInteger = 32;
const int BitsPerJavaLong = 64;
const int BitsPerSize_t = size_tSize * BitsPerByte;
@@ -292,12 +295,12 @@ const int max_method_code_size = 64*K - 1; // JVM spec, 2nd ed. section 4.8.1 (
// Minimum is max(BytesPerLong, BytesPerDouble, BytesPerOop) / HeapWordSize, so jlong, jdouble and
// reference fields can be naturally aligned.
-const int MinObjAlignment = HeapWordsPerLong;
-const int MinObjAlignmentInBytes = MinObjAlignment * HeapWordSize;
-const int MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
+extern int MinObjAlignment;
+extern int MinObjAlignmentInBytes;
+extern int MinObjAlignmentInBytesMask;
-const int LogMinObjAlignment = LogHeapWordsPerLong;
-const int LogMinObjAlignmentInBytes = LogMinObjAlignment + LogHeapWordSize;
+extern int LogMinObjAlignment;
+extern int LogMinObjAlignmentInBytes;
// Machine dependent stuff
@@ -332,18 +335,16 @@ inline intptr_t align_object_size(intptr_t size) {
return align_size_up(size, MinObjAlignment);
}
-// Pad out certain offsets to jlong alignment, in HeapWord units.
+inline bool is_object_aligned(intptr_t addr) {
+ return addr == align_object_size(addr);
+}
-#define align_object_offset_(offset) align_size_up_(offset, HeapWordsPerLong)
+// Pad out certain offsets to jlong alignment, in HeapWord units.
inline intptr_t align_object_offset(intptr_t offset) {
return align_size_up(offset, HeapWordsPerLong);
}
-inline bool is_object_aligned(intptr_t offset) {
- return offset == align_object_offset(offset);
-}
-
//----------------------------------------------------------------------------------------------------
// Utility macros for compilers