author    kvn <none@none>  2014-02-25 15:11:18 -0800
committer kvn <none@none>  2014-02-25 15:11:18 -0800
commit    9c0308b86746f885f1e6f31e9b474b0b7b550700 (patch)
tree      77e60af7c17331350a7114b4440b6a8e3303f52f /src
parent    a2bcf5591c2afbec1a941b7b4de582de157aafed (diff)
parent    86005d48ffb5057779421bbe655f52eed21d974a (diff)
Merge
Diffstat (limited to 'src')
-rw-r--r--  src/cpu/sparc/vm/assembler_sparc.hpp | 118
-rw-r--r--  src/cpu/sparc/vm/sparc.ad | 6
-rw-r--r--  src/cpu/sparc/vm/stubGenerator_sparc.cpp | 777
-rw-r--r--  src/cpu/sparc/vm/vm_version_sparc.cpp | 38
-rw-r--r--  src/cpu/sparc/vm/vm_version_sparc.hpp | 5
-rw-r--r--  src/cpu/x86/vm/stubGenerator_x86_32.cpp | 10
-rw-r--r--  src/cpu/x86/vm/stubGenerator_x86_64.cpp | 23
-rw-r--r--  src/cpu/x86/vm/x86.ad | 6
-rw-r--r--  src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp | 5
-rw-r--r--  src/share/vm/classfile/classLoaderData.cpp | 30
-rw-r--r--  src/share/vm/classfile/classLoaderData.hpp | 7
-rw-r--r--  src/share/vm/classfile/dictionary.cpp | 4
-rw-r--r--  src/share/vm/classfile/systemDictionary.cpp | 19
-rw-r--r--  src/share/vm/classfile/systemDictionary.hpp | 6
-rw-r--r--  src/share/vm/classfile/vmSymbols.hpp | 2
-rw-r--r--  src/share/vm/code/dependencies.cpp | 2
-rw-r--r--  src/share/vm/code/vtableStubs.cpp | 24
-rw-r--r--  src/share/vm/code/vtableStubs.hpp | 1
-rw-r--r--  src/share/vm/interpreter/linkResolver.cpp | 11
-rw-r--r--  src/share/vm/interpreter/rewriter.cpp | 43
-rw-r--r--  src/share/vm/interpreter/rewriter.hpp | 5
-rw-r--r--  src/share/vm/memory/allocation.cpp | 3
-rw-r--r--  src/share/vm/memory/allocation.hpp | 2
-rw-r--r--  src/share/vm/memory/metachunk.hpp | 2
-rw-r--r--  src/share/vm/memory/metaspace.cpp | 57
-rw-r--r--  src/share/vm/memory/metaspace.hpp | 2
-rw-r--r--  src/share/vm/oops/arrayKlass.cpp | 6
-rw-r--r--  src/share/vm/oops/arrayKlass.hpp | 4
-rw-r--r--  src/share/vm/oops/constantPool.cpp | 3
-rw-r--r--  src/share/vm/oops/instanceKlass.cpp | 15
-rw-r--r--  src/share/vm/oops/instanceKlass.hpp | 2
-rw-r--r--  src/share/vm/oops/klass.cpp | 6
-rw-r--r--  src/share/vm/oops/klass.hpp | 6
-rw-r--r--  src/share/vm/oops/objArrayKlass.cpp | 6
-rw-r--r--  src/share/vm/oops/objArrayKlass.hpp | 4
-rw-r--r--  src/share/vm/opto/bytecodeInfo.cpp | 88
-rw-r--r--  src/share/vm/opto/c2_globals.hpp | 3
-rw-r--r--  src/share/vm/opto/callGenerator.cpp | 4
-rw-r--r--  src/share/vm/opto/cfgnode.cpp | 8
-rw-r--r--  src/share/vm/opto/compile.cpp | 46
-rw-r--r--  src/share/vm/opto/connode.cpp | 8
-rw-r--r--  src/share/vm/opto/connode.hpp | 2
-rw-r--r--  src/share/vm/opto/doCall.cpp | 13
-rw-r--r--  src/share/vm/opto/graphKit.cpp | 8
-rw-r--r--  src/share/vm/opto/library_call.cpp | 59
-rw-r--r--  src/share/vm/opto/loopopts.cpp | 8
-rw-r--r--  src/share/vm/opto/matcher.hpp | 3
-rw-r--r--  src/share/vm/opto/memnode.cpp | 4
-rw-r--r--  src/share/vm/opto/multnode.cpp | 2
-rw-r--r--  src/share/vm/opto/node.cpp | 4
-rw-r--r--  src/share/vm/opto/parse1.cpp | 4
-rw-r--r--  src/share/vm/opto/parse2.cpp | 10
-rw-r--r--  src/share/vm/opto/parse3.cpp | 2
-rw-r--r--  src/share/vm/opto/phaseX.cpp | 23
-rw-r--r--  src/share/vm/opto/phaseX.hpp | 6
-rw-r--r--  src/share/vm/opto/runtime.cpp | 18
-rw-r--r--  src/share/vm/opto/type.cpp | 125
-rw-r--r--  src/share/vm/opto/type.hpp | 89
-rw-r--r--  src/share/vm/prims/jvmtiCodeBlobEvents.cpp | 17
-rw-r--r--  src/share/vm/runtime/arguments.cpp | 3
-rw-r--r--  src/share/vm/runtime/globals.cpp | 33
-rw-r--r--  src/share/vm/runtime/globals.hpp | 7
-rw-r--r--  src/share/vm/runtime/os.cpp | 2
-rw-r--r--  src/share/vm/utilities/array.hpp | 4
-rw-r--r--  src/share/vm/utilities/bitMap.cpp | 2
65 files changed, 1448 insertions, 417 deletions
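
A note before the per-file diffs: the new SPARC stubs repeatedly compare the SunJCE expanded-key array length against 44, 52, or 60 to recover the original AES key size. The mapping follows from the AES key schedule, which holds 4*(rounds+1) 32-bit words. A minimal standalone check of that arithmetic (an illustration, not part of the changeset):

    #include <cassert>

    // An AES expanded key holds 4*(rounds+1) 32-bit words.
    static int rounds_for_expanded_len(int keylen_words) {
      return keylen_words / 4 - 1;
    }

    int main() {
      assert(rounds_for_expanded_len(44) == 10);  // AES-128: 10 rounds
      assert(rounds_for_expanded_len(52) == 12);  // AES-192: 12 rounds
      assert(rounds_for_expanded_len(60) == 14);  // AES-256: 14 rounds
      return 0;
    }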
diff --git a/src/cpu/sparc/vm/assembler_sparc.hpp b/src/cpu/sparc/vm/assembler_sparc.hpp
index ccbc43e5a..11547cde9 100644
--- a/src/cpu/sparc/vm/assembler_sparc.hpp
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp
@@ -88,6 +88,7 @@ class Assembler : public AbstractAssembler {
orncc_op3 = 0x16,
xnorcc_op3 = 0x17,
addccc_op3 = 0x18,
+ aes4_op3 = 0x19,
umulcc_op3 = 0x1a,
smulcc_op3 = 0x1b,
subccc_op3 = 0x1c,
@@ -121,6 +122,8 @@ class Assembler : public AbstractAssembler {
fpop1_op3 = 0x34,
fpop2_op3 = 0x35,
impdep1_op3 = 0x36,
+ aes3_op3 = 0x36,
+ flog3_op3 = 0x36,
impdep2_op3 = 0x37,
jmpl_op3 = 0x38,
rett_op3 = 0x39,
@@ -172,41 +175,56 @@ class Assembler : public AbstractAssembler {
enum opfs {
// selected opfs
- fmovs_opf = 0x01,
- fmovd_opf = 0x02,
-
- fnegs_opf = 0x05,
- fnegd_opf = 0x06,
-
- fadds_opf = 0x41,
- faddd_opf = 0x42,
- fsubs_opf = 0x45,
- fsubd_opf = 0x46,
-
- fmuls_opf = 0x49,
- fmuld_opf = 0x4a,
- fdivs_opf = 0x4d,
- fdivd_opf = 0x4e,
-
- fcmps_opf = 0x51,
- fcmpd_opf = 0x52,
-
- fstox_opf = 0x81,
- fdtox_opf = 0x82,
- fxtos_opf = 0x84,
- fxtod_opf = 0x88,
- fitos_opf = 0xc4,
- fdtos_opf = 0xc6,
- fitod_opf = 0xc8,
- fstod_opf = 0xc9,
- fstoi_opf = 0xd1,
- fdtoi_opf = 0xd2,
-
- mdtox_opf = 0x110,
- mstouw_opf = 0x111,
- mstosw_opf = 0x113,
- mxtod_opf = 0x118,
- mwtos_opf = 0x119
+ fmovs_opf = 0x01,
+ fmovd_opf = 0x02,
+
+ fnegs_opf = 0x05,
+ fnegd_opf = 0x06,
+
+ fadds_opf = 0x41,
+ faddd_opf = 0x42,
+ fsubs_opf = 0x45,
+ fsubd_opf = 0x46,
+
+ fmuls_opf = 0x49,
+ fmuld_opf = 0x4a,
+ fdivs_opf = 0x4d,
+ fdivd_opf = 0x4e,
+
+ fcmps_opf = 0x51,
+ fcmpd_opf = 0x52,
+
+ fstox_opf = 0x81,
+ fdtox_opf = 0x82,
+ fxtos_opf = 0x84,
+ fxtod_opf = 0x88,
+ fitos_opf = 0xc4,
+ fdtos_opf = 0xc6,
+ fitod_opf = 0xc8,
+ fstod_opf = 0xc9,
+ fstoi_opf = 0xd1,
+ fdtoi_opf = 0xd2,
+
+ mdtox_opf = 0x110,
+ mstouw_opf = 0x111,
+ mstosw_opf = 0x113,
+ mxtod_opf = 0x118,
+ mwtos_opf = 0x119,
+
+ aes_kexpand0_opf = 0x130,
+ aes_kexpand2_opf = 0x131
+ };
+
+ enum op5s {
+ aes_eround01_op5 = 0x00,
+ aes_eround23_op5 = 0x01,
+ aes_dround01_op5 = 0x02,
+ aes_dround23_op5 = 0x03,
+ aes_eround01_l_op5 = 0x04,
+ aes_eround23_l_op5 = 0x05,
+ aes_dround01_l_op5 = 0x06,
+ aes_dround23_l_op5 = 0x07,
+ aes_kexpand1_op5 = 0x08
};
enum RCondition { rc_z = 1, rc_lez = 2, rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7, rc_last = rc_gez };
@@ -427,6 +445,7 @@ class Assembler : public AbstractAssembler {
static int immed( bool i) { return u_field(i ? 1 : 0, 13, 13); }
static int opf_low6( int w) { return u_field(w, 10, 5); }
static int opf_low5( int w) { return u_field(w, 9, 5); }
+ static int op5( int x) { return u_field(x, 8, 5); }
static int trapcc( CC cc) { return u_field(cc, 12, 11); }
static int sx( int i) { return u_field(i, 12, 12); } // shift x=1 means 64-bit
static int opf( int x) { return u_field(x, 13, 5); }
@@ -451,6 +470,7 @@ class Assembler : public AbstractAssembler {
static int fd( FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); };
static int fs1(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); };
static int fs2(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 4, 0); };
+ static int fs3(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 13, 9); };
// some float instructions use this encoding on the op3 field
static int alt_op3(int op, FloatRegisterImpl::Width w) {
@@ -559,6 +579,12 @@ class Assembler : public AbstractAssembler {
return x & ((1 << 10) - 1);
}
+ // AES crypto instructions supported only on certain processors
+ static void aes_only() { assert( VM_Version::has_aes(), "This instruction only works on SPARC with AES instruction support"); }
+
+ // instruction only in VIS1
+ static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); }
+
// instruction only in VIS3
static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
@@ -682,6 +708,24 @@ public:
void addccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+ // 4-operand AES instructions
+
+ void aes_eround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+ void aes_eround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+ void aes_dround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+ void aes_dround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+ void aes_eround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+ void aes_eround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+ void aes_dround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+ void aes_dround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+ void aes_kexpand1( FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | u_field(imm5a, 13, 9) | op5(aes_kexpand1_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+
+
+ // 3-operand AES instructions
+
+ void aes_kexpand0( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand0_opf) | fs2(s2, FloatRegisterImpl::D) ); }
+ void aes_kexpand2( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand2_opf) | fs2(s2, FloatRegisterImpl::D) ); }
+
// pp 136
inline void bpr(RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none);
@@ -784,6 +828,10 @@ public:
void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); }
void fdiv( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x4c + w) | fs2(s2, w)); }
+ // FXORs/FXORd instructions
+
+ void fxor( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(flog3_op3) | fs1(s1, w) | opf(0x6E - w) | fs2(s2, w)); }
+
// pp 164
void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); }
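
The AES entry points above are assembled entirely from the bit-field helpers (fd, op3, fs1, fs3, op5, fs2). A standalone sketch of how those fields compose into one 32-bit SPARC instruction word; the field positions are copied from the hunk, while the op value and register encodings are simplified assumptions for illustration:

    #include <cstdint>
    #include <cstdio>

    // Place v into bits hi..lo of a zeroed 32-bit word (mirrors u_field above).
    static uint32_t u_field(uint32_t v, int hi, int lo) {
      return (v & ((1u << (hi - lo + 1)) - 1)) << lo;
    }

    int main() {
      // A 4-operand AES round word: op | fd | op3 | fs1 | fs3 | op5 | fs2.
      uint32_t insn = u_field(2,    31, 30)   // op(arith_op), assumed 2
                    | u_field(4,    29, 25)   // fd  = destination (illustrative)
                    | u_field(0x19, 24, 19)   // op3 = aes4_op3
                    | u_field(0,    18, 14)   // fs1
                    | u_field(2,    13,  9)   // fs3
                    | u_field(0x00,  8,  5)   // op5 = aes_eround01_op5
                    | u_field(6,     4,  0);  // fs2
      printf("0x%08x\n", insn);
      return 0;
    }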
diff --git a/src/cpu/sparc/vm/sparc.ad b/src/cpu/sparc/vm/sparc.ad
index ef1ccf1c0..3a470b0f8 100644
--- a/src/cpu/sparc/vm/sparc.ad
+++ b/src/cpu/sparc/vm/sparc.ad
@@ -1853,6 +1853,12 @@ const bool Matcher::misaligned_vectors_ok() {
return false;
}
+// Current (2013) SPARC platforms need to read the original key
+// to construct the expanded decryption key
+const bool Matcher::pass_original_key_for_aes() {
+ return true;
+}
+
// USII supports fxtof through the whole range of numbers, USIII doesn't
const bool Matcher::convL2FSupported(void) {
return VM_Version::has_fast_fxtof();
diff --git a/src/cpu/sparc/vm/stubGenerator_sparc.cpp b/src/cpu/sparc/vm/stubGenerator_sparc.cpp
index b99118281..25023404d 100644
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp
@@ -3304,6 +3304,775 @@ class StubGenerator: public StubCodeGenerator {
}
}
+ address generate_aescrypt_encryptBlock() {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "aesencryptBlock");
+ Label L_doLast128bit, L_storeOutput;
+ address start = __ pc();
+ Register from = O0; // source byte array
+ Register to = O1; // destination byte array
+ Register key = O2; // expanded key array
+ const Register keylen = O4; // reg for storing expanded key array length
+
+ // read expanded key length
+ __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
+
+ // load input into F54-F56; F30-F31 used as temp
+ __ ldf(FloatRegisterImpl::S, from, 0, F30);
+ __ ldf(FloatRegisterImpl::S, from, 4, F31);
+ __ fmov(FloatRegisterImpl::D, F30, F54);
+ __ ldf(FloatRegisterImpl::S, from, 8, F30);
+ __ ldf(FloatRegisterImpl::S, from, 12, F31);
+ __ fmov(FloatRegisterImpl::D, F30, F56);
+
+ // load expanded key
+ for ( int i = 0; i <= 38; i += 2 ) {
+ __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i));
+ }
+
+ // perform cipher transformation
+ __ fxor(FloatRegisterImpl::D, F0, F54, F54);
+ __ fxor(FloatRegisterImpl::D, F2, F56, F56);
+ // rounds 1 through 8
+ for ( int i = 4; i <= 28; i += 8 ) {
+ __ aes_eround01(as_FloatRegister(i), F54, F56, F58);
+ __ aes_eround23(as_FloatRegister(i+2), F54, F56, F60);
+ __ aes_eround01(as_FloatRegister(i+4), F58, F60, F54);
+ __ aes_eround23(as_FloatRegister(i+6), F58, F60, F56);
+ }
+ __ aes_eround01(F36, F54, F56, F58); //round 9
+ __ aes_eround23(F38, F54, F56, F60);
+
+ // 128-bit original key size
+ __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_doLast128bit);
+
+ for ( int i = 40; i <= 50; i += 2 ) {
+ __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i) );
+ }
+ __ aes_eround01(F40, F58, F60, F54); //round 10
+ __ aes_eround23(F42, F58, F60, F56);
+ __ aes_eround01(F44, F54, F56, F58); //round 11
+ __ aes_eround23(F46, F54, F56, F60);
+
+ // 192-bit original key size
+ __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_storeOutput);
+
+ __ ldf(FloatRegisterImpl::D, key, 208, F52);
+ __ aes_eround01(F48, F58, F60, F54); //round 12
+ __ aes_eround23(F50, F58, F60, F56);
+ __ ldf(FloatRegisterImpl::D, key, 216, F46);
+ __ ldf(FloatRegisterImpl::D, key, 224, F48);
+ __ ldf(FloatRegisterImpl::D, key, 232, F50);
+ __ aes_eround01(F52, F54, F56, F58); //round 13
+ __ aes_eround23(F46, F54, F56, F60);
+ __ br(Assembler::always, false, Assembler::pt, L_storeOutput);
+ __ delayed()->nop();
+
+ __ BIND(L_doLast128bit);
+ __ ldf(FloatRegisterImpl::D, key, 160, F48);
+ __ ldf(FloatRegisterImpl::D, key, 168, F50);
+
+ __ BIND(L_storeOutput);
+ // perform last round of encryption common for all key sizes
+ __ aes_eround01_l(F48, F58, F60, F54); //last round
+ __ aes_eround23_l(F50, F58, F60, F56);
+
+ // store output into the destination array, F0-F1 used as temp
+ __ fmov(FloatRegisterImpl::D, F54, F0);
+ __ stf(FloatRegisterImpl::S, F0, to, 0);
+ __ stf(FloatRegisterImpl::S, F1, to, 4);
+ __ fmov(FloatRegisterImpl::D, F56, F0);
+ __ stf(FloatRegisterImpl::S, F0, to, 8);
+ __ retl();
+ __ delayed()->stf(FloatRegisterImpl::S, F1, to, 12);
+
+ return start;
+ }
+
+ address generate_aescrypt_decryptBlock() {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "aesdecryptBlock");
+ address start = __ pc();
+ Label L_expand192bit, L_expand256bit, L_common_transform;
+ Register from = O0; // source byte array
+ Register to = O1; // destination byte array
+ Register key = O2; // expanded key array
+ Register original_key = O3; // original key array only required during decryption
+ const Register keylen = O4; // reg for storing expanded key array length
+
+ // read expanded key array length
+ __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
+
+ // load input into F52-F54; F30,F31 used as temp
+ __ ldf(FloatRegisterImpl::S, from, 0, F30);
+ __ ldf(FloatRegisterImpl::S, from, 4, F31);
+ __ fmov(FloatRegisterImpl::D, F30, F52);
+ __ ldf(FloatRegisterImpl::S, from, 8, F30);
+ __ ldf(FloatRegisterImpl::S, from, 12, F31);
+ __ fmov(FloatRegisterImpl::D, F30, F54);
+
+ // load original key from SunJCE expanded decryption key
+ for ( int i = 0; i <= 3; i++ ) {
+ __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
+ }
+
+ // 256-bit original key size
+ __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
+
+ // 192-bit original key size
+ __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
+
+ // 128-bit original key size
+ // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+ for ( int i = 0; i <= 36; i += 4 ) {
+ __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
+ __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
+ }
+
+ // perform 128-bit key specific inverse cipher transformation
+ __ fxor(FloatRegisterImpl::D, F42, F54, F54);
+ __ fxor(FloatRegisterImpl::D, F40, F52, F52);
+ __ br(Assembler::always, false, Assembler::pt, L_common_transform);
+ __ delayed()->nop();
+
+ __ BIND(L_expand192bit);
+
+ // start loading rest of the 192-bit key
+ __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
+ __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
+
+ // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+ for ( int i = 0; i <= 36; i += 6 ) {
+ __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
+ __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
+ __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
+ }
+ __ aes_kexpand1(F42, F46, 7, F48);
+ __ aes_kexpand2(F44, F48, F50);
+
+ // perform 192-bit key specific inverse cipher transformation
+ __ fxor(FloatRegisterImpl::D, F50, F54, F54);
+ __ fxor(FloatRegisterImpl::D, F48, F52, F52);
+ __ aes_dround23(F46, F52, F54, F58);
+ __ aes_dround01(F44, F52, F54, F56);
+ __ aes_dround23(F42, F56, F58, F54);
+ __ aes_dround01(F40, F56, F58, F52);
+ __ br(Assembler::always, false, Assembler::pt, L_common_transform);
+ __ delayed()->nop();
+
+ __ BIND(L_expand256bit);
+
+ // load rest of the 256-bit key
+ for ( int i = 4; i <= 7; i++ ) {
+ __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
+ }
+
+ // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+ for ( int i = 0; i <= 40; i += 8 ) {
+ __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
+ __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
+ __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
+ __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
+ }
+ __ aes_kexpand1(F48, F54, 6, F56);
+ __ aes_kexpand2(F50, F56, F58);
+
+ for ( int i = 0; i <= 6; i += 2 ) {
+ __ fmov(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i));
+ }
+
+ // load input into F52-F54
+ __ ldf(FloatRegisterImpl::D, from, 0, F52);
+ __ ldf(FloatRegisterImpl::D, from, 8, F54);
+
+ // perform 256-bit key specific inverse cipher transformation
+ __ fxor(FloatRegisterImpl::D, F0, F54, F54);
+ __ fxor(FloatRegisterImpl::D, F2, F52, F52);
+ __ aes_dround23(F4, F52, F54, F58);
+ __ aes_dround01(F6, F52, F54, F56);
+ __ aes_dround23(F50, F56, F58, F54);
+ __ aes_dround01(F48, F56, F58, F52);
+ __ aes_dround23(F46, F52, F54, F58);
+ __ aes_dround01(F44, F52, F54, F56);
+ __ aes_dround23(F42, F56, F58, F54);
+ __ aes_dround01(F40, F56, F58, F52);
+
+ for ( int i = 0; i <= 7; i++ ) {
+ __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
+ }
+
+ // perform inverse cipher transformations common for all key sizes
+ __ BIND(L_common_transform);
+ for ( int i = 38; i >= 6; i -= 8 ) {
+ __ aes_dround23(as_FloatRegister(i), F52, F54, F58);
+ __ aes_dround01(as_FloatRegister(i-2), F52, F54, F56);
+ if ( i != 6) {
+ __ aes_dround23(as_FloatRegister(i-4), F56, F58, F54);
+ __ aes_dround01(as_FloatRegister(i-6), F56, F58, F52);
+ } else {
+ __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F54);
+ __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F52);
+ }
+ }
+
+ // store output to destination array, F0-F1 used as temp
+ __ fmov(FloatRegisterImpl::D, F52, F0);
+ __ stf(FloatRegisterImpl::S, F0, to, 0);
+ __ stf(FloatRegisterImpl::S, F1, to, 4);
+ __ fmov(FloatRegisterImpl::D, F54, F0);
+ __ stf(FloatRegisterImpl::S, F0, to, 8);
+ __ retl();
+ __ delayed()->stf(FloatRegisterImpl::S, F1, to, 12);
+
+ return start;
+ }
+
+ address generate_cipherBlockChaining_encryptAESCrypt() {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
+ Label L_cbcenc128, L_cbcenc192, L_cbcenc256;
+ address start = __ pc();
+ Register from = O0; // source byte array
+ Register to = O1; // destination byte array
+ Register key = O2; // expanded key array
+ Register rvec = O3; // init vector
+ const Register len_reg = O4; // cipher length
+ const Register keylen = O5; // reg for storing expanded key array length
+
+ // save cipher len to return in the end
+ __ mov(len_reg, L1);
+
+ // read expanded key length
+ __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
+
+ // load init vector
+ __ ldf(FloatRegisterImpl::D, rvec, 0, F60);
+ __ ldf(FloatRegisterImpl::D, rvec, 8, F62);
+ __ ldx(key,0,G1);
+ __ ldx(key,8,G2);
+
+ // start loading expanded key
+ for ( int i = 0, j = 16; i <= 38; i += 2, j += 8 ) {
+ __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
+ }
+
+ // 128-bit original key size
+ __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_cbcenc128);
+
+ for ( int i = 40, j = 176; i <= 46; i += 2, j += 8 ) {
+ __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
+ }
+
+ // 192-bit original key size
+ __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_cbcenc192);
+
+ for ( int i = 48, j = 208; i <= 54; i += 2, j += 8 ) {
+ __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
+ }
+
+ // 256-bit original key size
+ __ br(Assembler::always, false, Assembler::pt, L_cbcenc256);
+ __ delayed()->nop();
+
+ __ align(OptoLoopAlignment);
+ __ BIND(L_cbcenc128);
+ __ ldx(from,0,G3);
+ __ ldx(from,8,G4);
+ __ xor3(G1,G3,G3);
+ __ xor3(G2,G4,G4);
+ __ movxtod(G3,F56);
+ __ movxtod(G4,F58);
+ __ fxor(FloatRegisterImpl::D, F60, F56, F60);
+ __ fxor(FloatRegisterImpl::D, F62, F58, F62);
+
+ // TEN_EROUNDS
+ for ( int i = 0; i <= 32; i += 8 ) {
+ __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
+ __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
+ if (i != 32 ) {
+ __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
+ __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
+ } else {
+ __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
+ __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
+ }
+ }
+
+ __ stf(FloatRegisterImpl::D, F60, to, 0);
+ __ stf(FloatRegisterImpl::D, F62, to, 8);
+ __ add(from, 16, from);
+ __ add(to, 16, to);
+ __ subcc(len_reg, 16, len_reg);
+ __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc128);
+ __ delayed()->nop();
+ __ stf(FloatRegisterImpl::D, F60, rvec, 0);
+ __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+ __ retl();
+ __ delayed()->mov(L1, O0);
+
+ __ align(OptoLoopAlignment);
+ __ BIND(L_cbcenc192);
+ __ ldx(from,0,G3);
+ __ ldx(from,8,G4);
+ __ xor3(G1,G3,G3);
+ __ xor3(G2,G4,G4);
+ __ movxtod(G3,F56);
+ __ movxtod(G4,F58);
+ __ fxor(FloatRegisterImpl::D, F60, F56, F60);
+ __ fxor(FloatRegisterImpl::D, F62, F58, F62);
+
+ // TWELVE_EROUNDS
+ for ( int i = 0; i <= 40; i += 8 ) {
+ __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
+ __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
+ if (i != 40 ) {
+ __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
+ __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
+ } else {
+ __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
+ __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
+ }
+ }
+
+ __ stf(FloatRegisterImpl::D, F60, to, 0);
+ __ stf(FloatRegisterImpl::D, F62, to, 8);
+ __ add(from, 16, from);
+ __ subcc(len_reg, 16, len_reg);
+ __ add(to, 16, to);
+ __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc192);
+ __ delayed()->nop();
+ __ stf(FloatRegisterImpl::D, F60, rvec, 0);
+ __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+ __ retl();
+ __ delayed()->mov(L1, O0);
+
+ __ align(OptoLoopAlignment);
+ __ BIND(L_cbcenc256);
+ __ ldx(from,0,G3);
+ __ ldx(from,8,G4);
+ __ xor3(G1,G3,G3);
+ __ xor3(G2,G4,G4);
+ __ movxtod(G3,F56);
+ __ movxtod(G4,F58);
+ __ fxor(FloatRegisterImpl::D, F60, F56, F60);
+ __ fxor(FloatRegisterImpl::D, F62, F58, F62);
+
+ // FOURTEEN_EROUNDS
+ for ( int i = 0; i <= 48; i += 8 ) {
+ __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
+ __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
+ if (i != 48 ) {
+ __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
+ __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
+ } else {
+ __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
+ __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
+ }
+ }
+
+ __ stf(FloatRegisterImpl::D, F60, to, 0);
+ __ stf(FloatRegisterImpl::D, F62, to, 8);
+ __ add(from, 16, from);
+ __ subcc(len_reg, 16, len_reg);
+ __ add(to, 16, to);
+ __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc256);
+ __ delayed()->nop();
+ __ stf(FloatRegisterImpl::D, F60, rvec, 0);
+ __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+ __ retl();
+ __ delayed()->mov(L1, O0);
+
+ return start;
+ }
+
+ address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
+ Label L_cbcdec_end, L_expand192bit, L_expand256bit, L_dec_first_block_start;
+ Label L_dec_first_block128, L_dec_first_block192, L_dec_next2_blocks128, L_dec_next2_blocks192, L_dec_next2_blocks256;
+ address start = __ pc();
+ Register from = I0; // source byte array
+ Register to = I1; // destination byte array
+ Register key = I2; // expanded key array
+ Register rvec = I3; // init vector
+ const Register len_reg = I4; // cipher length
+ const Register original_key = I5; // original key array only required during decryption
+ const Register keylen = L6; // reg for storing expanded key array length
+
+ // save cipher len before save_frame, to return in the end
+ __ mov(O4, L0);
+ __ save_frame(0); // args are read from the I* registers since we save the frame at the beginning
+
+ // load original key from SunJCE expanded decryption key
+ for ( int i = 0; i <= 3; i++ ) {
+ __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
+ }
+
+ // load initial vector
+ __ ldx(rvec,0,L0);
+ __ ldx(rvec,8,L1);
+
+ // read expanded key array length
+ __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
+
+ // 256-bit original key size
+ __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
+
+ // 192-bit original key size
+ __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
+
+ // 128-bit original key size
+ // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+ for ( int i = 0; i <= 36; i += 4 ) {
+ __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
+ __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
+ }
+
+ // load expanded key[last-1] and key[last] elements
+ __ movdtox(F40,L2);
+ __ movdtox(F42,L3);
+
+ __ and3(len_reg, 16, L4);
+ __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks128);
+ __ delayed()->nop();
+
+ __ br(Assembler::always, false, Assembler::pt, L_dec_first_block_start);
+ __ delayed()->nop();
+
+ __ BIND(L_expand192bit);
+ // load rest of the 192-bit key
+ __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
+ __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
+
+ // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+ for ( int i = 0; i <= 36; i += 6 ) {
+ __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
+ __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
+ __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
+ }
+ __ aes_kexpand1(F42, F46, 7, F48);
+ __ aes_kexpand2(F44, F48, F50);
+
+ // load expanded key[last-1] and key[last] elements
+ __ movdtox(F48,L2);
+ __ movdtox(F50,L3);
+
+ __ and3(len_reg, 16, L4);
+ __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks192);
+ __ delayed()->nop();
+
+ __ br(Assembler::always, false, Assembler::pt, L_dec_first_block_start);
+ __ delayed()->nop();
+
+ __ BIND(L_expand256bit);
+ // load rest of the 256-bit key
+ for ( int i = 4; i <= 7; i++ ) {
+ __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
+ }
+
+ // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+ for ( int i = 0; i <= 40; i += 8 ) {
+ __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
+ __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
+ __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
+ __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
+ }
+ __ aes_kexpand1(F48, F54, 6, F56);
+ __ aes_kexpand2(F50, F56, F58);
+
+ // load expanded key[last-1] and key[last] elements
+ __ movdtox(F56,L2);
+ __ movdtox(F58,L3);
+
+ __ and3(len_reg, 16, L4);
+ __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks256);
+ __ delayed()->nop();
+
+ __ BIND(L_dec_first_block_start);
+ __ ldx(from,0,L4);
+ __ ldx(from,8,L5);
+ __ xor3(L2,L4,G1);
+ __ movxtod(G1,F60);
+ __ xor3(L3,L5,G1);
+ __ movxtod(G1,F62);
+
+ // 128-bit original key size
+ __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pn, L_dec_first_block128);
+
+ // 192-bit original key size
+ __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_first_block192);
+
+ __ aes_dround23(F54, F60, F62, F58);
+ __ aes_dround01(F52, F60, F62, F56);
+ __ aes_dround23(F50, F56, F58, F62);
+ __ aes_dround01(F48, F56, F58, F60);
+
+ __ BIND(L_dec_first_block192);
+ __ aes_dround23(F46, F60, F62, F58);
+ __ aes_dround01(F44, F60, F62, F56);
+ __ aes_dround23(F42, F56, F58, F62);
+ __ aes_dround01(F40, F56, F58, F60);
+
+ __ BIND(L_dec_first_block128);
+ for ( int i = 38; i >= 6; i -= 8 ) {
+ __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
+ __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
+ if ( i != 6) {
+ __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
+ __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
+ } else {
+ __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
+ __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
+ }
+ }
+
+ __ movxtod(L0,F56);
+ __ movxtod(L1,F58);
+ __ mov(L4,L0);
+ __ mov(L5,L1);
+ __ fxor(FloatRegisterImpl::D, F56, F60, F60);
+ __ fxor(FloatRegisterImpl::D, F58, F62, F62);
+
+ __ stf(FloatRegisterImpl::D, F60, to, 0);
+ __ stf(FloatRegisterImpl::D, F62, to, 8);
+
+ __ add(from, 16, from);
+ __ add(to, 16, to);
+ __ subcc(len_reg, 16, len_reg);
+ __ br(Assembler::equal, false, Assembler::pt, L_cbcdec_end);
+ __ delayed()->nop();
+
+ // 256-bit original key size
+ __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_dec_next2_blocks256);
+
+ // 192-bit original key size
+ __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_next2_blocks192);
+
+ __ align(OptoLoopAlignment);
+ __ BIND(L_dec_next2_blocks128);
+ __ nop();
+
+ // F40:F42 used for first 16 bytes
+ __ ldx(from,0,G4);
+ __ ldx(from,8,G5);
+ __ xor3(L2,G4,G1);
+ __ movxtod(G1,F40);
+ __ xor3(L3,G5,G1);
+ __ movxtod(G1,F42);
+
+ // F60:F62 used for next 16 bytes
+ __ ldx(from,16,L4);
+ __ ldx(from,24,L5);
+ __ xor3(L2,L4,G1);
+ __ movxtod(G1,F60);
+ __ xor3(L3,L5,G1);
+ __ movxtod(G1,F62);
+
+ for ( int i = 38; i >= 6; i -= 8 ) {
+ __ aes_dround23(as_FloatRegister(i), F40, F42, F44);
+ __ aes_dround01(as_FloatRegister(i-2), F40, F42, F46);
+ __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
+ __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
+ if (i != 6 ) {
+ __ aes_dround23(as_FloatRegister(i-4), F46, F44, F42);
+ __ aes_dround01(as_FloatRegister(i-6), F46, F44, F40);
+ __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
+ __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
+ } else {
+ __ aes_dround23_l(as_FloatRegister(i-4), F46, F44, F42);
+ __ aes_dround01_l(as_FloatRegister(i-6), F46, F44, F40);
+ __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
+ __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
+ }
+ }
+
+ __ movxtod(L0,F46);
+ __ movxtod(L1,F44);
+ __ fxor(FloatRegisterImpl::D, F46, F40, F40);
+ __ fxor(FloatRegisterImpl::D, F44, F42, F42);
+
+ __ stf(FloatRegisterImpl::D, F40, to, 0);
+ __ stf(FloatRegisterImpl::D, F42, to, 8);
+
+ __ movxtod(G4,F56);
+ __ movxtod(G5,F58);
+ __ mov(L4,L0);
+ __ mov(L5,L1);
+ __ fxor(FloatRegisterImpl::D, F56, F60, F60);
+ __ fxor(FloatRegisterImpl::D, F58, F62, F62);
+
+ __ stf(FloatRegisterImpl::D, F60, to, 16);
+ __ stf(FloatRegisterImpl::D, F62, to, 24);
+
+ __ add(from, 32, from);
+ __ add(to, 32, to);
+ __ subcc(len_reg, 32, len_reg);
+ __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks128);
+ __ delayed()->nop();
+ __ br(Assembler::always, false, Assembler::pt, L_cbcdec_end);
+ __ delayed()->nop();
+
+ __ align(OptoLoopAlignment);
+ __ BIND(L_dec_next2_blocks192);
+ __ nop();
+
+ // F48:F50 used for first 16 bytes
+ __ ldx(from,0,G4);
+ __ ldx(from,8,G5);
+ __ xor3(L2,G4,G1);
+ __ movxtod(G1,F48);
+ __ xor3(L3,G5,G1);
+ __ movxtod(G1,F50);
+
+ // F60:F62 used for next 16 bytes
+ __ ldx(from,16,L4);
+ __ ldx(from,24,L5);
+ __ xor3(L2,L4,G1);
+ __ movxtod(G1,F60);
+ __ xor3(L3,L5,G1);
+ __ movxtod(G1,F62);
+
+ for ( int i = 46; i >= 6; i -= 8 ) {
+ __ aes_dround23(as_FloatRegister(i), F48, F50, F52);
+ __ aes_dround01(as_FloatRegister(i-2), F48, F50, F54);
+ __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
+ __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
+ if (i != 6 ) {
+ __ aes_dround23(as_FloatRegister(i-4), F54, F52, F50);
+ __ aes_dround01(as_FloatRegister(i-6), F54, F52, F48);
+ __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
+ __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
+ } else {
+ __ aes_dround23_l(as_FloatRegister(i-4), F54, F52, F50);
+ __ aes_dround01_l(as_FloatRegister(i-6), F54, F52, F48);
+ __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
+ __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
+ }
+ }
+
+ __ movxtod(L0,F54);
+ __ movxtod(L1,F52);
+ __ fxor(FloatRegisterImpl::D, F54, F48, F48);
+ __ fxor(FloatRegisterImpl::D, F52, F50, F50);
+
+ __ stf(FloatRegisterImpl::D, F48, to, 0);
+ __ stf(FloatRegisterImpl::D, F50, to, 8);
+
+ __ movxtod(G4,F56);
+ __ movxtod(G5,F58);
+ __ mov(L4,L0);
+ __ mov(L5,L1);
+ __ fxor(FloatRegisterImpl::D, F56, F60, F60);
+ __ fxor(FloatRegisterImpl::D, F58, F62, F62);
+
+ __ stf(FloatRegisterImpl::D, F60, to, 16);
+ __ stf(FloatRegisterImpl::D, F62, to, 24);
+
+ __ add(from, 32, from);
+ __ add(to, 32, to);
+ __ subcc(len_reg, 32, len_reg);
+ __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks192);
+ __ delayed()->nop();
+ __ br(Assembler::always, false, Assembler::pt, L_cbcdec_end);
+ __ delayed()->nop();
+
+ __ align(OptoLoopAlignment);
+ __ BIND(L_dec_next2_blocks256);
+ __ nop();
+
+ // F0:F2 used for first 16 bytes
+ __ ldx(from,0,G4);
+ __ ldx(from,8,G5);
+ __ xor3(L2,G4,G1);
+ __ movxtod(G1,F0);
+ __ xor3(L3,G5,G1);
+ __ movxtod(G1,F2);
+
+ // F60:F62 used for next 16 bytes
+ __ ldx(from,16,L4);
+ __ ldx(from,24,L5);
+ __ xor3(L2,L4,G1);
+ __ movxtod(G1,F60);
+ __ xor3(L3,L5,G1);
+ __ movxtod(G1,F62);
+
+ __ aes_dround23(F54, F0, F2, F4);
+ __ aes_dround01(F52, F0, F2, F6);
+ __ aes_dround23(F54, F60, F62, F58);
+ __ aes_dround01(F52, F60, F62, F56);
+ __ aes_dround23(F50, F6, F4, F2);
+ __ aes_dround01(F48, F6, F4, F0);
+ __ aes_dround23(F50, F56, F58, F62);
+ __ aes_dround01(F48, F56, F58, F60);
+ // save F48:F54 in temp registers
+ __ movdtox(F54,G2);
+ __ movdtox(F52,G3);
+ __ movdtox(F50,G6);
+ __ movdtox(F48,G1);
+ for ( int i = 46; i >= 14; i -= 8 ) {
+ __ aes_dround23(as_FloatRegister(i), F0, F2, F4);
+ __ aes_dround01(as_FloatRegister(i-2), F0, F2, F6);
+ __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
+ __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
+ __ aes_dround23(as_FloatRegister(i-4), F6, F4, F2);
+ __ aes_dround01(as_FloatRegister(i-6), F6, F4, F0);
+ __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
+ __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
+ }
+ // init F48:F54 with F0:F6 values (original key)
+ __ ldf(FloatRegisterImpl::D, original_key, 0, F48);
+ __ ldf(FloatRegisterImpl::D, original_key, 8, F50);
+ __ ldf(FloatRegisterImpl::D, original_key, 16, F52);
+ __ ldf(FloatRegisterImpl::D, original_key, 24, F54);
+ __ aes_dround23(F54, F0, F2, F4);
+ __ aes_dround01(F52, F0, F2, F6);
+ __ aes_dround23(F54, F60, F62, F58);
+ __ aes_dround01(F52, F60, F62, F56);
+ __ aes_dround23_l(F50, F6, F4, F2);
+ __ aes_dround01_l(F48, F6, F4, F0);
+ __ aes_dround23_l(F50, F56, F58, F62);
+ __ aes_dround01_l(F48, F56, F58, F60);
+ // re-init F48:F54 with their original values
+ __ movxtod(G2,F54);
+ __ movxtod(G3,F52);
+ __ movxtod(G6,F50);
+ __ movxtod(G1,F48);
+
+ __ movxtod(L0,F6);
+ __ movxtod(L1,F4);
+ __ fxor(FloatRegisterImpl::D, F6, F0, F0);
+ __ fxor(FloatRegisterImpl::D, F4, F2, F2);
+
+ __ stf(FloatRegisterImpl::D, F0, to, 0);
+ __ stf(FloatRegisterImpl::D, F2, to, 8);
+
+ __ movxtod(G4,F56);
+ __ movxtod(G5,F58);
+ __ mov(L4,L0);
+ __ mov(L5,L1);
+ __ fxor(FloatRegisterImpl::D, F56, F60, F60);
+ __ fxor(FloatRegisterImpl::D, F58, F62, F62);
+
+ __ stf(FloatRegisterImpl::D, F60, to, 16);
+ __ stf(FloatRegisterImpl::D, F62, to, 24);
+
+ __ add(from, 32, from);
+ __ add(to, 32, to);
+ __ subcc(len_reg, 32, len_reg);
+ __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks256);
+ __ delayed()->nop();
+
+ __ BIND(L_cbcdec_end);
+ __ stx(L0, rvec, 0);
+ __ stx(L1, rvec, 8);
+ __ restore();
+ __ mov(L0, O0);
+ __ retl();
+ __ delayed()->nop();
+
+ return start;
+ }
+
void generate_initial() {
// Generates all stubs and initializes the entry points
@@ -3368,6 +4137,14 @@ class StubGenerator: public StubCodeGenerator {
generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
&StubRoutines::_safefetchN_fault_pc,
&StubRoutines::_safefetchN_continuation_pc);
+
+ // generate AES intrinsics code
+ if (UseAESIntrinsics) {
+ StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
+ StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
+ StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
+ StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
+ }
}
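
Taken together, the four stubs implement single-block AES plus CBC chaining: C_i = E_K(P_i ^ C_{i-1}) on encryption and P_i = D_K(C_i) ^ C_{i-1} on decryption, with the chaining value carried in F60:F62 (and L0:L1) and the processed byte count returned. A plain C++ reference model of that dataflow, using a toy XOR "cipher" in place of AES (an illustration, not HotSpot code):

    #include <cstddef>
    #include <cstring>

    typedef unsigned char u8;

    // Toy stand-in for the real AES block transform (self-inverse, demo only).
    static void toy_block(const u8 key[16], const u8 in[16], u8 out[16]) {
      for (int i = 0; i < 16; i++) out[i] = (u8)(in[i] ^ key[i]);
    }

    static size_t cbc_encrypt(const u8 key[16], u8 iv[16],
                              const u8* in, u8* out, size_t len) {
      for (size_t off = 0; off < len; off += 16) {
        u8 blk[16];
        for (int i = 0; i < 16; i++) blk[i] = (u8)(in[off + i] ^ iv[i]);
        toy_block(key, blk, out + off);
        memcpy(iv, out + off, 16);          // chain: C_i feeds the next block
      }
      return len;                           // the stubs likewise return len
    }

    static size_t cbc_decrypt(const u8 key[16], u8 iv[16],
                              const u8* in, u8* out, size_t len) {
      for (size_t off = 0; off < len; off += 16) {
        u8 blk[16];
        toy_block(key, in + off, blk);
        for (int i = 0; i < 16; i++) out[off + i] = (u8)(blk[i] ^ iv[i]);
        memcpy(iv, in + off, 16);           // chain on the ciphertext
      }
      return len;
    }

    int main() {
      u8 key[16] = {1}, iv1[16] = {2}, iv2[16] = {2};
      u8 pt[32], ct[32], back[32];
      for (int i = 0; i < 32; i++) pt[i] = (u8)i;
      cbc_encrypt(key, iv1, pt, ct, sizeof(pt));
      cbc_decrypt(key, iv2, ct, back, sizeof(ct));
      return memcmp(pt, back, sizeof(pt)) == 0 ? 0 : 1;
    }

Note that decrypting block i depends only on C_i and C_{i-1}, both already in hand, so blocks are independent; that is what lets the "_Parallel" decrypt stub pipeline two blocks per loop iteration, while CBC encryption is inherently serial.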
diff --git a/src/cpu/sparc/vm/vm_version_sparc.cpp b/src/cpu/sparc/vm/vm_version_sparc.cpp
index bae7b3510..b75d21f98 100644
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp
@@ -234,7 +234,7 @@ void VM_Version::initialize() {
assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
char buf[512];
- jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
(has_hardware_popc() ? ", popc" : ""),
(has_vis1() ? ", vis1" : ""),
@@ -242,6 +242,7 @@ void VM_Version::initialize() {
(has_vis3() ? ", vis3" : ""),
(has_blk_init() ? ", blk_init" : ""),
(has_cbcond() ? ", cbcond" : ""),
+ (has_aes() ? ", aes" : ""),
(is_ultra3() ? ", ultra3" : ""),
(is_sun4v() ? ", sun4v" : ""),
(is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
@@ -265,6 +266,41 @@ void VM_Version::initialize() {
if (!has_vis1()) // Drop to 0 if no VIS1 support
UseVIS = 0;
+ // T2 and above should have support for AES instructions
+ if (has_aes()) {
+ if (UseVIS > 0) { // AES intrinsics use FXOR instruction which is VIS1
+ if (FLAG_IS_DEFAULT(UseAES)) {
+ FLAG_SET_DEFAULT(UseAES, true);
+ }
+ if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+ FLAG_SET_DEFAULT(UseAESIntrinsics, true);
+ }
+ // we disable both the AES flags if either of them is disabled on the command line
+ if (!UseAES || !UseAESIntrinsics) {
+ FLAG_SET_DEFAULT(UseAES, false);
+ FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+ }
+ } else {
+ if (UseAES || UseAESIntrinsics) {
+ warning("SPARC AES intrinsics require VIS1 instruction support. Intrinsics will be disabled.");
+ if (UseAES) {
+ FLAG_SET_DEFAULT(UseAES, false);
+ }
+ if (UseAESIntrinsics) {
+ FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+ }
+ }
+ }
+ } else if (UseAES || UseAESIntrinsics) {
+ warning("AES instructions are not available on this CPU");
+ if (UseAES) {
+ FLAG_SET_DEFAULT(UseAES, false);
+ }
+ if (UseAESIntrinsics) {
+ FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+ }
+ }
+
if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
(cache_line_size > ContendedPaddingWidth))
ContendedPaddingWidth = cache_line_size;
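
The flag handling above reduces to a small decision table, condensed here as a comment-only summary (a restatement of the hunk, not new logic):

    // has_aes() && UseVIS > 0  : UseAES and UseAESIntrinsics default to true;
    //                            explicitly disabling either flag turns both off.
    // has_aes() && UseVIS == 0 : both forced off with a warning, because the
    //                            stubs use FXOR, a VIS1 instruction.
    // !has_aes()               : both forced off; warn only if the user asked
    //                            for them on the command line.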
diff --git a/src/cpu/sparc/vm/vm_version_sparc.hpp b/src/cpu/sparc/vm/vm_version_sparc.hpp
index b8d63f4f8..6ddf57007 100644
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp
+++ b/src/cpu/sparc/vm/vm_version_sparc.hpp
@@ -48,7 +48,8 @@ protected:
sparc64_family = 14,
M_family = 15,
T_family = 16,
- T1_model = 17
+ T1_model = 17,
+ aes_instructions = 18
};
enum Feature_Flag_Set {
@@ -73,6 +74,7 @@ protected:
M_family_m = 1 << M_family,
T_family_m = 1 << T_family,
T1_model_m = 1 << T1_model,
+ aes_instructions_m = 1 << aes_instructions,
generic_v8_m = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
generic_v9_m = generic_v8_m | v9_instructions_m,
@@ -123,6 +125,7 @@ public:
static bool has_vis3() { return (_features & vis3_instructions_m) != 0; }
static bool has_blk_init() { return (_features & blk_init_instructions_m) != 0; }
static bool has_cbcond() { return (_features & cbcond_instructions_m) != 0; }
+ static bool has_aes() { return (_features & aes_instructions_m) != 0; }
static bool supports_compare_and_exchange()
{ return has_v9(); }
diff --git a/src/cpu/x86/vm/stubGenerator_x86_32.cpp b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
index 12fccf9dd..1622fe5ff 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
@@ -2403,6 +2403,9 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
+ // Output:
+ // rax - input length
+ //
address generate_cipherBlockChaining_encryptAESCrypt() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
@@ -2483,7 +2486,7 @@ class StubGenerator: public StubCodeGenerator {
__ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object
handleSOERegisters(false /*restoring*/);
- __ movl(rax, 0); // return 0 (why?)
+ __ movptr(rax, len_param); // return length
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -2557,6 +2560,9 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
+ // Output:
+ // rax - input length
+ //
address generate_cipherBlockChaining_decryptAESCrypt() {
assert(UseAES, "need AES instructions and misaligned SSE support");
@@ -2650,7 +2656,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(rvec , rvec_param); // restore this since used in loop
__ movdqu(Address(rvec, 0), xmm_temp); // final value of r stored in rvec of CipherBlockChaining object
handleSOERegisters(false /*restoring*/);
- __ movl(rax, 0); // return 0 (why?)
+ __ movptr(rax, len_param); // return length
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
diff --git a/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index a16611280..0adb0d31e 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -3217,6 +3217,9 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
+ // Output:
+ // rax - input length
+ //
address generate_cipherBlockChaining_encryptAESCrypt() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
@@ -3232,7 +3235,7 @@ class StubGenerator: public StubCodeGenerator {
#ifndef _WIN64
const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
#else
- const Address len_mem(rsp, 6 * wordSize); // length is on stack on Win64
+ const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
const Register len_reg = r10; // pick the first volatile windows register
#endif
const Register pos = rax;
@@ -3259,6 +3262,8 @@ class StubGenerator: public StubCodeGenerator {
for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
__ movdqu(xmm_save(i), as_XMMRegister(i));
}
+#else
+ __ push(len_reg); // Save
#endif
const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front
@@ -3301,8 +3306,10 @@ class StubGenerator: public StubCodeGenerator {
for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
__ movdqu(as_XMMRegister(i), xmm_save(i));
}
+ __ movl(rax, len_mem);
+#else
+ __ pop(rax); // return length
#endif
- __ movl(rax, 0); // return 0 (why?)
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -3409,6 +3416,9 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
+ // Output:
+ // rax - input length
+ //
address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
assert(UseAES, "need AES instructions and misaligned SSE support");
@@ -3427,7 +3437,7 @@ class StubGenerator: public StubCodeGenerator {
#ifndef _WIN64
const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
#else
- const Address len_mem(rsp, 6 * wordSize); // length is on stack on Win64
+ const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
const Register len_reg = r10; // pick the first volatile windows register
#endif
const Register pos = rax;
@@ -3448,7 +3458,10 @@ class StubGenerator: public StubCodeGenerator {
for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
__ movdqu(xmm_save(i), as_XMMRegister(i));
}
+#else
+ __ push(len_reg); // Save
#endif
+
// the java expanded key ordering is rotated one position from what we want
// so we start from 0x10 here and hit 0x00 last
const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
@@ -3554,8 +3567,10 @@ class StubGenerator: public StubCodeGenerator {
for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
__ movdqu(as_XMMRegister(i), xmm_save(i));
}
+ __ movl(rax, len_mem);
+#else
+ __ pop(rax); // return length
#endif
- __ movl(rax, 0); // return 0 (why?)
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
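
On the Win64 len_mem fix above: the length is read back at the end of the stub, by which point pushes and pops have moved rsp, so an rsp-relative address is only correct while rsp still equals its prologue value; rbp is fixed for the stub's whole lifetime once enter() runs. The 6 * wordSize offset itself follows from the Win64 frame layout, checked here as a worked illustration (not HotSpot code):

    // Layout after enter() (push rbp; mov rbp, rsp):
    //   [rbp +  0]  saved rbp
    //   [rbp +  8]  return address
    //   [rbp + 16]  \
    //   [rbp + 24]   |  32-byte home ("shadow") area reserved for the four
    //   [rbp + 32]   |  register arguments rcx, rdx, r8, r9
    //   [rbp + 40]  /
    //   [rbp + 48]  5th argument (the input length) = 6 * wordSize
    static_assert(6 * 8 == 8 /*saved rbp*/ + 8 /*return addr*/ + 32 /*home*/,
                  "len_mem offset");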
diff --git a/src/cpu/x86/vm/x86.ad b/src/cpu/x86/vm/x86.ad
index c49d0e6c3..b0077a5f5 100644
--- a/src/cpu/x86/vm/x86.ad
+++ b/src/cpu/x86/vm/x86.ad
@@ -581,6 +581,12 @@ const bool Matcher::misaligned_vectors_ok() {
return !AlignVector; // can be changed by flag
}
+// x86 AES instructions are compatible with SunJCE expanded
+// keys, hence we do not need to pass the original key to stubs
+const bool Matcher::pass_original_key_for_aes() {
+ return false;
+}
+
// Helper methods for MachSpillCopyNode::implementation().
static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, uint ireg, outputStream* st) {
diff --git a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp
index e46e21500..6dca05f91 100644
--- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp
+++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp
@@ -119,6 +119,11 @@ int VM_Version::platform_features(int features) {
#endif
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
+#ifndef AV_SPARC_AES
+#define AV_SPARC_AES 0x00020000 /* aes instrs supported */
+#endif
+ if (av & AV_SPARC_AES) features |= aes_instructions_m;
+
} else {
// getisax(2) failed, use the old legacy code.
#ifndef PRODUCT
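
The AV_SPARC_AES probe follows the same getisax(2) pattern as the existing cbcond check. A standalone Solaris-only sketch of that detection path (the #define fallback mirrors the hunk and covers older <sys/auxv.h> headers):

    #include <sys/auxv.h>
    #include <stdint.h>
    #include <stdio.h>

    #ifndef AV_SPARC_AES
    #define AV_SPARC_AES 0x00020000  /* aes instrs supported */
    #endif

    int main() {
      uint32_t av[2] = { 0, 0 };
      (void) getisax(av, 2);  // fill hardware-capability words
      printf("aes: %s\n", (av[0] & AV_SPARC_AES) ? "yes" : "no");
      return 0;
    }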
diff --git a/src/share/vm/classfile/classLoaderData.cpp b/src/share/vm/classfile/classLoaderData.cpp
index 273e0184d..6b34e0150 100644
--- a/src/share/vm/classfile/classLoaderData.cpp
+++ b/src/share/vm/classfile/classLoaderData.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -520,6 +520,13 @@ void ClassLoaderData::verify() {
}
}
+bool ClassLoaderData::contains_klass(Klass* klass) {
+ for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+ if (k == klass) return true;
+ }
+ return false;
+}
+
// GC root of class loader data created.
ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
@@ -648,12 +655,12 @@ GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
return array;
}
-#ifndef PRODUCT
-// for debugging and hsfind(x)
-bool ClassLoaderDataGraph::contains(address x) {
- // I think we need the _metaspace_lock taken here because the class loader
- // data graph could be changing while we are walking it (new entries added,
- // new entries being unloaded, etc).
+// For profiling and hsfind() only. Otherwise, this is unsafe (and slow). This
+// is done lock-free to avoid lock inversion problems. It is safe because new
+// ClassLoaderData entries are added to the end of the CLDG, and only removed
+// at a safepoint. The _unloading list can be deallocated concurrently with
+// CMS, so this doesn't look in metaspace for classes that have been unloaded.
+bool ClassLoaderDataGraph::contains(const void* x) {
if (DumpSharedSpaces) {
// There are only two metaspaces to worry about.
ClassLoaderData* ncld = ClassLoaderData::the_null_class_loader_data();
@@ -670,16 +677,11 @@ bool ClassLoaderDataGraph::contains(address x) {
}
}
- // Could also be on an unloading list which is okay, ie. still allocated
- // for a little while.
- for (ClassLoaderData* ucld = _unloading; ucld != NULL; ucld = ucld->next()) {
- if (ucld->metaspace_or_null() != NULL && ucld->metaspace_or_null()->contains(x)) {
- return true;
- }
- }
+ // Do not check unloading list because deallocation can be concurrent.
return false;
}
+#ifndef PRODUCT
bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
if (loader_data == data) {
diff --git a/src/share/vm/classfile/classLoaderData.hpp b/src/share/vm/classfile/classLoaderData.hpp
index 2cb21801d..3fcbb6b7d 100644
--- a/src/share/vm/classfile/classLoaderData.hpp
+++ b/src/share/vm/classfile/classLoaderData.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -90,9 +90,9 @@ class ClassLoaderDataGraph : public AllStatic {
static void dump() { dump_on(tty); }
static void verify();
-#ifndef PRODUCT
// expensive test for pointer in metaspace for debugging
- static bool contains(address x);
+ static bool contains(const void* x);
+#ifndef PRODUCT
static bool contains_loader_data(ClassLoaderData* loader_data);
#endif
@@ -260,6 +260,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
jobject add_handle(Handle h);
void add_class(Klass* k);
void remove_class(Klass* k);
+ bool contains_klass(Klass* k);
void record_dependency(Klass* to, TRAPS);
void init_dependencies(TRAPS);
diff --git a/src/share/vm/classfile/dictionary.cpp b/src/share/vm/classfile/dictionary.cpp
index e308791ca..9a2bb74f8 100644
--- a/src/share/vm/classfile/dictionary.cpp
+++ b/src/share/vm/classfile/dictionary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -707,7 +707,7 @@ void Dictionary::verify() {
loader_data->class_loader() == NULL ||
loader_data->class_loader()->is_instance(),
"checking type of class_loader");
- e->verify(/*check_dictionary*/false);
+ e->verify();
probe->verify_protection_domain_set();
element_count++;
}
diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
index f5c5c017d..703443517 100644
--- a/src/share/vm/classfile/systemDictionary.cpp
+++ b/src/share/vm/classfile/systemDictionary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2650,23 +2650,6 @@ void SystemDictionary::verify() {
constraints()->verify(dictionary(), placeholders());
}
-
-void SystemDictionary::verify_obj_klass_present(Symbol* class_name,
- ClassLoaderData* loader_data) {
- GCMutexLocker mu(SystemDictionary_lock);
- Symbol* name;
-
- Klass* probe = find_class(class_name, loader_data);
- if (probe == NULL) {
- probe = SystemDictionary::find_shared_class(class_name);
- if (probe == NULL) {
- name = find_placeholder(class_name, loader_data);
- }
- }
- guarantee(probe != NULL || name != NULL,
- "Loaded klasses should be in SystemDictionary");
-}
-
// utility function for class load event
void SystemDictionary::post_class_load_event(const Ticks& start_time,
instanceKlassHandle k,
diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp
index b0e914ff9..88132f520 100644
--- a/src/share/vm/classfile/systemDictionary.hpp
+++ b/src/share/vm/classfile/systemDictionary.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -375,10 +375,6 @@ public:
static bool is_internal_format(Symbol* class_name);
#endif
- // Verify class is in dictionary
- static void verify_obj_klass_present(Symbol* class_name,
- ClassLoaderData* loader_data);
-
// Initialization
static void initialize(TRAPS);
diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
index 58485959f..ed3c0dbcb 100644
--- a/src/share/vm/classfile/vmSymbols.hpp
+++ b/src/share/vm/classfile/vmSymbols.hpp
@@ -787,7 +787,7 @@
do_intrinsic(_cipherBlockChaining_decryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, decrypt_name, byteArray_int_int_byteArray_int_signature, F_R) \
do_name( encrypt_name, "encrypt") \
do_name( decrypt_name, "decrypt") \
- do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)V") \
+ do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)I") \
\
/* support for java.util.zip */ \
do_class(java_util_zip_CRC32, "java/util/zip/CRC32") \
diff --git a/src/share/vm/code/dependencies.cpp b/src/share/vm/code/dependencies.cpp
index 29aaf3250..778efe8fe 100644
--- a/src/share/vm/code/dependencies.cpp
+++ b/src/share/vm/code/dependencies.cpp
@@ -655,8 +655,6 @@ inline Metadata* Dependencies::DepStream::recorded_metadata_at(int i) {
} else {
o = _deps->oop_recorder()->metadata_at(i);
}
- assert(o == NULL || o->is_metaspace_object(),
- err_msg("Should be metadata " PTR_FORMAT, o));
return o;
}
diff --git a/src/share/vm/code/vtableStubs.cpp b/src/share/vm/code/vtableStubs.cpp
index 5af91a3a6..b3bfc258f 100644
--- a/src/share/vm/code/vtableStubs.cpp
+++ b/src/share/vm/code/vtableStubs.cpp
@@ -55,6 +55,9 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
const int chunk_factor = 32;
if (_chunk == NULL || _chunk + real_size > _chunk_end) {
const int bytes = chunk_factor * real_size + pd_code_alignment();
+
+ // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
+ // If changing the name, update the other file accordingly.
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
return NULL;
@@ -62,12 +65,6 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
_chunk = blob->content_begin();
_chunk_end = _chunk + bytes;
Forte::register_stub("vtable stub", _chunk, _chunk_end);
- // Notify JVMTI about this stub. The event will be recorded by the enclosing
- // JvmtiDynamicCodeEventCollector and posted when this thread has released
- // all locks.
- if (JvmtiExport::should_post_dynamic_code_generated()) {
- JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
- }
align_chunk();
}
assert(_chunk + real_size <= _chunk_end, "bad allocation");
@@ -130,6 +127,13 @@ address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location());
Disassembler::decode(s->code_begin(), s->code_end());
}
+ // Notify JVMTI about this stub. The event will be recorded by the enclosing
+ // JvmtiDynamicCodeEventCollector and posted when this thread has released
+ // all locks.
+ if (JvmtiExport::should_post_dynamic_code_generated()) {
+ JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
+ s->code_begin(), s->code_end());
+ }
}
return s->entry_point();
}
@@ -195,6 +199,14 @@ void vtableStubs_init() {
VtableStubs::initialize();
}
+void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
+ for (int i = 0; i < N; i++) {
+ for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
+ f(s);
+ }
+ }
+}
+
//-----------------------------------------------------------------------------------------------------
// Non-product code
diff --git a/src/share/vm/code/vtableStubs.hpp b/src/share/vm/code/vtableStubs.hpp
index 06f2a67a8..b3d4f2d83 100644
--- a/src/share/vm/code/vtableStubs.hpp
+++ b/src/share/vm/code/vtableStubs.hpp
@@ -131,6 +131,7 @@ class VtableStubs : AllStatic {
static VtableStub* stub_containing(address pc); // stub containing pc or NULL
static int number_of_vtable_stubs() { return _number_of_vtable_stubs; }
static void initialize();
+ static void vtable_stub_do(void f(VtableStub*)); // iterates over all vtable stubs
};
#endif // SHARE_VM_CODE_VTABLESTUBS_HPP
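The two vtableStubs changes above relocate the JVMTI dynamic-code event from chunk allocation (previously one event per freshly allocated "vtable chunks" blob, covering the whole chunk) to stub creation in find_stub(), so each event now describes a single stub's own code range, and they add an iterator over every stub in the hash table. A minimal sketch of that iteration pattern follows; kBuckets, StubSketch and table are illustrative stand-ins for VtableStubs' internals.

#include <cstddef>

// Stand-in stub type; only the chain link matters for the traversal.
struct StubSketch {
  StubSketch* _next;
  StubSketch* next() const { return _next; }
};

const int kBuckets = 256;            // illustrative bucket count
static StubSketch* table[kBuckets];  // chained hash table of stubs

// Apply f to every stub, bucket by bucket, as vtable_stub_do() does.
void vtable_stub_do_sketch(void f(StubSketch*)) {
  for (int i = 0; i < kBuckets; i++) {
    for (StubSketch* s = table[i]; s != NULL; s = s->next()) {
      f(s);
    }
  }
}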
diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp
index 52c88bd3a..aa11784b7 100644
--- a/src/share/vm/interpreter/linkResolver.cpp
+++ b/src/share/vm/interpreter/linkResolver.cpp
@@ -564,16 +564,7 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
}
}
- // 5. check if method is concrete
- if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
- ResourceMark rm(THREAD);
- THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
- Method::name_and_sig_as_C_string(resolved_klass(),
- method_name,
- method_signature));
- }
-
- // 6. access checks, access checking may be turned off when calling from within the VM.
+ // 5. access checks, access checking may be turned off when calling from within the VM.
if (check_access) {
assert(current_klass.not_null() , "current_klass should not be null");
diff --git a/src/share/vm/interpreter/rewriter.cpp b/src/share/vm/interpreter/rewriter.cpp
index 208f8617d..2474ae758 100644
--- a/src/share/vm/interpreter/rewriter.cpp
+++ b/src/share/vm/interpreter/rewriter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -250,8 +250,8 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
// We will reverse the bytecode rewriting _after_ adjusting them.
// Adjust the cache index by offset to the invokedynamic entries in the
// cpCache plus the delta if the invokedynamic bytecodes were adjusted.
- cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit;
- int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index);
+ int adjustment = cp_cache_delta() + _first_iteration_cp_cache_limit;
+ int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index - adjustment);
assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
// zero out 4 bytes
Bytes::put_Java_u4(p, 0);
@@ -453,18 +453,7 @@ methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
return method;
}
-void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
- ResourceMark rm(THREAD);
- Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
- // (That's all, folks.)
-}
-
-
-Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
- : _klass(klass),
- _pool(cpool),
- _methods(methods)
-{
+void Rewriter::rewrite_bytecodes(TRAPS) {
assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
// determine index maps for Method* rewriting
@@ -508,6 +497,29 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Me
// May have to fix invokedynamic bytecodes if invokestatic/InterfaceMethodref
// entries had to be added.
patch_invokedynamic_bytecodes();
+}
+
+void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
+ ResourceMark rm(THREAD);
+ Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
+ // (That's all, folks.)
+}
+
+
+Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
+ : _klass(klass),
+ _pool(cpool),
+ _methods(methods)
+{
+
+ // Rewrite bytecodes - an exception here exits the constructor.
+ rewrite_bytecodes(CHECK);
+
+ // Stress restoring bytecodes
+ if (StressRewriter) {
+ restore_bytecodes();
+ rewrite_bytecodes(CHECK);
+ }
// allocate constant pool cache, now that we've seen all the bytecodes
make_constant_pool_cache(THREAD);
@@ -523,6 +535,7 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Me
// so that methods with jsrs in custom class lists aren't attempted to be
// rewritten in the RO section of the shared archive.
// Relocated bytecodes don't have to be restored, only the cp cache entries
+ int len = _methods->length();
for (int i = len-1; i >= 0; i--) {
methodHandle m(THREAD, _methods->at(i));
diff --git a/src/share/vm/interpreter/rewriter.hpp b/src/share/vm/interpreter/rewriter.hpp
index 2c7990fa7..aa4b7cd52 100644
--- a/src/share/vm/interpreter/rewriter.hpp
+++ b/src/share/vm/interpreter/rewriter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -199,6 +199,9 @@ class Rewriter: public StackObj {
void patch_invokedynamic_bytecodes();
+ // Do all the work.
+ void rewrite_bytecodes(TRAPS);
+
// Revert bytecodes in case of an exception.
void restore_bytecodes();
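The Rewriter refactoring above extracts the bytecode-rewriting pass into rewrite_bytecodes() so that the constructor can, under the StressRewriter flag, rewrite, revert, and rewrite again before building the constant pool cache, exercising restore_bytecodes() on every class load. A condensed sketch of that control flow, with HotSpot's TRAPS/CHECK machinery and real types omitted:

struct RewriterSketch {
  bool _stress_rewriter;  // stand-in for the StressRewriter flag

  void rewrite_bytecodes()        { /* scan methods, rewrite bytecodes, build index maps */ }
  void restore_bytecodes()        { /* revert bytecodes to their unrewritten form */ }
  void make_constant_pool_cache() { /* allocate the cpCache from the index maps */ }

  explicit RewriterSketch(bool stress) : _stress_rewriter(stress) {
    rewrite_bytecodes();            // in HotSpot an exception here propagates out (CHECK)
    if (_stress_rewriter) {         // exercise the revert path
      restore_bytecodes();
      rewrite_bytecodes();
    }
    make_constant_pool_cache();     // only after all bytecodes have been seen
  }
};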
diff --git a/src/share/vm/memory/allocation.cpp b/src/share/vm/memory/allocation.cpp
index 73d1d7858..8e382ba85 100644
--- a/src/share/vm/memory/allocation.cpp
+++ b/src/share/vm/memory/allocation.cpp
@@ -74,9 +74,8 @@ bool MetaspaceObj::is_shared() const {
return MetaspaceShared::is_in_shared_space(this);
}
-
bool MetaspaceObj::is_metaspace_object() const {
- return Metaspace::contains((void*)this);
+ return ClassLoaderDataGraph::contains((void*)this);
}
void MetaspaceObj::print_address_on(outputStream* st) const {
diff --git a/src/share/vm/memory/allocation.hpp b/src/share/vm/memory/allocation.hpp
index feb482add..d635a0798 100644
--- a/src/share/vm/memory/allocation.hpp
+++ b/src/share/vm/memory/allocation.hpp
@@ -267,7 +267,7 @@ class ClassLoaderData;
class MetaspaceObj {
public:
- bool is_metaspace_object() const; // more specific test but slower
+ bool is_metaspace_object() const;
bool is_shared() const;
void print_address_on(outputStream* st) const; // nonvirtual address printing
diff --git a/src/share/vm/memory/metachunk.hpp b/src/share/vm/memory/metachunk.hpp
index 3bd4d15bc..e873dc6a3 100644
--- a/src/share/vm/memory/metachunk.hpp
+++ b/src/share/vm/memory/metachunk.hpp
@@ -143,6 +143,8 @@ class Metachunk : public Metabase<Metachunk> {
void set_is_tagged_free(bool v) { _is_tagged_free = v; }
#endif
+ bool contains(const void* ptr) { return bottom() <= ptr && ptr < _top; }
+
NOT_PRODUCT(void mangle();)
void print_on(outputStream* st) const;
diff --git a/src/share/vm/memory/metaspace.cpp b/src/share/vm/memory/metaspace.cpp
index 19c03661a..5a7e4ec2f 100644
--- a/src/share/vm/memory/metaspace.cpp
+++ b/src/share/vm/memory/metaspace.cpp
@@ -513,8 +513,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
// Unlink empty VirtualSpaceNodes and free it.
void purge(ChunkManager* chunk_manager);
- bool contains(const void *ptr);
-
void print_on(outputStream* st) const;
class VirtualSpaceListIterator : public StackObj {
@@ -558,7 +556,7 @@ class SpaceManager : public CHeapObj<mtClass> {
private:
- // protects allocations and contains.
+ // protects allocations
Mutex* const _lock;
// Type of metadata allocated.
@@ -595,7 +593,11 @@ class SpaceManager : public CHeapObj<mtClass> {
private:
// Accessors
Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
- void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
+ void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
+ // ensure lock-free iteration sees fully initialized node
+ OrderAccess::storestore();
+ _chunks_in_use[index] = v;
+ }
BlockFreelist* block_freelists() const {
return (BlockFreelist*) &_block_freelists;
@@ -708,6 +710,8 @@ class SpaceManager : public CHeapObj<mtClass> {
void print_on(outputStream* st) const;
void locked_print_chunks_in_use_on(outputStream* st) const;
+ bool contains(const void *ptr);
+
void verify();
void verify_chunk_size(Metachunk* chunk);
NOT_PRODUCT(void mangle_freed_chunks();)
@@ -1159,8 +1163,6 @@ bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
} else {
assert(new_entry->reserved_words() == vs_word_size,
"Reserved memory size differs from requested memory size");
- // ensure lock-free iteration sees fully initialized node
- OrderAccess::storestore();
link_vs(new_entry);
return true;
}
@@ -1287,19 +1289,6 @@ void VirtualSpaceList::print_on(outputStream* st) const {
}
}
-bool VirtualSpaceList::contains(const void *ptr) {
- VirtualSpaceNode* list = virtual_space_list();
- VirtualSpaceListIterator iter(list);
- while (iter.repeat()) {
- VirtualSpaceNode* node = iter.get_next();
- if (node->reserved()->contains(ptr)) {
- return true;
- }
- }
- return false;
-}
-
-
// MetaspaceGC methods
// VM_CollectForMetadataAllocation is the vm operation used to GC.
@@ -2392,6 +2381,21 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
return result;
}
+// This function looks at the chunks in the metaspace without locking.
+// The chunks are added with store ordering and are not deleted except at
+// unloading time.
+bool SpaceManager::contains(const void *ptr) {
+ for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+ Metachunk* curr = chunks_in_use(i);
+ while (curr != NULL) {
+ if (curr->contains(ptr)) return true;
+ curr = curr->next();
+ }
+ }
+ return false;
+}
+
void SpaceManager::verify() {
// If there are blocks in the dictionary, then
// verfication of chunks does not work since
@@ -3463,17 +3467,12 @@ void Metaspace::print_on(outputStream* out) const {
}
}
-bool Metaspace::contains(const void * ptr) {
- if (MetaspaceShared::is_in_shared_space(ptr)) {
- return true;
+bool Metaspace::contains(const void* ptr) {
+ if (vsm()->contains(ptr)) return true;
+ if (using_class_space()) {
+ return class_vsm()->contains(ptr);
}
- // This is checked while unlocked. As long as the virtualspaces are added
- // at the end, the pointer will be in one of them. The virtual spaces
- // aren't deleted presently. When they are, some sort of locking might
- // be needed. Note, locking this can cause inversion problems with the
- // caller in MetaspaceObj::is_metadata() function.
- return space_list()->contains(ptr) ||
- (using_class_space() && class_space_list()->contains(ptr));
+ return false;
}
void Metaspace::verify() {
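The metaspace changes above make the containment check lock free: a chunk is fully initialized first, a store-store barrier is issued, and only then is it linked into the per-index list that SpaceManager::contains() walks without taking the allocation lock. Below is a small sketch of that publication pattern using std::atomic; HotSpot uses its own OrderAccess::storestore() on the writer side, and its readers rely on dependent loads rather than the explicit acquire shown here.

#include <atomic>
#include <cstddef>

// Stand-in for a Metachunk: a [bottom, top) memory range plus a link.
struct ChunkSketch {
  char*        bottom;
  char*        top;
  ChunkSketch* next;

  bool contains(const void* p) const {
    const char* c = static_cast<const char*>(p);
    return bottom <= c && c < top;
  }
};

std::atomic<ChunkSketch*> head{nullptr};

// Writer side (holds the allocation lock): initialize, then publish with
// release semantics -- the analogue of the storestore barrier in the patch.
void publish(ChunkSketch* c) {
  c->next = head.load(std::memory_order_relaxed);
  head.store(c, std::memory_order_release);
}

// Reader side (no lock): sees only fully published nodes; chunks are never
// unlinked except at unloading time, matching the comment in the patch.
bool contains(const void* p) {
  for (ChunkSketch* c = head.load(std::memory_order_acquire); c != nullptr; c = c->next) {
    if (c->contains(p)) return true;
  }
  return false;
}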
diff --git a/src/share/vm/memory/metaspace.hpp b/src/share/vm/memory/metaspace.hpp
index b98e69a41..f74cec355 100644
--- a/src/share/vm/memory/metaspace.hpp
+++ b/src/share/vm/memory/metaspace.hpp
@@ -226,7 +226,7 @@ class Metaspace : public CHeapObj<mtClass> {
MetaWord* expand_and_allocate(size_t size,
MetadataType mdtype);
- static bool contains(const void *ptr);
+ bool contains(const void* ptr);
void dump(outputStream* const out) const;
// Free empty virtualspaces
diff --git a/src/share/vm/oops/arrayKlass.cpp b/src/share/vm/oops/arrayKlass.cpp
index 9e40206c2..fcb46b11f 100644
--- a/src/share/vm/oops/arrayKlass.cpp
+++ b/src/share/vm/oops/arrayKlass.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -214,8 +214,8 @@ void ArrayKlass::oop_print_on(oop obj, outputStream* st) {
// Verification
-void ArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
- Klass::verify_on(st, check_dictionary);
+void ArrayKlass::verify_on(outputStream* st) {
+ Klass::verify_on(st);
if (component_mirror() != NULL) {
guarantee(component_mirror()->klass() != NULL, "should have a class");
diff --git a/src/share/vm/oops/arrayKlass.hpp b/src/share/vm/oops/arrayKlass.hpp
index 4b06f1c0e..7b4ad2e9a 100644
--- a/src/share/vm/oops/arrayKlass.hpp
+++ b/src/share/vm/oops/arrayKlass.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -146,7 +146,7 @@ class ArrayKlass: public Klass {
void oop_print_on(oop obj, outputStream* st);
// Verification
- void verify_on(outputStream* st, bool check_dictionary);
+ void verify_on(outputStream* st);
void oop_verify_on(oop obj, outputStream* st);
};
diff --git a/src/share/vm/oops/constantPool.cpp b/src/share/vm/oops/constantPool.cpp
index 16b53a699..3f37810d2 100644
--- a/src/share/vm/oops/constantPool.cpp
+++ b/src/share/vm/oops/constantPool.cpp
@@ -82,6 +82,9 @@ ConstantPool::ConstantPool(Array<u1>* tags) {
void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
MetadataFactory::free_metadata(loader_data, cache());
set_cache(NULL);
+ MetadataFactory::free_array<u2>(loader_data, reference_map());
+ set_reference_map(NULL);
+
MetadataFactory::free_array<jushort>(loader_data, operands());
set_operands(NULL);
diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp
index 4a27fc153..27ae4ff03 100644
--- a/src/share/vm/oops/instanceKlass.cpp
+++ b/src/share/vm/oops/instanceKlass.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -3184,7 +3184,7 @@ class VerifyFieldClosure: public OopClosure {
virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};
-void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
+void InstanceKlass::verify_on(outputStream* st) {
#ifndef PRODUCT
// Avoid redundant verifies; this really should be in product builds too.
if (_verify_count == Universe::verify_count()) return;
@@ -3192,14 +3192,11 @@ void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
#endif
// Verify Klass
- Klass::verify_on(st, check_dictionary);
+ Klass::verify_on(st);
- // Verify that klass is present in SystemDictionary if not already
- // verifying the SystemDictionary.
- if (is_loaded() && !is_anonymous() && check_dictionary) {
- Symbol* h_name = name();
- SystemDictionary::verify_obj_klass_present(h_name, class_loader_data());
- }
+ // Verify that klass is present in ClassLoaderData
+ guarantee(class_loader_data()->contains_klass(this),
+ "this class isn't found in class loader data");
// Verify vtables
if (is_linked()) {
diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp
index 2b7a73a31..ba2bce874 100644
--- a/src/share/vm/oops/instanceKlass.hpp
+++ b/src/share/vm/oops/instanceKlass.hpp
@@ -1086,7 +1086,7 @@ public:
const char* internal_name() const;
// Verification
- void verify_on(outputStream* st, bool check_dictionary);
+ void verify_on(outputStream* st);
void oop_verify_on(oop obj, outputStream* st);
};
diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp
index 22b570bbf..adca246aa 100644
--- a/src/share/vm/oops/klass.cpp
+++ b/src/share/vm/oops/klass.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -376,8 +376,6 @@ void Klass::append_to_sibling_list() {
}
bool Klass::is_loader_alive(BoolObjectClosure* is_alive) {
- assert(ClassLoaderDataGraph::contains((address)this), "is in the metaspace");
-
#ifdef ASSERT
// The class is alive iff the class loader is alive.
oop loader = class_loader();
@@ -640,7 +638,7 @@ void Klass::collect_statistics(KlassSizeStats *sz) const {
// Verification
-void Klass::verify_on(outputStream* st, bool check_dictionary) {
+void Klass::verify_on(outputStream* st) {
// This can be expensive, but it is worth checking that this klass is actually
// in the CLD graph (the check is skipped in product builds).
diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp
index 9855fdd32..a31a23573 100644
--- a/src/share/vm/oops/klass.hpp
+++ b/src/share/vm/oops/klass.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -695,8 +695,8 @@ class Klass : public Metadata {
virtual const char* internal_name() const = 0;
// Verification
- virtual void verify_on(outputStream* st, bool check_dictionary);
- void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
+ virtual void verify_on(outputStream* st);
+ void verify() { verify_on(tty); }
#ifndef PRODUCT
bool verify_vtable_index(int index);
diff --git a/src/share/vm/oops/objArrayKlass.cpp b/src/share/vm/oops/objArrayKlass.cpp
index 7294ebe9a..3316a8b27 100644
--- a/src/share/vm/oops/objArrayKlass.cpp
+++ b/src/share/vm/oops/objArrayKlass.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -674,8 +674,8 @@ const char* ObjArrayKlass::internal_name() const {
// Verification
-void ObjArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
- ArrayKlass::verify_on(st, check_dictionary);
+void ObjArrayKlass::verify_on(outputStream* st) {
+ ArrayKlass::verify_on(st);
guarantee(element_klass()->is_klass(), "should be klass");
guarantee(bottom_klass()->is_klass(), "should be klass");
Klass* bk = bottom_klass();
diff --git a/src/share/vm/oops/objArrayKlass.hpp b/src/share/vm/oops/objArrayKlass.hpp
index af06fd420..cfe31e86e 100644
--- a/src/share/vm/oops/objArrayKlass.hpp
+++ b/src/share/vm/oops/objArrayKlass.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -151,7 +151,7 @@ class ObjArrayKlass : public ArrayKlass {
const char* internal_name() const;
// Verification
- void verify_on(outputStream* st, bool check_dictionary);
+ void verify_on(outputStream* st);
void oop_verify_on(oop obj, outputStream* st);
};
diff --git a/src/share/vm/opto/bytecodeInfo.cpp b/src/share/vm/opto/bytecodeInfo.cpp
index 4b3f39f08..101d5f733 100644
--- a/src/share/vm/opto/bytecodeInfo.cpp
+++ b/src/share/vm/opto/bytecodeInfo.cpp
@@ -63,34 +63,14 @@ InlineTree::InlineTree(Compile* c,
assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter");
- if (UseOldInlining) {
- // Update hierarchical counts, count_inline_bcs() and count_inlines()
- InlineTree *caller = (InlineTree *)caller_tree;
- for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) {
- caller->_count_inline_bcs += count_inline_bcs();
- NOT_PRODUCT(caller->_count_inlines++;)
- }
+ // Update hierarchical counts, count_inline_bcs() and count_inlines()
+ InlineTree *caller = (InlineTree *)caller_tree;
+ for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) {
+ caller->_count_inline_bcs += count_inline_bcs();
+ NOT_PRODUCT(caller->_count_inlines++;)
}
}
-InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms,
- float site_invoke_ratio, int max_inline_level) :
- C(c),
- _caller_jvms(caller_jvms),
- _caller_tree(NULL),
- _method(callee_method),
- _site_invoke_ratio(site_invoke_ratio),
- _max_inline_level(max_inline_level),
- _count_inline_bcs(method()->code_size()),
- _msg(NULL)
-{
-#ifndef PRODUCT
- _count_inlines = 0;
- _forced_inline = false;
-#endif
- assert(!UseOldInlining, "do not use for old stuff");
-}
-
/**
* Return true when EA is ON and a java constructor is called or
* a super constructor is called from an inlined java constructor.
@@ -161,11 +141,6 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
return true;
}
- if (!UseOldInlining) {
- set_msg("!UseOldInlining");
- return true; // size and frequency are represented in a new way
- }
-
int default_max_inline_size = C->max_inline_size();
int inline_small_code_size = InlineSmallCode / 4;
int max_inline_size = default_max_inline_size;
@@ -229,35 +204,6 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
fail_msg = "don't inline by annotation";
}
- if (!UseOldInlining) {
- if (fail_msg != NULL) {
- *wci_result = *(WarmCallInfo::always_cold());
- set_msg(fail_msg);
- return true;
- }
-
- if (callee_method->has_unloaded_classes_in_signature()) {
- wci_result->set_profit(wci_result->profit() * 0.1);
- }
-
- // don't inline exception code unless the top method belongs to an
- // exception class
- if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
- ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
- if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
- wci_result->set_profit(wci_result->profit() * 0.1);
- }
- }
-
- if (callee_method->has_compiled_code() &&
- callee_method->instructions_size() > InlineSmallCode) {
- wci_result->set_profit(wci_result->profit() * 0.1);
- // %%% adjust wci_result->size()?
- }
-
- return false;
- }
-
// one more inlining restriction
if (fail_msg == NULL && callee_method->has_unloaded_classes_in_signature()) {
fail_msg = "unloaded signature classes";
@@ -360,9 +306,7 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
int caller_bci, JVMState* jvms, ciCallProfile& profile,
WarmCallInfo* wci_result, bool& should_delay) {
- // Old algorithm had funny accumulating BC-size counters
- if (UseOldInlining && ClipInlining
- && (int)count_inline_bcs() >= DesiredMethodLimit) {
+ if (ClipInlining && (int)count_inline_bcs() >= DesiredMethodLimit) {
if (!callee_method->force_inline() || !IncrementalInline) {
set_msg("size > DesiredMethodLimit");
return false;
@@ -465,8 +409,7 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
int size = callee_method->code_size_for_inlining();
- if (UseOldInlining && ClipInlining
- && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
+ if (ClipInlining && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
if (!callee_method->force_inline() || !IncrementalInline) {
set_msg("size > DesiredMethodLimit");
return false;
@@ -584,8 +527,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
jvms, profile, &wci, should_delay);
#ifndef PRODUCT
- if (UseOldInlining && InlineWarmCalls
- && (PrintOpto || C->print_inlining())) {
+ if (InlineWarmCalls && (PrintOpto || C->print_inlining())) {
bool cold = wci.is_cold();
bool hot = !cold && wci.is_hot();
bool old_cold = !success;
@@ -599,13 +541,12 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
}
}
#endif
- if (UseOldInlining) {
- if (success) {
- wci = *(WarmCallInfo::always_hot());
- } else {
- wci = *(WarmCallInfo::always_cold());
- }
+ if (success) {
+ wci = *(WarmCallInfo::always_hot());
+ } else {
+ wci = *(WarmCallInfo::always_cold());
}
+
if (!InlineWarmCalls) {
if (!wci.is_cold() && !wci.is_hot()) {
// Do not inline the warm calls.
@@ -619,8 +560,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
set_msg("inline (hot)");
}
print_inlining(callee_method, caller_bci, true /* success */);
- if (UseOldInlining)
- build_inline_tree_for_callee(callee_method, jvms, caller_bci);
+ build_inline_tree_for_callee(callee_method, jvms, caller_bci);
if (InlineWarmCalls && !wci.is_hot())
return new (C) WarmCallInfo(wci); // copy to heap
return WarmCallInfo::always_hot();
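With UseOldInlining removed above, the alternative warm-call scoring path is gone: try_to_inline()'s verdict maps directly to an always-hot or always-cold WarmCallInfo, and the DesiredMethodLimit budget check applies whenever ClipInlining is set. A toy sketch of the resulting decision shape (illustrative names and parameters, not the C2 API):

#include <string>

struct InlineDecision {
  bool        ok;
  std::string msg;
};

// The budget check now fires whenever ClipInlining is set (the old
// "UseOldInlining &&" half of the guard is gone), unless the callee is
// force-inlined under incremental inlining.
InlineDecision try_to_inline_sketch(int accumulated_bcs, int callee_size,
                                    int desired_limit, bool clip_inlining,
                                    bool force_inline) {
  if (clip_inlining &&
      accumulated_bcs + callee_size >= desired_limit &&
      !force_inline) {
    return InlineDecision{false, "size > DesiredMethodLimit"};
  }
  return InlineDecision{true, "inline (hot)"};
}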
diff --git a/src/share/vm/opto/c2_globals.hpp b/src/share/vm/opto/c2_globals.hpp
index da7ab96f7..d9829d3c6 100644
--- a/src/share/vm/opto/c2_globals.hpp
+++ b/src/share/vm/opto/c2_globals.hpp
@@ -357,9 +357,6 @@
"File to dump ideal graph to. If set overrides the " \
"use of the network") \
\
- product(bool, UseOldInlining, true, \
- "Enable the 1.3 inlining strategy") \
- \
product(bool, UseBimorphicInlining, true, \
"Profiling based inlining for two receivers") \
\
diff --git a/src/share/vm/opto/callGenerator.cpp b/src/share/vm/opto/callGenerator.cpp
index 86e48e849..a655f27b5 100644
--- a/src/share/vm/opto/callGenerator.cpp
+++ b/src/share/vm/opto/callGenerator.cpp
@@ -722,7 +722,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser)
Node* m = kit.map()->in(i);
Node* n = slow_map->in(i);
if (m != n) {
- const Type* t = gvn.type(m)->meet(gvn.type(n));
+ const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
Node* phi = PhiNode::make(region, m, t);
phi->set_req(2, n);
kit.map()->set_req(i, gvn.transform(phi));
@@ -975,7 +975,7 @@ JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_pa
Node* m = kit.map()->in(i);
Node* n = slow_map->in(i);
if (m != n) {
- const Type* t = gvn.type(m)->meet(gvn.type(n));
+ const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
Node* phi = PhiNode::make(region, m, t);
phi->set_req(2, n);
kit.map()->set_req(i, gvn.transform(phi));
diff --git a/src/share/vm/opto/cfgnode.cpp b/src/share/vm/opto/cfgnode.cpp
index 36818b75b..25223035f 100644
--- a/src/share/vm/opto/cfgnode.cpp
+++ b/src/share/vm/opto/cfgnode.cpp
@@ -951,7 +951,7 @@ const Type *PhiNode::Value( PhaseTransform *phase ) const {
if (is_intf != ti_is_intf)
{ t = _type; break; }
}
- t = t->meet(ti);
+ t = t->meet_speculative(ti);
}
}
@@ -968,11 +968,11 @@ const Type *PhiNode::Value( PhaseTransform *phase ) const {
//
// It is not possible to see Type::BOTTOM values as phi inputs,
// because the ciTypeFlow pre-pass produces verifier-quality types.
- const Type* ft = t->filter(_type); // Worst case type
+ const Type* ft = t->filter_speculative(_type); // Worst case type
#ifdef ASSERT
// The following logic has been moved into TypeOopPtr::filter.
- const Type* jt = t->join(_type);
+ const Type* jt = t->join_speculative(_type);
if( jt->empty() ) { // Emptied out???
// Check for evil case of 't' being a class and '_type' expecting an
@@ -1757,7 +1757,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
break;
}
// Accumulate type for resulting Phi
- type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
+ type = type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type());
}
Node* base = NULL;
if (doit) {
diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
index 1f4f4f683..d838a5b6a 100644
--- a/src/share/vm/opto/compile.cpp
+++ b/src/share/vm/opto/compile.cpp
@@ -705,10 +705,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
print_compile_messages();
- if (UseOldInlining || PrintCompilation NOT_PRODUCT( || PrintOpto) )
- _ilt = InlineTree::build_inline_tree_root();
- else
- _ilt = NULL;
+ _ilt = InlineTree::build_inline_tree_root();
// Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
assert(num_alias_types() >= AliasIdxRaw, "");
@@ -3948,16 +3945,18 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
// which may optimize it out.
for (uint next = 0; next < worklist.size(); ++next) {
Node *n = worklist.at(next);
- if (n->is_Type() && n->as_Type()->type()->isa_oopptr() != NULL &&
- n->as_Type()->type()->is_oopptr()->speculative() != NULL) {
+ if (n->is_Type()) {
TypeNode* tn = n->as_Type();
- const TypeOopPtr* t = tn->type()->is_oopptr();
- bool in_hash = igvn.hash_delete(n);
- assert(in_hash, "node should be in igvn hash table");
- tn->set_type(t->remove_speculative());
- igvn.hash_insert(n);
- igvn._worklist.push(n); // give it a chance to go away
- modified++;
+ const Type* t = tn->type();
+ const Type* t_no_spec = t->remove_speculative();
+ if (t_no_spec != t) {
+ bool in_hash = igvn.hash_delete(n);
+ assert(in_hash, "node should be in igvn hash table");
+ tn->set_type(t_no_spec);
+ igvn.hash_insert(n);
+ igvn._worklist.push(n); // give it a chance to go away
+ modified++;
+ }
}
uint max = n->len();
for( uint i = 0; i < max; ++i ) {
@@ -3971,6 +3970,27 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
if (modified > 0) {
igvn.optimize();
}
+#ifdef ASSERT
+ // Verify that after the IGVN is over no speculative type has resurfaced
+ worklist.clear();
+ worklist.push(root());
+ for (uint next = 0; next < worklist.size(); ++next) {
+ Node *n = worklist.at(next);
+ const Type* t = igvn.type(n);
+ assert(t == t->remove_speculative(), "no more speculative types");
+ if (n->is_Type()) {
+ t = n->as_Type()->type();
+ assert(t == t->remove_speculative(), "no more speculative types");
+ }
+ uint max = n->len();
+ for( uint i = 0; i < max; ++i ) {
+ Node *m = n->in(i);
+ if (not_a_node(m)) continue;
+ worklist.push(m);
+ }
+ }
+ igvn.check_no_speculative_types();
+#endif
}
}
diff --git a/src/share/vm/opto/connode.cpp b/src/share/vm/opto/connode.cpp
index 948ff7b46..0a71bc598 100644
--- a/src/share/vm/opto/connode.cpp
+++ b/src/share/vm/opto/connode.cpp
@@ -188,7 +188,7 @@ Node *CMoveNode::Identity( PhaseTransform *phase ) {
const Type *CMoveNode::Value( PhaseTransform *phase ) const {
if( phase->type(in(Condition)) == Type::TOP )
return Type::TOP;
- return phase->type(in(IfFalse))->meet(phase->type(in(IfTrue)));
+ return phase->type(in(IfFalse))->meet_speculative(phase->type(in(IfTrue)));
}
//------------------------------make-------------------------------------------
@@ -392,14 +392,14 @@ Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
//=============================================================================
// If input is already higher or equal to cast type, then this is an identity.
Node *ConstraintCastNode::Identity( PhaseTransform *phase ) {
- return phase->type(in(1))->higher_equal(_type) ? in(1) : this;
+ return phase->type(in(1))->higher_equal_speculative(_type) ? in(1) : this;
}
//------------------------------Value------------------------------------------
// Take 'join' of input and cast-up type
const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
- const Type* ft = phase->type(in(1))->filter(_type);
+ const Type* ft = phase->type(in(1))->filter_speculative(_type);
#ifdef ASSERT
// Previous versions of this function had some special case logic,
@@ -409,7 +409,7 @@ const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
{
const Type* t1 = phase->type(in(1));
if( t1 == Type::TOP ) assert(ft == Type::TOP, "special case #1");
- const Type* rt = t1->join(_type);
+ const Type* rt = t1->join_speculative(_type);
if (rt->empty()) assert(ft == Type::TOP, "special case #2");
break;
}
diff --git a/src/share/vm/opto/connode.hpp b/src/share/vm/opto/connode.hpp
index 53ff820d8..03a096b23 100644
--- a/src/share/vm/opto/connode.hpp
+++ b/src/share/vm/opto/connode.hpp
@@ -36,7 +36,7 @@ class MachNode;
// Simple constants
class ConNode : public TypeNode {
public:
- ConNode( const Type *t ) : TypeNode(t,1) {
+ ConNode( const Type *t ) : TypeNode(t->remove_speculative(),1) {
init_req(0, (Node*)Compile::current()->root());
init_flags(Flag_is_Con);
}
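The call sites above switch from meet()/join()/filter() to the *_speculative variants. Judging from the meet_helper() change later in this diff, the plain entry points strip the speculative part of both operands before the lattice operation, while the _speculative variants preserve it. A self-contained toy model of that split -- a stand-in two-component lattice, not C2's Type hierarchy:

// Toy type: a "real" component plus a profile-derived speculative hint.
struct TySketch {
  int base;         // stand-in for the ordinary type lattice element
  int speculative;  // speculative part; 0 means none

  TySketch remove_speculative() const { return TySketch{base, 0}; }
  TySketch maybe_remove_speculative(bool include_speculative) const {
    return include_speculative ? *this : remove_speculative();
  }
};

// Placeholder lattice meet on the toy type.
TySketch xmeet(const TySketch& a, const TySketch& b) {
  return TySketch{a.base < b.base ? a.base : b.base,
                  a.speculative == b.speculative ? a.speculative : 0};
}

TySketch meet_helper(const TySketch& self, const TySketch& t,
                     bool include_speculative) {
  TySketch a = self.maybe_remove_speculative(include_speculative);
  TySketch b = t.maybe_remove_speculative(include_speculative);
  return xmeet(a, b);
}

// The public pair mirrors the split introduced in this patch.
TySketch meet(const TySketch& a, const TySketch& b)             { return meet_helper(a, b, false); }
TySketch meet_speculative(const TySketch& a, const TySketch& b) { return meet_helper(a, b, true);  }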
diff --git a/src/share/vm/opto/doCall.cpp b/src/share/vm/opto/doCall.cpp
index 223f7da36..e888b550f 100644
--- a/src/share/vm/opto/doCall.cpp
+++ b/src/share/vm/opto/doCall.cpp
@@ -161,19 +161,8 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Try inlining a bytecoded method:
if (!call_does_dispatch) {
- InlineTree* ilt;
- if (UseOldInlining) {
- ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
- } else {
- // Make a disembodied, stateless ILT.
- // TO DO: When UseOldInlining is removed, copy the ILT code elsewhere.
- float site_invoke_ratio = prof_factor;
- // Note: ilt is for the root of this parse, not the present call site.
- ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
- }
+ InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
WarmCallInfo scratch_ci;
- if (!UseOldInlining)
- scratch_ci.init(jvms, callee, profile, prof_factor);
bool should_delay = false;
WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
assert(ci != &scratch_ci, "do not let this pointer escape");
diff --git a/src/share/vm/opto/graphKit.cpp b/src/share/vm/opto/graphKit.cpp
index e2d7e385c..af4b2f452 100644
--- a/src/share/vm/opto/graphKit.cpp
+++ b/src/share/vm/opto/graphKit.cpp
@@ -420,7 +420,7 @@ void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* ph
}
const Type* srctype = _gvn.type(src);
if (phi->type() != srctype) {
- const Type* dsttype = phi->type()->meet(srctype);
+ const Type* dsttype = phi->type()->meet_speculative(srctype);
if (phi->type() != dsttype) {
phi->set_type(dsttype);
_gvn.set_type(phi, dsttype);
@@ -1224,7 +1224,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
// See if mixing in the NULL pointer changes type.
// If so, then the NULL pointer was not allowed in the original
// type. In other words, "value" was not-null.
- if (t->meet(TypePtr::NULL_PTR) != t) {
+ if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
// same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
explicit_null_checks_elided++;
return value; // Elided null check quickly!
@@ -1357,7 +1357,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
const Type *t = _gvn.type(obj);
- const Type *t_not_null = t->join(TypePtr::NOTNULL);
+ const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
// Object is already not-null?
if( t == t_not_null ) return obj;
@@ -3014,7 +3014,7 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
if (failure_control != NULL) // failure is now impossible
(*failure_control) = top();
// adjust the type of the phi to the exact klass:
- phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
+ phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
}
}
diff --git a/src/share/vm/opto/library_call.cpp b/src/share/vm/opto/library_call.cpp
index 66141a1f9..3a2279ee5 100644
--- a/src/share/vm/opto/library_call.cpp
+++ b/src/share/vm/opto/library_call.cpp
@@ -304,6 +304,7 @@ class LibraryCallKit : public GraphKit {
bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
+ Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
bool inline_encodeISOArray();
bool inline_updateCRC32();
bool inline_updateBytesCRC32();
@@ -5945,10 +5946,22 @@ bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
if (k_start == NULL) return false;
- // Call the stub.
- make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
- stubAddr, stubName, TypePtr::BOTTOM,
- src_start, dest_start, k_start);
+ if (Matcher::pass_original_key_for_aes()) {
+ // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
+ // compatibility issues between Java key expansion and SPARC crypto instructions
+ Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
+ if (original_k_start == NULL) return false;
+
+ // Call the stub.
+ make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
+ stubAddr, stubName, TypePtr::BOTTOM,
+ src_start, dest_start, k_start, original_k_start);
+ } else {
+ // Call the stub.
+ make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
+ stubAddr, stubName, TypePtr::BOTTOM,
+ src_start, dest_start, k_start);
+ }
return true;
}
@@ -6026,14 +6039,29 @@ bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
if (objRvec == NULL) return false;
Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
- // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
- make_runtime_call(RC_LEAF|RC_NO_FP,
- OptoRuntime::cipherBlockChaining_aescrypt_Type(),
- stubAddr, stubName, TypePtr::BOTTOM,
- src_start, dest_start, k_start, r_start, len);
+ Node* cbcCrypt;
+ if (Matcher::pass_original_key_for_aes()) {
+ // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
+ // compatibility issues between Java key expansion and SPARC crypto instructions
+ Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
+ if (original_k_start == NULL) return false;
- // return is void so no result needs to be pushed
+ // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
+ cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
+ OptoRuntime::cipherBlockChaining_aescrypt_Type(),
+ stubAddr, stubName, TypePtr::BOTTOM,
+ src_start, dest_start, k_start, r_start, len, original_k_start);
+ } else {
+ // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
+ cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
+ OptoRuntime::cipherBlockChaining_aescrypt_Type(),
+ stubAddr, stubName, TypePtr::BOTTOM,
+ src_start, dest_start, k_start, r_start, len);
+ }
+ // return cipher length (int)
+ Node* retvalue = _gvn.transform(new (C) ProjNode(cbcCrypt, TypeFunc::Parms));
+ set_result(retvalue);
return true;
}
@@ -6048,6 +6076,17 @@ Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object)
return k_start;
}
+//------------------------------get_original_key_start_from_aescrypt_object-----------------------
+Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
+ Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
+ assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
+ if (objAESCryptKey == NULL) return (Node *) NULL;
+
+ // now have the array, need to get the start address of the lastKey array
+ Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
+ return original_k_start;
+}
+
//----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
// Return node representing slow path of predicate check.
// the pseudo code we want to emulate with this predicate is:
diff --git a/src/share/vm/opto/loopopts.cpp b/src/share/vm/opto/loopopts.cpp
index ba19f7493..ac97d3ede 100644
--- a/src/share/vm/opto/loopopts.cpp
+++ b/src/share/vm/opto/loopopts.cpp
@@ -1115,8 +1115,8 @@ BoolNode *PhaseIdealLoop::clone_iff( PhiNode *phi, IdealLoopTree *loop ) {
Node *n2 = phi->in(i)->in(1)->in(2);
phi1->set_req( i, n1 );
phi2->set_req( i, n2 );
- phi1->set_type( phi1->type()->meet(n1->bottom_type()) );
- phi2->set_type( phi2->type()->meet(n2->bottom_type()) );
+ phi1->set_type( phi1->type()->meet_speculative(n1->bottom_type()));
+ phi2->set_type( phi2->type()->meet_speculative(n2->bottom_type()));
}
// See if these Phis have been made before.
// Register with optimizer
@@ -1189,8 +1189,8 @@ CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
}
phi1->set_req( j, n1 );
phi2->set_req( j, n2 );
- phi1->set_type( phi1->type()->meet(n1->bottom_type()) );
- phi2->set_type( phi2->type()->meet(n2->bottom_type()) );
+ phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
+ phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
}
// See if these Phis have been made before.
diff --git a/src/share/vm/opto/matcher.hpp b/src/share/vm/opto/matcher.hpp
index df641495d..6d0c8e3b0 100644
--- a/src/share/vm/opto/matcher.hpp
+++ b/src/share/vm/opto/matcher.hpp
@@ -286,6 +286,9 @@ public:
// CPU supports misaligned vectors store/load.
static const bool misaligned_vectors_ok();
+ // Should original key array reference be passed to AES stubs
+ static const bool pass_original_key_for_aes();
+
// Used to determine a "low complexity" 64-bit constant. (Zero is simple.)
// The standard of comparison is one (StoreL ConL) vs. two (StoreI ConI).
// Depends on the details of 64-bit constant generation on the CPU.
diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp
index 4cfe229fd..8097eade8 100644
--- a/src/share/vm/opto/memnode.cpp
+++ b/src/share/vm/opto/memnode.cpp
@@ -657,7 +657,7 @@ const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_c
// disregarding "null"-ness.
// (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
- assert(cross_check->meet(tp_notnull) == cross_check,
+ assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
"real address must not escape from expected memory type");
}
#endif
@@ -1685,7 +1685,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// t might actually be lower than _type, if _type is a unique
// concrete subclass of abstract class t.
if (off_beyond_header) { // is the offset beyond the header?
- const Type* jt = t->join(_type);
+ const Type* jt = t->join_speculative(_type);
// In any case, do not allow the join, per se, to empty out the type.
if (jt->empty() && !t->empty()) {
// This can happen if an interface-typed array narrows to a class type.
diff --git a/src/share/vm/opto/multnode.cpp b/src/share/vm/opto/multnode.cpp
index 106a0086c..d9e04a274 100644
--- a/src/share/vm/opto/multnode.cpp
+++ b/src/share/vm/opto/multnode.cpp
@@ -94,7 +94,7 @@ const Type* ProjNode::proj_type(const Type* t) const {
if ((_con == TypeFunc::Parms) &&
n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method()) {
// The result of autoboxing is always non-null on normal path.
- t = t->join(TypePtr::NOTNULL);
+ t = t->join_speculative(TypePtr::NOTNULL);
}
return t;
}
diff --git a/src/share/vm/opto/node.cpp b/src/share/vm/opto/node.cpp
index 7de4f8881..2ba71f6bc 100644
--- a/src/share/vm/opto/node.cpp
+++ b/src/share/vm/opto/node.cpp
@@ -995,13 +995,13 @@ void Node::raise_bottom_type(const Type* new_type) {
if (is_Type()) {
TypeNode *n = this->as_Type();
if (VerifyAliases) {
- assert(new_type->higher_equal(n->type()), "new type must refine old type");
+ assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
}
n->set_type(new_type);
} else if (is_Load()) {
LoadNode *n = this->as_Load();
if (VerifyAliases) {
- assert(new_type->higher_equal(n->type()), "new type must refine old type");
+ assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
}
n->set_type(new_type);
}
diff --git a/src/share/vm/opto/parse1.cpp b/src/share/vm/opto/parse1.cpp
index a873e75ba..74b1f1a4a 100644
--- a/src/share/vm/opto/parse1.cpp
+++ b/src/share/vm/opto/parse1.cpp
@@ -1656,7 +1656,7 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
map()->set_req(j, _gvn.transform_no_reclaim(phi));
debug_only(const Type* bt2 = phi->bottom_type());
- assert(bt2->higher_equal(bt1), "must be consistent with type-flow");
+ assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
record_for_igvn(phi);
}
}
@@ -2029,7 +2029,7 @@ void Parse::return_current(Node* value) {
!tp->klass()->is_interface()) {
// sharpen the type eagerly; this eases certain assert checking
if (tp->higher_equal(TypeInstPtr::NOTNULL))
- tr = tr->join(TypeInstPtr::NOTNULL)->is_instptr();
+ tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
value = _gvn.transform(new (C) CheckCastPPNode(0,value,tr));
}
}
diff --git a/src/share/vm/opto/parse2.cpp b/src/share/vm/opto/parse2.cpp
index a341780e0..f82d246c3 100644
--- a/src/share/vm/opto/parse2.cpp
+++ b/src/share/vm/opto/parse2.cpp
@@ -88,7 +88,7 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
// If we load from "AbstractClass[]" we must see "ConcreteSubClass".
const Type* subklass = Type::get_const_type(toop->klass());
- elemtype = subklass->join(el);
+ elemtype = subklass->join_speculative(el);
}
}
}
@@ -1278,7 +1278,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
// Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
// or the narrowOop equivalent.
const Type* obj_type = _gvn.type(obj);
- const TypeOopPtr* tboth = obj_type->join(con_type)->isa_oopptr();
+ const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
tboth->higher_equal(obj_type)) {
// obj has to be of the exact type Foo if the CmpP succeeds.
@@ -1288,7 +1288,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
(jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
TypeNode* ccast = new (C) CheckCastPPNode(control(), obj, tboth);
const Type* tcc = ccast->as_Type()->type();
- assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
+ assert(tcc != obj_type && tcc->higher_equal_speculative(obj_type), "must improve");
// Delay transform() call to allow recovery of pre-cast value
// at the control merge.
_gvn.set_type_bottom(ccast);
@@ -1318,7 +1318,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
switch (btest) {
case BoolTest::eq: // Constant test?
{
- const Type* tboth = tcon->join(tval);
+ const Type* tboth = tcon->join_speculative(tval);
if (tboth == tval) break; // Nothing to gain.
if (tcon->isa_int()) {
ccast = new (C) CastIINode(val, tboth);
@@ -1352,7 +1352,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
if (ccast != NULL) {
const Type* tcc = ccast->as_Type()->type();
- assert(tcc != tval && tcc->higher_equal(tval), "must improve");
+ assert(tcc != tval && tcc->higher_equal_speculative(tval), "must improve");
// Delay transform() call to allow recovery of pre-cast value
// at the control merge.
ccast->set_req(0, control());
diff --git a/src/share/vm/opto/parse3.cpp b/src/share/vm/opto/parse3.cpp
index 3deb9f426..466e1ca3b 100644
--- a/src/share/vm/opto/parse3.cpp
+++ b/src/share/vm/opto/parse3.cpp
@@ -361,7 +361,7 @@ bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_au
// should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
// An oop is not scavengable if it is in the perm gen.
if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
- con_type = con_type->join(stable_type);
+ con_type = con_type->join_speculative(stable_type);
break;
case T_ILLEGAL:
diff --git a/src/share/vm/opto/phaseX.cpp b/src/share/vm/opto/phaseX.cpp
index 3e6ab4eaf..fd08b065c 100644
--- a/src/share/vm/opto/phaseX.cpp
+++ b/src/share/vm/opto/phaseX.cpp
@@ -323,6 +323,23 @@ void NodeHash::remove_useless_nodes(VectorSet &useful) {
}
}
+
+void NodeHash::check_no_speculative_types() {
+#ifdef ASSERT
+ uint max = size();
+ Node *sentinel_node = sentinel();
+ for (uint i = 0; i < max; ++i) {
+ Node *n = at(i);
+ if(n != NULL && n != sentinel_node && n->is_Type()) {
+ TypeNode* tn = n->as_Type();
+ const Type* t = tn->type();
+ const Type* t_no_spec = t->remove_speculative();
+ assert(t == t_no_spec, "dead node in hash table or missed node during speculative cleanup");
+ }
+ }
+#endif
+}
+
#ifndef PRODUCT
//------------------------------dump-------------------------------------------
// Dump statistics for the hash table
@@ -1392,11 +1409,11 @@ void PhaseIterGVN::remove_speculative_types() {
assert(UseTypeSpeculation, "speculation is off");
for (uint i = 0; i < _types.Size(); i++) {
const Type* t = _types.fast_lookup(i);
- if (t != NULL && t->isa_oopptr()) {
- const TypeOopPtr* to = t->is_oopptr();
- _types.map(i, to->remove_speculative());
+ if (t != NULL) {
+ _types.map(i, t->remove_speculative());
}
}
+ _table.check_no_speculative_types();
}
//=============================================================================
diff --git a/src/share/vm/opto/phaseX.hpp b/src/share/vm/opto/phaseX.hpp
index d03d47d95..fcdd47ee8 100644
--- a/src/share/vm/opto/phaseX.hpp
+++ b/src/share/vm/opto/phaseX.hpp
@@ -92,7 +92,8 @@ public:
}
void remove_useless_nodes(VectorSet &useful); // replace with sentinel
- void replace_with(NodeHash* nh);
+ void replace_with(NodeHash* nh);
+ void check_no_speculative_types(); // Check that type nodes in the table have no speculative part
Node *sentinel() { return _sentinel; }
@@ -501,6 +502,9 @@ public:
Deoptimization::DeoptReason reason);
void remove_speculative_types();
+ void check_no_speculative_types() {
+ _table.check_no_speculative_types();
+ }
#ifndef PRODUCT
protected:
diff --git a/src/share/vm/opto/runtime.cpp b/src/share/vm/opto/runtime.cpp
index 0b439c45c..da775791a 100644
--- a/src/share/vm/opto/runtime.cpp
+++ b/src/share/vm/opto/runtime.cpp
@@ -826,12 +826,18 @@ const TypeFunc* OptoRuntime::array_fill_Type() {
const TypeFunc* OptoRuntime::aescrypt_block_Type() {
// create input type (domain)
int num_args = 3;
+ if (Matcher::pass_original_key_for_aes()) {
+ num_args = 4;
+ }
int argcnt = num_args;
const Type** fields = TypeTuple::fields(argcnt);
int argp = TypeFunc::Parms;
fields[argp++] = TypePtr::NOTNULL; // src
fields[argp++] = TypePtr::NOTNULL; // dest
fields[argp++] = TypePtr::NOTNULL; // k array
+ if (Matcher::pass_original_key_for_aes()) {
+ fields[argp++] = TypePtr::NOTNULL; // original k array
+ }
assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
@@ -868,6 +874,9 @@ const TypeFunc* OptoRuntime::updateBytesCRC32_Type() {
const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
// create input type (domain)
int num_args = 5;
+ if (Matcher::pass_original_key_for_aes()) {
+ num_args = 6;
+ }
int argcnt = num_args;
const Type** fields = TypeTuple::fields(argcnt);
int argp = TypeFunc::Parms;
@@ -876,13 +885,16 @@ const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
fields[argp++] = TypePtr::NOTNULL; // k array
fields[argp++] = TypePtr::NOTNULL; // r array
fields[argp++] = TypeInt::INT; // src len
+ if (Matcher::pass_original_key_for_aes()) {
+ fields[argp++] = TypePtr::NOTNULL; // original k array
+ }
assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
- // no result type needed
+ // returning cipher len (int)
fields = TypeTuple::fields(1);
- fields[TypeFunc::Parms+0] = NULL; // void
- const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
+ fields[TypeFunc::Parms+0] = TypeInt::INT;
+ const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
return TypeFunc::make(domain, range);
}
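The runtime.cpp change above gives the AES stubs an optional trailing "original key" argument (present when Matcher::pass_original_key_for_aes() is true, i.e. on SPARC, where key expansion happens inside the intrinsic) and changes the CBC stub's range from void to an int carrying the cipher length. A small sketch of how that argument list is assembled, with simplified containers standing in for TypeTuple/TypeFunc:

#include <vector>

enum FieldKind { PTR_NOTNULL, INT32 };

struct FuncShapeSketch {
  std::vector<FieldKind> domain;  // argument slots
  std::vector<FieldKind> range;   // result slots
};

FuncShapeSketch cbc_aescrypt_shape(bool pass_original_key) {
  FuncShapeSketch f;
  f.domain = { PTR_NOTNULL,   // src
               PTR_NOTNULL,   // dest
               PTR_NOTNULL,   // k array (expanded key)
               PTR_NOTNULL,   // r array (chaining vector)
               INT32 };       // src len
  if (pass_original_key) {
    f.domain.push_back(PTR_NOTNULL);  // original k array (SPARC only)
  }
  f.range = { INT32 };        // cipher length; previously the range was void
  return f;
}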
diff --git a/src/share/vm/opto/type.cpp b/src/share/vm/opto/type.cpp
index 0e1c4e216..c907fc712 100644
--- a/src/share/vm/opto/type.cpp
+++ b/src/share/vm/opto/type.cpp
@@ -241,6 +241,13 @@ int Type::cmp( const Type *const t1, const Type *const t2 ) {
return !t1->eq(t2); // Return ZERO if equal
}
+const Type* Type::maybe_remove_speculative(bool include_speculative) const {
+ if (!include_speculative) {
+ return remove_speculative();
+ }
+ return this;
+}
+
//------------------------------hash-------------------------------------------
int Type::uhash( const Type *const t ) {
return t->hash();
@@ -633,41 +640,44 @@ bool Type::interface_vs_oop(const Type *t) const {
//------------------------------meet-------------------------------------------
// Compute the MEET of two types. NOT virtual. It enforces that meet is
// commutative and the lattice is symmetric.
-const Type *Type::meet( const Type *t ) const {
+const Type *Type::meet_helper(const Type *t, bool include_speculative) const {
if (isa_narrowoop() && t->isa_narrowoop()) {
- const Type* result = make_ptr()->meet(t->make_ptr());
+ const Type* result = make_ptr()->meet_helper(t->make_ptr(), include_speculative);
return result->make_narrowoop();
}
if (isa_narrowklass() && t->isa_narrowklass()) {
- const Type* result = make_ptr()->meet(t->make_ptr());
+ const Type* result = make_ptr()->meet_helper(t->make_ptr(), include_speculative);
return result->make_narrowklass();
}
- const Type *mt = xmeet(t);
+ const Type *this_t = maybe_remove_speculative(include_speculative);
+ t = t->maybe_remove_speculative(include_speculative);
+
+ const Type *mt = this_t->xmeet(t);
if (isa_narrowoop() || t->isa_narrowoop()) return mt;
if (isa_narrowklass() || t->isa_narrowklass()) return mt;
#ifdef ASSERT
- assert( mt == t->xmeet(this), "meet not commutative" );
+ assert(mt == t->xmeet(this_t), "meet not commutative");
const Type* dual_join = mt->_dual;
const Type *t2t = dual_join->xmeet(t->_dual);
- const Type *t2this = dual_join->xmeet( _dual);
+ const Type *t2this = dual_join->xmeet(this_t->_dual);
// Interface meet Oop is Not Symmetric:
// Interface:AnyNull meet Oop:AnyNull == Interface:AnyNull
// Interface:NotNull meet Oop:NotNull == java/lang/Object:NotNull
- if( !interface_vs_oop(t) && (t2t != t->_dual || t2this != _dual) ) {
+ if( !interface_vs_oop(t) && (t2t != t->_dual || t2this != this_t->_dual) ) {
tty->print_cr("=== Meet Not Symmetric ===");
- tty->print("t = "); t->dump(); tty->cr();
- tty->print("this= "); dump(); tty->cr();
- tty->print("mt=(t meet this)= "); mt->dump(); tty->cr();
+ tty->print("t = "); t->dump(); tty->cr();
+ tty->print("this= "); this_t->dump(); tty->cr();
+ tty->print("mt=(t meet this)= "); mt->dump(); tty->cr();
- tty->print("t_dual= "); t->_dual->dump(); tty->cr();
- tty->print("this_dual= "); _dual->dump(); tty->cr();
- tty->print("mt_dual= "); mt->_dual->dump(); tty->cr();
+ tty->print("t_dual= "); t->_dual->dump(); tty->cr();
+ tty->print("this_dual= "); this_t->_dual->dump(); tty->cr();
+ tty->print("mt_dual= "); mt->_dual->dump(); tty->cr();
- tty->print("mt_dual meet t_dual= "); t2t ->dump(); tty->cr();
- tty->print("mt_dual meet this_dual= "); t2this ->dump(); tty->cr();
+ tty->print("mt_dual meet t_dual= "); t2t ->dump(); tty->cr();
+ tty->print("mt_dual meet this_dual= "); t2this ->dump(); tty->cr();
fatal("meet not symmetric" );
}
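Writing $\bar{x}$ for the dual and $\wedge$ for MEET, one way to read the assert above is as the duality the type lattice must satisfy:

$\overline{a \wedge b} \wedge \bar{a} = \bar{a}, \qquad \overline{a \wedge b} \wedge \bar{b} = \bar{b}$

that is, the dual of a meet lies above both duals, which is exactly what makes the definition $a \vee b = \overline{\bar{a} \wedge \bar{b}}$ used by join_helper (see type.hpp below) well-formed.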
@@ -759,8 +769,8 @@ const Type *Type::xmeet( const Type *t ) const {
}
//-----------------------------filter------------------------------------------
-const Type *Type::filter( const Type *kills ) const {
- const Type* ft = join(kills);
+const Type *Type::filter_helper(const Type *kills, bool include_speculative) const {
+ const Type* ft = join_helper(kills, include_speculative);
if (ft->empty())
return Type::TOP; // Canonical empty value
return ft;
@@ -1314,8 +1324,8 @@ const Type *TypeInt::narrow( const Type *old ) const {
}
//-----------------------------filter------------------------------------------
-const Type *TypeInt::filter( const Type *kills ) const {
- const TypeInt* ft = join(kills)->isa_int();
+const Type *TypeInt::filter_helper(const Type *kills, bool include_speculative) const {
+ const TypeInt* ft = join_helper(kills, include_speculative)->isa_int();
if (ft == NULL || ft->empty())
return Type::TOP; // Canonical empty value
if (ft->_widen < this->_widen) {
@@ -1575,8 +1585,8 @@ const Type *TypeLong::narrow( const Type *old ) const {
}
//-----------------------------filter------------------------------------------
-const Type *TypeLong::filter( const Type *kills ) const {
- const TypeLong* ft = join(kills)->isa_long();
+const Type *TypeLong::filter_helper(const Type *kills, bool include_speculative) const {
+ const TypeLong* ft = join_helper(kills, include_speculative)->isa_long();
if (ft == NULL || ft->empty())
return Type::TOP; // Canonical empty value
if (ft->_widen < this->_widen) {
@@ -1731,7 +1741,7 @@ const TypeTuple *TypeTuple::make_domain(ciInstanceKlass* recv, ciSignature* sig)
total_fields++;
field_array = fields(total_fields);
// Use get_const_type here because it respects UseUniqueSubclasses:
- field_array[pos++] = get_const_type(recv)->join(TypePtr::NOTNULL);
+ field_array[pos++] = get_const_type(recv)->join_speculative(TypePtr::NOTNULL);
} else {
field_array = fields(total_fields);
}
@@ -1921,7 +1931,7 @@ const Type *TypeAry::xmeet( const Type *t ) const {
case Array: { // Meeting 2 arrays?
const TypeAry *a = t->is_ary();
- return TypeAry::make(_elem->meet(a->_elem),
+ return TypeAry::make(_elem->meet_speculative(a->_elem),
_size->xmeet(a->_size)->is_int(),
_stable & a->_stable);
}
@@ -1954,6 +1964,13 @@ int TypeAry::hash(void) const {
return (intptr_t)_elem + (intptr_t)_size + (_stable ? 43 : 0);
}
+/**
+ * Return same type without a speculative part in the element
+ */
+const Type* TypeAry::remove_speculative() const {
+ return make(_elem->remove_speculative(), _size, _stable);
+}
+
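remove_speculative is now structural: each container type strips its payload and rebuilds itself. An illustrative call chain (comments only, not committed code) for a speculative part buried in an array element:

// TypeNarrowOop::remove_speculative()           // unwraps _ptrtype
//   -> TypeAryPtr::remove_speculative()         // rebuilds with stripped _ary
//        -> TypeAry::remove_speculative()       // the method above
//             -> _elem->remove_speculative()    // e.g. TypeInstPtr drops _speculative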
//----------------------interface_vs_oop---------------------------------------
#ifdef ASSERT
bool TypeAry::interface_vs_oop(const Type *t) const {
@@ -2566,14 +2583,14 @@ const Type *TypeOopPtr::xmeet(const Type *t) const {
return res;
}
- if (res->isa_oopptr() != NULL) {
+ const TypeOopPtr* res_oopptr = res->is_oopptr();
+ if (res_oopptr->speculative() != NULL) {
// type->speculative() == NULL means that speculation is no better
// than type, i.e. type->speculative() == type. So there are 2
// ways to represent the fact that we have no useful speculative
// data and we should use a single one to be able to test for
// equality between types. Check whether type->speculative() ==
// type and set speculative to NULL if it is the case.
- const TypeOopPtr* res_oopptr = res->is_oopptr();
if (res_oopptr->remove_speculative() == res_oopptr->speculative()) {
return res_oopptr->remove_speculative();
}
@@ -2639,7 +2656,7 @@ const Type *TypeOopPtr::xmeet_helper(const Type *t) const {
case OopPtr: { // Meeting to other OopPtrs
const TypeOopPtr *tp = t->is_oopptr();
int instance_id = meet_instance_id(tp->instance_id());
- const TypeOopPtr* speculative = meet_speculative(tp);
+ const TypeOopPtr* speculative = xmeet_speculative(tp);
return make(meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id, speculative);
}
@@ -2793,9 +2810,9 @@ intptr_t TypeOopPtr::get_con() const {
//-----------------------------filter------------------------------------------
// Do not allow interface-vs.-noninterface joins to collapse to top.
-const Type *TypeOopPtr::filter(const Type *kills) const {
+const Type *TypeOopPtr::filter_helper(const Type *kills, bool include_speculative) const {
- const Type* ft = join(kills);
+ const Type* ft = join_helper(kills, include_speculative);
const TypeInstPtr* ftip = ft->isa_instptr();
const TypeInstPtr* ktip = kills->isa_instptr();
@@ -2907,7 +2924,10 @@ const TypePtr *TypeOopPtr::add_offset(intptr_t offset) const {
/**
* Return same type without a speculative part
*/
-const TypeOopPtr* TypeOopPtr::remove_speculative() const {
+const Type* TypeOopPtr::remove_speculative() const {
+ if (_speculative == NULL) {
+ return this;
+ }
return make(_ptr, _offset, _instance_id, NULL);
}
@@ -2933,7 +2953,7 @@ int TypeOopPtr::dual_instance_id( ) const {
*
* @param other type to meet with
*/
-const TypeOopPtr* TypeOopPtr::meet_speculative(const TypeOopPtr* other) const {
+const TypeOopPtr* TypeOopPtr::xmeet_speculative(const TypeOopPtr* other) const {
bool this_has_spec = (_speculative != NULL);
bool other_has_spec = (other->speculative() != NULL);
@@ -2958,7 +2978,7 @@ const TypeOopPtr* TypeOopPtr::meet_speculative(const TypeOopPtr* other) const {
other_spec = other;
}
- return this_spec->meet(other_spec)->is_oopptr();
+ return this_spec->meet_speculative(other_spec)->is_oopptr();
}
/**
@@ -3117,7 +3137,7 @@ const TypeInstPtr *TypeInstPtr::xmeet_unloaded(const TypeInstPtr *tinst) const {
int off = meet_offset(tinst->offset());
PTR ptr = meet_ptr(tinst->ptr());
int instance_id = meet_instance_id(tinst->instance_id());
- const TypeOopPtr* speculative = meet_speculative(tinst);
+ const TypeOopPtr* speculative = xmeet_speculative(tinst);
const TypeInstPtr *loaded = is_loaded() ? this : tinst;
const TypeInstPtr *unloaded = is_loaded() ? tinst : this;
@@ -3194,7 +3214,7 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const {
int offset = meet_offset(tp->offset());
PTR ptr = meet_ptr(tp->ptr());
int instance_id = meet_instance_id(tp->instance_id());
- const TypeOopPtr* speculative = meet_speculative(tp);
+ const TypeOopPtr* speculative = xmeet_speculative(tp);
switch (ptr) {
case TopPTR:
case AnyNull: // Fall 'down' to dual of object klass
@@ -3244,14 +3264,14 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const {
case TopPTR:
case AnyNull: {
int instance_id = meet_instance_id(InstanceTop);
- const TypeOopPtr* speculative = meet_speculative(tp);
+ const TypeOopPtr* speculative = xmeet_speculative(tp);
return make(ptr, klass(), klass_is_exact(),
(ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative);
}
case NotNull:
case BotPTR: {
int instance_id = meet_instance_id(tp->instance_id());
- const TypeOopPtr* speculative = meet_speculative(tp);
+ const TypeOopPtr* speculative = xmeet_speculative(tp);
return TypeOopPtr::make(ptr, offset, instance_id, speculative);
}
default: typerr(t);
@@ -3303,7 +3323,7 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const {
int off = meet_offset( tinst->offset() );
PTR ptr = meet_ptr( tinst->ptr() );
int instance_id = meet_instance_id(tinst->instance_id());
- const TypeOopPtr* speculative = meet_speculative(tinst);
+ const TypeOopPtr* speculative = xmeet_speculative(tinst);
// Check for easy case; klasses are equal (and perhaps not loaded!)
// If we have constants, then we created oops so classes are loaded
@@ -3552,7 +3572,10 @@ const TypePtr *TypeInstPtr::add_offset(intptr_t offset) const {
return make(_ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id, add_offset_speculative(offset));
}
-const TypeOopPtr *TypeInstPtr::remove_speculative() const {
+const Type *TypeInstPtr::remove_speculative() const {
+ if (_speculative == NULL) {
+ return this;
+ }
return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, NULL);
}
@@ -3754,14 +3777,14 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
case TopPTR:
case AnyNull: {
int instance_id = meet_instance_id(InstanceTop);
- const TypeOopPtr* speculative = meet_speculative(tp);
+ const TypeOopPtr* speculative = xmeet_speculative(tp);
return make(ptr, (ptr == Constant ? const_oop() : NULL),
_ary, _klass, _klass_is_exact, offset, instance_id, speculative);
}
case BotPTR:
case NotNull: {
int instance_id = meet_instance_id(tp->instance_id());
- const TypeOopPtr* speculative = meet_speculative(tp);
+ const TypeOopPtr* speculative = xmeet_speculative(tp);
return TypeOopPtr::make(ptr, offset, instance_id, speculative);
}
default: ShouldNotReachHere();
@@ -3799,10 +3822,10 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
case AryPtr: { // Meeting 2 references?
const TypeAryPtr *tap = t->is_aryptr();
int off = meet_offset(tap->offset());
- const TypeAry *tary = _ary->meet(tap->_ary)->is_ary();
+ const TypeAry *tary = _ary->meet_speculative(tap->_ary)->is_ary();
PTR ptr = meet_ptr(tap->ptr());
int instance_id = meet_instance_id(tap->instance_id());
- const TypeOopPtr* speculative = meet_speculative(tap);
+ const TypeOopPtr* speculative = xmeet_speculative(tap);
ciKlass* lazy_klass = NULL;
if (tary->_elem->isa_int()) {
// Integral array element types have irrelevant lattice relations.
@@ -3882,7 +3905,7 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
int offset = meet_offset(tp->offset());
PTR ptr = meet_ptr(tp->ptr());
int instance_id = meet_instance_id(tp->instance_id());
- const TypeOopPtr* speculative = meet_speculative(tp);
+ const TypeOopPtr* speculative = xmeet_speculative(tp);
switch (ptr) {
case TopPTR:
case AnyNull: // Fall 'down' to dual of object klass
@@ -3996,8 +4019,8 @@ const TypePtr *TypeAryPtr::add_offset(intptr_t offset) const {
return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id, add_offset_speculative(offset));
}
-const TypeOopPtr *TypeAryPtr::remove_speculative() const {
- return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, _offset, _instance_id, NULL);
+const Type *TypeAryPtr::remove_speculative() const {
+ return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, NULL);
}
//=============================================================================
@@ -4037,9 +4060,9 @@ const Type *TypeNarrowPtr::xdual() const { // Compute dual right now.
}
-const Type *TypeNarrowPtr::filter( const Type *kills ) const {
+const Type *TypeNarrowPtr::filter_helper(const Type *kills, bool include_speculative) const {
if (isa_same_narrowptr(kills)) {
- const Type* ft =_ptrtype->filter(is_same_narrowptr(kills)->_ptrtype);
+ const Type* ft = _ptrtype->filter_helper(is_same_narrowptr(kills)->_ptrtype, include_speculative);
if (ft->empty())
return Type::TOP; // Canonical empty value
if (ft->isa_ptr()) {
@@ -4047,7 +4070,7 @@ const Type *TypeNarrowPtr::filter( const Type *kills ) const {
}
return ft;
} else if (kills->isa_ptr()) {
- const Type* ft = _ptrtype->join(kills);
+ const Type* ft = _ptrtype->join_helper(kills, include_speculative);
if (ft->empty())
return Type::TOP; // Canonical empty value
return ft;
@@ -4177,8 +4200,8 @@ const TypePtr *TypeMetadataPtr::add_offset( intptr_t offset ) const {
//-----------------------------filter------------------------------------------
// Do not allow interface-vs.-noninterface joins to collapse to top.
-const Type *TypeMetadataPtr::filter( const Type *kills ) const {
- const TypeMetadataPtr* ft = join(kills)->isa_metadataptr();
+const Type *TypeMetadataPtr::filter_helper(const Type *kills, bool include_speculative) const {
+ const TypeMetadataPtr* ft = join_helper(kills, include_speculative)->isa_metadataptr();
if (ft == NULL || ft->empty())
return Type::TOP; // Canonical empty value
return ft;
@@ -4380,10 +4403,10 @@ bool TypeKlassPtr::singleton(void) const {
}
// Do not allow interface-vs.-noninterface joins to collapse to top.
-const Type *TypeKlassPtr::filter(const Type *kills) const {
+const Type *TypeKlassPtr::filter_helper(const Type *kills, bool include_speculative) const {
// logic here mirrors the one from TypeOopPtr::filter. See comments
// there.
- const Type* ft = join(kills);
+ const Type* ft = join_helper(kills, include_speculative);
const TypeKlassPtr* ftkp = ft->isa_klassptr();
const TypeKlassPtr* ktkp = kills->isa_klassptr();
diff --git a/src/share/vm/opto/type.hpp b/src/share/vm/opto/type.hpp
index 9aca89886..d02792f7b 100644
--- a/src/share/vm/opto/type.hpp
+++ b/src/share/vm/opto/type.hpp
@@ -164,6 +164,8 @@ private:
virtual bool interface_vs_oop_helper(const Type *t) const;
#endif
+ const Type *meet_helper(const Type *t, bool include_speculative) const;
+
protected:
// Each class of type is also identified by its base.
const TYPES _base; // Enum of Types type
@@ -171,6 +173,10 @@ protected:
Type( TYPES t ) : _dual(NULL), _base(t) {} // Simple types
// ~Type(); // Use fast deallocation
const Type *hashcons(); // Hash-cons the type
+ virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
+ const Type *join_helper(const Type *t, bool include_speculative) const {
+ return dual()->meet_helper(t->dual(), include_speculative)->dual();
+ }
public:
@@ -202,10 +208,24 @@ public:
// Test for equivalence of types
static int cmp( const Type *const t1, const Type *const t2 );
// Test for higher or equal in lattice
- int higher_equal( const Type *t ) const { return !cmp(meet(t),t); }
+ // Variant that drops the speculative part of the types
+ int higher_equal(const Type *t) const {
+ return !cmp(meet(t),t->remove_speculative());
+ }
+ // Variant that keeps the speculative part of the types
+ int higher_equal_speculative(const Type *t) const {
+ return !cmp(meet_speculative(t),t);
+ }
// MEET operation; lower in lattice.
- const Type *meet( const Type *t ) const;
+ // Variant that drops the speculative part of the types
+ const Type *meet(const Type *t) const {
+ return meet_helper(t, false);
+ }
+ // Variant that keeps the speculative part of the types
+ const Type *meet_speculative(const Type *t) const {
+ return meet_helper(t, true);
+ }
// WIDEN: 'widens' for Ints and other range types
virtual const Type *widen( const Type *old, const Type* limit ) const { return this; }
// NARROW: complement for widen, used by pessimistic phases
@@ -221,13 +241,26 @@ public:
// JOIN operation; higher in lattice. Done by finding the dual of the
// meet of the dual of the 2 inputs.
- const Type *join( const Type *t ) const {
- return dual()->meet(t->dual())->dual(); }
+ // Variant that drops the speculative part of the types
+ const Type *join(const Type *t) const {
+ return join_helper(t, false);
+ }
+ // Variant that keeps the speculative part of the types
+ const Type *join_speculative(const Type *t) const {
+ return join_helper(t, true);
+ }
// Modified version of JOIN adapted to the needs of Node::Value.
// Normalizes all empty values to TOP. Does not kill _widen bits.
// Currently, it also works around limitations involving interface types.
- virtual const Type *filter( const Type *kills ) const;
+ // Variant that drops the speculative part of the types
+ const Type *filter(const Type *kills) const {
+ return filter_helper(kills, false);
+ }
+ // Variant that keeps the speculative part of the types
+ const Type *filter_speculative(const Type *kills) const {
+ return filter_helper(kills, true);
+ }
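The pattern repeated here for meet, join and filter is the point of the whole change: the public entry points are thin non-virtual pairs delegating to a single helper that takes an include_speculative flag, and subclasses customize behavior by overriding only the helper. Condensed to its shape:

class Type {
public:
  // thin, non-virtual entry points
  const Type* filter(const Type* kills)             const { return filter_helper(kills, false); }
  const Type* filter_speculative(const Type* kills) const { return filter_helper(kills, true);  }
protected:
  // TypeInt, TypeOopPtr, TypeNarrowPtr, ... override this instead of filter()
  virtual const Type* filter_helper(const Type* kills, bool include_speculative) const;
};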
#ifdef ASSERT
// One type is interface, the other is oop
@@ -383,6 +416,8 @@ public:
// Speculative type. See TypeInstPtr
virtual ciKlass* speculative_type() const { return NULL; }
+ const Type* maybe_remove_speculative(bool include_speculative) const;
+ virtual const Type* remove_speculative() const { return this; }
private:
// support arrays
@@ -450,12 +485,14 @@ public:
// upper bound, inclusive.
class TypeInt : public Type {
TypeInt( jint lo, jint hi, int w );
+protected:
+ virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
+
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
virtual bool singleton(void) const; // TRUE if type is a singleton
virtual bool empty(void) const; // TRUE if type is vacuous
-public:
const jint _lo, _hi; // Lower bound, upper bound
const short _widen; // Limit on times we widen this sucker
@@ -475,7 +512,6 @@ public:
virtual const Type *widen( const Type *t, const Type* limit_type ) const;
virtual const Type *narrow( const Type *t ) const;
// Do not kill _widen bits.
- virtual const Type *filter( const Type *kills ) const;
// Convenience common pre-built types.
static const TypeInt *MINUS_1;
static const TypeInt *ZERO;
@@ -506,6 +542,9 @@ public:
// an upper bound, inclusive.
class TypeLong : public Type {
TypeLong( jlong lo, jlong hi, int w );
+protected:
+ // Do not kill _widen bits.
+ virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
@@ -533,8 +572,6 @@ public:
virtual const Type *xdual() const; // Compute dual right now.
virtual const Type *widen( const Type *t, const Type* limit_type ) const;
virtual const Type *narrow( const Type *t ) const;
- // Do not kill _widen bits.
- virtual const Type *filter( const Type *kills ) const;
// Convenience common pre-built types.
static const TypeLong *MINUS_1;
static const TypeLong *ZERO;
@@ -625,6 +662,7 @@ public:
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
bool ary_must_be_exact() const; // true if arrays of such are never generic
+ virtual const Type* remove_speculative() const;
#ifdef ASSERT
// One type is interface, the other is oop
virtual bool interface_vs_oop(const Type *t) const;
@@ -835,7 +873,7 @@ protected:
// utility methods to work on the speculative part of the type
const TypeOopPtr* dual_speculative() const;
- const TypeOopPtr* meet_speculative(const TypeOopPtr* other) const;
+ const TypeOopPtr* xmeet_speculative(const TypeOopPtr* other) const;
bool eq_speculative(const TypeOopPtr* other) const;
int hash_speculative() const;
const TypeOopPtr* add_offset_speculative(intptr_t offset) const;
@@ -843,6 +881,9 @@ protected:
void dump_speculative(outputStream *st) const;
#endif
+ // Do not allow interface-vs.-noninterface joins to collapse to top.
+ virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
+
public:
// Creates a type given a klass. Correctly handles multi-dimensional arrays
// Respects UseUniqueSubclasses.
@@ -898,16 +939,13 @@ public:
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
- virtual const TypeOopPtr* remove_speculative() const;
+ virtual const Type* remove_speculative() const;
virtual const Type *xmeet(const Type *t) const;
virtual const Type *xdual() const; // Compute dual right now.
// the core of the computation of the meet for TypeOopPtr and for its subclasses
virtual const Type *xmeet_helper(const Type *t) const;
- // Do not allow interface-vs.-noninterface joins to collapse to top.
- virtual const Type *filter( const Type *kills ) const;
-
// Convenience common pre-built type.
static const TypeOopPtr *BOTTOM;
#ifndef PRODUCT
@@ -984,7 +1022,7 @@ class TypeInstPtr : public TypeOopPtr {
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
- virtual const TypeOopPtr* remove_speculative() const;
+ virtual const Type* remove_speculative() const;
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
@@ -1062,7 +1100,7 @@ public:
virtual bool empty(void) const; // TRUE if type is vacuous
virtual const TypePtr *add_offset( intptr_t offset ) const;
// Return same type without a speculative part
- virtual const TypeOopPtr* remove_speculative() const;
+ virtual const Type* remove_speculative() const;
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
@@ -1103,6 +1141,8 @@ public:
class TypeMetadataPtr : public TypePtr {
protected:
TypeMetadataPtr(PTR ptr, ciMetadata* metadata, int offset);
+ // Do not allow interface-vs.-noninterface joins to collapse to top.
+ virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
@@ -1128,9 +1168,6 @@ public:
virtual intptr_t get_con() const;
- // Do not allow interface-vs.-noninterface joins to collapse to top.
- virtual const Type *filter( const Type *kills ) const;
-
// Convenience common pre-built types.
static const TypeMetadataPtr *BOTTOM;
@@ -1144,6 +1181,8 @@ public:
class TypeKlassPtr : public TypePtr {
TypeKlassPtr( PTR ptr, ciKlass* klass, int offset );
+protected:
+ virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
@@ -1205,9 +1244,6 @@ public:
virtual intptr_t get_con() const;
- // Do not allow interface-vs.-noninterface joins to collapse to top.
- virtual const Type *filter( const Type *kills ) const;
-
// Convenience common pre-built types.
static const TypeKlassPtr* OBJECT; // Not-null object klass or below
static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
@@ -1231,6 +1267,8 @@ protected:
virtual const TypeNarrowPtr *is_same_narrowptr(const Type *t) const = 0;
virtual const TypeNarrowPtr *make_same_narrowptr(const TypePtr *t) const = 0;
virtual const TypeNarrowPtr *make_hash_same_narrowptr(const TypePtr *t) const = 0;
+ // Do not allow interface-vs.-noninterface joins to collapse to top.
+ virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
@@ -1241,9 +1279,6 @@ public:
virtual intptr_t get_con() const;
- // Do not allow interface-vs.-noninterface joins to collapse to top.
- virtual const Type *filter( const Type *kills ) const;
-
virtual bool empty(void) const; // TRUE if type is vacuous
// returns the equivalent ptr type for this compressed pointer
@@ -1294,6 +1329,10 @@ public:
static const TypeNarrowOop *BOTTOM;
static const TypeNarrowOop *NULL_PTR;
+ virtual const Type* remove_speculative() const {
+ return make(_ptrtype->remove_speculative()->is_ptr());
+ }
+
#ifndef PRODUCT
virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
#endif
diff --git a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp
index c46afdc10..18d8786b8 100644
--- a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp
+++ b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp
@@ -26,6 +26,7 @@
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
+#include "code/vtableStubs.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiCodeBlobEvents.hpp"
@@ -63,6 +64,7 @@ class CodeBlobCollector : StackObj {
// used during a collection
static GrowableArray<JvmtiCodeBlobDesc*>* _global_code_blobs;
static void do_blob(CodeBlob* cb);
+ static void do_vtable_stub(VtableStub* vs);
public:
CodeBlobCollector() {
_code_blobs = NULL;
@@ -119,6 +121,10 @@ void CodeBlobCollector::do_blob(CodeBlob* cb) {
if (cb->is_nmethod()) {
return;
}
+ // exclude VtableStubs, which are processed separately
+ if (cb->is_buffer_blob() && strcmp(cb->name(), "vtable chunks") == 0) {
+ return;
+ }
// check if this starting address has been seen already - the
// assumption is that stubs are inserted into the list before the
@@ -136,6 +142,13 @@ void CodeBlobCollector::do_blob(CodeBlob* cb) {
_global_code_blobs->append(scb);
}
+// called for each VtableStub in VtableStubs
+
+void CodeBlobCollector::do_vtable_stub(VtableStub* vs) {
+ JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(vs->is_vtable_stub() ? "vtable stub" : "itable stub",
+ vs->code_begin(), vs->code_end());
+ _global_code_blobs->append(scb);
+}
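The iterator itself is added in code/vtableStubs.cpp (see the diffstat above); judging from this call site it is a plain function-pointer walk over the stub table. A sketch under that assumption, with the bucket layout guessed:

// Assumed shape, not the committed code: visit every VtableStub once.
void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
  for (int i = 0; i < N; i++) {                        // N hash buckets
    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
      f(s);                                            // here: do_vtable_stub
    }
  }
}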
// collects a list of CodeBlobs in the CodeCache.
//
@@ -166,6 +179,10 @@ void CodeBlobCollector::collect() {
_global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
}
+ // Vtable stubs are not described with StubCodeDesc,
+ // so process them separately
+ VtableStubs::vtable_stub_do(do_vtable_stub);
+
// next iterate over all the non-nmethod code blobs and add them to
// the list - as noted above this will filter out duplicates and
// enclosing blobs.
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index 60a7f904d..0dcb9e756 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -293,6 +293,7 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "UsePermISM", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseMPSS", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseStringCache", JDK_Version::jdk(8), JDK_Version::jdk(9) },
+ { "UseOldInlining", JDK_Version::jdk(9), JDK_Version::jdk(10) },
#ifdef PRODUCT
{ "DesiredMethodLimit",
JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
@@ -881,7 +882,7 @@ bool Arguments::process_argument(const char* arg,
arg_len = equal_sign - argname;
}
- Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true);
+ Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true, true);
if (found_flag != NULL) {
char locked_message_buf[BUFLEN];
found_flag->get_locked_message(locked_message_buf, BUFLEN);
diff --git a/src/share/vm/runtime/globals.cpp b/src/share/vm/runtime/globals.cpp
index de6fbf708..fbfc08ed0 100644
--- a/src/share/vm/runtime/globals.cpp
+++ b/src/share/vm/runtime/globals.cpp
@@ -62,6 +62,14 @@ ARCH_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, \
MATERIALIZE_FLAGS_EXT
+static bool is_product_build() {
+#ifdef PRODUCT
+ return true;
+#else
+ return false;
+#endif
+}
+
void Flag::check_writable() {
if (is_constant_in_binary()) {
fatal(err_msg("flag is constant: %s", _name));
@@ -235,6 +243,27 @@ bool Flag::is_unlocked() const {
// Get a custom message for this locked flag, or leave the buffer
// empty if none is available.
void Flag::get_locked_message(char* buf, int buflen) const {
+ buf[0] = '\0';
+ if (is_diagnostic() && !is_unlocked()) {
+ jio_snprintf(buf, buflen, "Error: VM option '%s' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.\n",
+ _name);
+ return;
+ }
+ if (is_experimental() && !is_unlocked()) {
+ jio_snprintf(buf, buflen, "Error: VM option '%s' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.\n",
+ _name);
+ return;
+ }
+ if (is_develop() && is_product_build()) {
+ jio_snprintf(buf, buflen, "Error: VM option '%s' is develop and is available only in debug version of VM.\n",
+ _name);
+ return;
+ }
+ if (is_notproduct() && is_product_build()) {
+ jio_snprintf(buf, buflen, "Error: VM option '%s' is notproduct and is available only in debug version of VM.\n",
+ _name);
+ return;
+ }
get_locked_message_ext(buf, buflen);
}
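Combined with the find_flag change below and the call site in arguments.cpp above, the error path for a locked or compiled-out flag becomes: look the Flag up anyway, then let it compose the precise diagnostic. Roughly, with the stream plumbing elided:

// Sketch of the flow in Arguments::process_argument:
Flag* f = Flag::find_flag(argname, arg_len, true /*allow_locked*/, true /*return_flag*/);
if (f != NULL) {
  char buf[BUFLEN];
  f->get_locked_message(buf, BUFLEN);   // leaves "" when the flag is usable
  if (buf[0] != '\0') {
    jio_fprintf(defaultStream::error_stream(), "%s", buf);
  }
}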
@@ -464,13 +493,13 @@ inline bool str_equal(const char* s, const char* q, size_t len) {
}
// Search the flag table for a named flag
-Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) {
+Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) {
for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
if (str_equal(current->_name, name, length)) {
// Found a matching entry.
// Don't report notproduct and develop flags in product builds,
// unless the caller explicitly asked for the Flag back (return_flag).
if (current->is_constant_in_binary()) {
- return NULL;
+ return (return_flag ? current : NULL);
}
// Report locked flags only if allowed.
if (!(current->is_unlocked() || current->is_unlocker())) {
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index b828de5ab..6b6fe0251 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -255,7 +255,7 @@ struct Flag {
// number of flags
static size_t numFlags;
- static Flag* find_flag(const char* name, size_t length, bool allow_locked = false);
+ static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false);
static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
void check_writable();
@@ -1272,6 +1272,9 @@ class CommandLineFlags {
develop(bool, TraceJNICalls, false, \
"Trace JNI calls") \
\
+ develop(bool, StressRewriter, false, \
+ "Stress linktime bytecode rewriting") \
+ \
notproduct(bool, TraceJVMCalls, false, \
"Trace JVM calls") \
\
diff --git a/src/share/vm/runtime/os.cpp b/src/share/vm/runtime/os.cpp
index 5e7ec4c9c..04598e985 100644
--- a/src/share/vm/runtime/os.cpp
+++ b/src/share/vm/runtime/os.cpp
@@ -1081,7 +1081,6 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
}
-#ifndef PRODUCT
// Check if in metaspace.
if (ClassLoaderDataGraph::contains((address)addr)) {
// Use addr->print() from the debugger instead (not here)
@@ -1089,7 +1088,6 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
" is pointing into metadata", addr);
return;
}
-#endif
// Try an OS specific find
if (os::find(addr, st)) {
diff --git a/src/share/vm/utilities/array.hpp b/src/share/vm/utilities/array.hpp
index 9f8e45f32..0fbcd94d2 100644
--- a/src/share/vm/utilities/array.hpp
+++ b/src/share/vm/utilities/array.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@ class ResourceArray: public ResourceObj {
void initialize(size_t esize, int length) {
assert(length >= 0, "illegal length");
- assert(_data == NULL, "must be new object");
+ assert(StressRewriter || _data == NULL, "must be new object");
_length = length;
_data = resource_allocate_bytes(esize * length);
DEBUG_ONLY(init_nesting();)
diff --git a/src/share/vm/utilities/bitMap.cpp b/src/share/vm/utilities/bitMap.cpp
index a0e5d6c89..b2a2ab7b2 100644
--- a/src/share/vm/utilities/bitMap.cpp
+++ b/src/share/vm/utilities/bitMap.cpp
@@ -110,7 +110,7 @@ void BitMap::par_put_range_within_word(idx_t beg, idx_t end, bool value) {
while (true) {
intptr_t res = Atomic::cmpxchg_ptr(nw, pw, w);
if (res == w) break;
- w = *pw;
+ w = res;
nw = value ? (w | ~mr) : (w & mr);
}
}
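The bitMap change is the standard compare-and-swap retry refinement: cmpxchg already returns the value it found, so re-reading *pw is a wasted extra load; computing the retry from the returned value is both cheaper and tighter. The canonical loop, with compute() standing in for the w | ~mr / w & mr step:

// Generic CAS retry; compute() is a placeholder for the mask update.
intptr_t w = *pw;                          // single initial load
while (true) {
  intptr_t nw  = compute(w);               // next value from the observed value
  intptr_t res = Atomic::cmpxchg_ptr(nw, pw, w);
  if (res == w) break;                     // swap succeeded
  w = res;                                 // retry from what the CAS actually saw
}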