author     aph <none@none>   2014-08-04 11:29:21 -0400
committer  aph <none@none>   2014-08-04 11:29:21 -0400
commit     8ba50ee874a7d32229b6e27f52cdaeb337ec0b28 (patch)
tree       09d84a8e649f925322664e2159f1617d0838e20b
parent     78130099d98f1751d40121e825b895ac7443e610 (diff)
AArch64: try to align metaspace on a 4G boundary.
-rw-r--r--  src/cpu/aarch64/vm/macroAssembler_aarch64.cpp  19
-rw-r--r--  src/share/vm/memory/metaspace.cpp              40
2 files changed, 59 insertions, 0 deletions
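
The idea behind this patch, in brief: with narrow_klass_shift() == 0 and a compressed class space base that is a multiple of 4G, a narrow klass is simply the low 32 bits of the full Klass*, and decoding needs only a single MOVK to restore the high bits. Below is a minimal standalone sketch of that arithmetic, not HotSpot code; kBase is a hypothetical stand-in for Universe::narrow_klass_base().

#include <cassert>
#include <cstdint>

// Hypothetical 4G-aligned stand-in for Universe::narrow_klass_base().
static const uint64_t kBase = 0x0000000800000000ULL;

// Encode: with a 4G-aligned base and zero shift, the narrow klass is just
// the low 32 bits of the pointer -- exactly what a MOVW keeps.
static uint32_t encode_klass(uint64_t klass_ptr) {
  return (uint32_t)klass_ptr;
}

// Decode: OR the base back in. Because the base's low 32 bits are zero,
// this is one MOVK dst, #(kBase >> 32), LSL #32 -- no add, no scratch reg.
static uint64_t decode_klass(uint32_t narrow_klass) {
  return kBase | (uint64_t)narrow_klass;
}

int main() {
  uint64_t k = kBase + 0x12345678;             // a Klass* inside the class space
  assert(decode_klass(encode_klass(k)) == k);  // exact round trip
  return 0;
}
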
diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
index 5de19bb98..1c0432232 100644
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
@@ -2496,6 +2496,11 @@ void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp)
if (Universe::narrow_klass_base() == NULL) {
cmp(trial_klass, tmp, LSL, Universe::narrow_klass_shift());
return;
+ } else if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
+ && Universe::narrow_klass_shift() == 0) {
+ // Only the bottom 32 bits matter
+ cmpw(trial_klass, tmp);
+ return;
}
decode_klass_not_null(tmp);
} else {
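
Why cmpw (a 32-bit compare) is sound in the fast path above: every Klass* in the class space shares the base's high 32 bits, so two klass values are equal exactly when their low 32 bits are. A small sketch of that invariant, again with kBase as a hypothetical stand-in for the real base:

#include <cassert>
#include <cstdint>

static const uint64_t kBase = 0x0000000800000000ULL; // low 32 bits are zero

int main() {
  uint64_t trial_klass = kBase + 0x1000; // full Klass* being tested
  uint32_t narrow      = 0x1000;         // narrow klass loaded from a header
  // With shift == 0 the decoded value is kBase | narrow, so comparing the
  // low 32 bits (what cmpw does) agrees with comparing full 64-bit pointers.
  assert(((uint32_t)trial_klass == narrow)
         == (trial_klass == (kBase | (uint64_t)narrow)));
  return 0;
}
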
@@ -2680,6 +2685,12 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
return;
}
+ if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
+ && Universe::narrow_klass_shift() == 0) {
+ movw(dst, src);
+ return;
+ }
+
#ifdef ASSERT
verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
#endif
@@ -2723,6 +2734,14 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
return;
}
+ if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
+ && Universe::narrow_klass_shift() == 0) {
+ if (dst != src)
+ movw(dst, src);
+ movk(dst, (uint64_t)Universe::narrow_klass_base() >> 32, 32);
+ return;
+ }
+
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
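
One detail the decode path above depends on: MOVK takes a 16-bit immediate, so (uint64_t)narrow_klass_base() >> 32 must fit in 16 bits, i.e. the base must sit below 2^48. The metaspace loop below only tries candidates up to 1024*G (2^40), which satisfies this with room to spare. Here is a sketch of the MOVK semantics being relied on; movk_lsl32 is a hypothetical model, not an Assembler method:

#include <cassert>
#include <cstdint>

// Model of AArch64 MOVK dst, #imm16, LSL #32: replace bits [47:32] of dst
// and leave everything else untouched.
static uint64_t movk_lsl32(uint64_t dst, uint16_t imm16) {
  return (dst & ~0x0000ffff00000000ULL) | ((uint64_t)imm16 << 32);
}

int main() {
  const uint64_t base = 0x0000000800000000ULL; // 4G-aligned, below 2^48
  uint64_t dst = 0x12345678; // MOVW has already zero-extended the narrow klass
  dst = movk_lsl32(dst, (uint16_t)(base >> 32));
  assert(dst == base + 0x12345678); // full Klass* restored in one instruction
  return 0;
}
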
diff --git a/src/share/vm/memory/metaspace.cpp b/src/share/vm/memory/metaspace.cpp
index 169557399..e28ee53a9 100644
--- a/src/share/vm/memory/metaspace.cpp
+++ b/src/share/vm/memory/metaspace.cpp
@@ -2975,10 +2975,50 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
// Don't use large pages for the class space.
bool large_pages = false;
+#ifndef AARCH64
ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment,
large_pages,
requested_addr, 0);
+#else // AARCH64
+ ReservedSpace metaspace_rs;
+
+ // Our compressed klass pointers may fit nicely into the lower 32
+ // bits.
+ if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G)
+ metaspace_rs = ReservedSpace(compressed_class_space_size(),
+ _reserve_alignment,
+ large_pages,
+ requested_addr, 0);
+
+ if (! metaspace_rs.is_reserved()) {
+ // Try to align metaspace so that we can decode a compressed klass
+ // with a single MOVK instruction. We can do this iff the
+ // compressed class base is a multiple of 4G.
+ for (char *a = (char*)align_ptr_up(requested_addr, 4*G);
+ a < (char*)(1024*G);
+ a += 4*G) {
+ if (UseSharedSpaces
+ && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
+ // We failed to find an aligned base that will reach. Fall
+ // back to using our requested addr.
+ metaspace_rs = ReservedSpace(compressed_class_space_size(),
+ _reserve_alignment,
+ large_pages,
+ requested_addr, 0);
+ break;
+ }
+ metaspace_rs = ReservedSpace(compressed_class_space_size(),
+ _reserve_alignment,
+ large_pages,
+ a, 0);
+ if (metaspace_rs.is_reserved())
+ break;
+ }
+ }
+
+#endif // AARCH64
+
if (!metaspace_rs.is_reserved()) {
if (UseSharedSpaces) {
size_t increment = align_size_up(1*G, _reserve_alignment);
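
For reference, the reservation strategy in the metaspace.cpp hunk, restated as a standalone sketch. try_reserve is a hypothetical stand-in for the ReservedSpace constructor (here stubbed to pretend every placement succeeds), and the UseSharedSpaces/can_use_cds_with_metaspace_addr check is omitted for brevity:

#include <cstddef>
#include <cstdint>

static const uint64_t G = 1024ULL * 1024 * 1024;

// Hypothetical stand-in for the ReservedSpace constructor. A real
// implementation would ask the OS to map at `addr` and return nullptr on
// refusal; this stub just grants every request so main() can run.
static char* try_reserve(char* addr, size_t size) {
  (void)size;
  return addr;
}

char* reserve_class_space(char* requested_addr, size_t size) {
  // First choice: everything below 4G, so the narrow klass base is zero
  // and encode/decode are no-ops.
  if ((uint64_t)requested_addr + size < 4 * G)
    if (char* rs = try_reserve(requested_addr, size))
      return rs;

  // Otherwise walk the 4G-aligned candidates up to 1024*G. Any success
  // yields a base whose low 32 bits are zero, decodable with one MOVK,
  // and below 2^48 so the MOVK immediate (base >> 32) fits in 16 bits.
  uint64_t a = ((uint64_t)requested_addr + 4 * G - 1) & ~(4 * G - 1);
  for (; a < 1024 * G; a += 4 * G)
    if (char* rs = try_reserve((char*)a, size))
      return rs;

  // Fall back to the original request; the generic (slower) decode path
  // in macroAssembler_aarch64.cpp still handles an unaligned base.
  return try_reserve(requested_addr, size);
}

int main() {
  // 0x7123456000 rounds up to the next 4G boundary, 0x7200000000.
  char* base = reserve_class_space((char*)0x7123456000ULL, (size_t)(1 * G));
  return base == (char*)0x7200000000ULL ? 0 : 1;
}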