--- hotspot/make/bsd/makefiles/mapfile-vers-debug 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/make/bsd/makefiles/mapfile-vers-debug 2014-10-16 18:26:51.011957985 +0200
@@ -82,6 +82,7 @@
_JVM_EnableCompiler
_JVM_Exit
_JVM_FillInStackTrace
+ _JVM_FindClassFromCaller
_JVM_FindClassFromClass
_JVM_FindClassFromClassLoader
_JVM_FindClassFromBootLoader
--- hotspot/make/bsd/makefiles/mapfile-vers-product 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/make/bsd/makefiles/mapfile-vers-product 2014-10-16 18:26:51.011957985 +0200
@@ -82,6 +82,7 @@
_JVM_EnableCompiler
_JVM_Exit
_JVM_FillInStackTrace
+ _JVM_FindClassFromCaller
_JVM_FindClassFromClass
_JVM_FindClassFromClassLoader
_JVM_FindClassFromBootLoader
--- hotspot/make/linux/makefiles/mapfile-vers-debug 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/make/linux/makefiles/mapfile-vers-debug 2014-10-16 18:26:51.011957985 +0200
@@ -84,6 +84,7 @@
JVM_EnableCompiler;
JVM_Exit;
JVM_FillInStackTrace;
+ JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
--- hotspot/make/linux/makefiles/mapfile-vers-product 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/make/linux/makefiles/mapfile-vers-product 2014-10-16 18:26:51.011957985 +0200
@@ -84,6 +84,7 @@
JVM_EnableCompiler;
JVM_Exit;
JVM_FillInStackTrace;
+ JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
--- hotspot/make/solaris/makefiles/mapfile-vers 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/make/solaris/makefiles/mapfile-vers 2014-10-16 18:26:51.012957962 +0200
@@ -84,6 +84,7 @@
JVM_EnableCompiler;
JVM_Exit;
JVM_FillInStackTrace;
+ JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
--- hotspot/src/share/vm/c1/c1_LIRGenerator.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/c1/c1_LIRGenerator.cpp 2014-10-16 18:26:51.013957939 +0200
@@ -2082,14 +2082,14 @@
LIR_Opr base_op = base.result();
LIR_Opr index_op = idx.result();
#ifndef _LP64
- if (x->base()->type()->tag() == longTag) {
+ if (base_op->type() == T_LONG) {
base_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, base.result(), base_op);
}
if (x->has_index()) {
- if (x->index()->type()->tag() == longTag) {
+ if (index_op->type() == T_LONG) {
LIR_Opr long_index_op = index_op;
- if (x->index()->type()->is_constant()) {
+ if (index_op->is_constant()) {
long_index_op = new_register(T_LONG);
__ move(index_op, long_index_op);
}
@@ -2104,14 +2104,14 @@
assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
#else
if (x->has_index()) {
- if (x->index()->type()->tag() == intTag) {
- if (!x->index()->type()->is_constant()) {
+ if (index_op->type() == T_INT) {
+ if (!index_op->is_constant()) {
index_op = new_register(T_LONG);
__ convert(Bytecodes::_i2l, idx.result(), index_op);
}
} else {
- assert(x->index()->type()->tag() == longTag, "must be");
- if (x->index()->type()->is_constant()) {
+ assert(index_op->type() == T_LONG, "must be");
+ if (index_op->is_constant()) {
index_op = new_register(T_LONG);
__ move(idx.result(), index_op);
}
@@ -2192,12 +2192,12 @@
LIR_Opr index_op = idx.result();
#ifndef _LP64
- if (x->base()->type()->tag() == longTag) {
+ if (base_op->type() == T_LONG) {
base_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, base.result(), base_op);
}
if (x->has_index()) {
- if (x->index()->type()->tag() == longTag) {
+ if (index_op->type() == T_LONG) {
index_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, idx.result(), index_op);
}
@@ -2207,7 +2207,7 @@
assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
#else
if (x->has_index()) {
- if (x->index()->type()->tag() == intTag) {
+ if (index_op->type() == T_INT) {
index_op = new_register(T_LONG);
__ convert(Bytecodes::_i2l, idx.result(), index_op);
}
--- hotspot/src/share/vm/classfile/classFileParser.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/classfile/classFileParser.cpp 2014-10-16 18:26:51.015957893 +0200
@@ -2830,6 +2830,11 @@
"bootstrap_method_index %u has bad constant type in class file %s",
bootstrap_method_index,
CHECK);
+
+ guarantee_property((operand_fill_index + 1 + argument_count) < operands->length(),
+ "Invalid BootstrapMethods num_bootstrap_methods or num_bootstrap_arguments value in class file %s",
+ CHECK);
+
operands->at_put(operand_fill_index++, bootstrap_method_index);
operands->at_put(operand_fill_index++, argument_count);
@@ -2847,7 +2852,6 @@
}
assert(operand_fill_index == operands->length(), "exact fill");
- assert(ConstantPool::operand_array_length(operands) == attribute_array_length, "correct decode");
u1* current_end = cfs->current();
guarantee_property(current_end == current_start + attribute_byte_length,
--- hotspot/src/share/vm/classfile/classLoader.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/classfile/classLoader.cpp 2014-10-16 18:26:51.016957871 +0200
@@ -90,6 +90,7 @@
typedef jboolean (JNICALL *ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
typedef jboolean (JNICALL *ReadMappedEntry_t)(jzfile *zip, jzentry *entry, unsigned char **buf, char *namebuf);
typedef jzentry* (JNICALL *GetNextEntry_t)(jzfile *zip, jint n);
+typedef jint (JNICALL *Crc32_t)(jint crc, const jbyte *buf, jint len);
static ZipOpen_t ZipOpen = NULL;
static ZipClose_t ZipClose = NULL;
@@ -98,6 +99,7 @@
static ReadMappedEntry_t ReadMappedEntry = NULL;
static GetNextEntry_t GetNextEntry = NULL;
static canonicalize_fn_t CanonicalizeEntry = NULL;
+static Crc32_t Crc32 = NULL;
// Globals
@@ -810,9 +812,11 @@
ReadEntry = CAST_TO_FN_PTR(ReadEntry_t, os::dll_lookup(handle, "ZIP_ReadEntry"));
ReadMappedEntry = CAST_TO_FN_PTR(ReadMappedEntry_t, os::dll_lookup(handle, "ZIP_ReadMappedEntry"));
GetNextEntry = CAST_TO_FN_PTR(GetNextEntry_t, os::dll_lookup(handle, "ZIP_GetNextEntry"));
+ Crc32 = CAST_TO_FN_PTR(Crc32_t, os::dll_lookup(handle, "ZIP_CRC32"));
// ZIP_Close is not exported on Windows in JDK5.0 so don't abort if ZIP_Close is NULL
- if (ZipOpen == NULL || FindEntry == NULL || ReadEntry == NULL || GetNextEntry == NULL) {
+ if (ZipOpen == NULL || FindEntry == NULL || ReadEntry == NULL ||
+ GetNextEntry == NULL || Crc32 == NULL) {
vm_exit_during_initialization("Corrupted ZIP library", path);
}
@@ -822,6 +826,11 @@
// This lookup only works on 1.3. Do not check for non-null here
}
+int ClassLoader::crc32(int crc, const char* buf, int len) {
+ assert(Crc32 != NULL, "ZIP_CRC32 is not found");
+ return (*Crc32)(crc, (const jbyte*)buf, len);
+}
+
// PackageInfo data exists in order to support the java.lang.Package
// class. A Package object provides information about a java package
// (version, vendor, etc.) which originates in the manifest of the jar
--- hotspot/src/share/vm/classfile/classLoader.hpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/classfile/classLoader.hpp 2014-10-16 18:26:51.016957871 +0200
@@ -228,6 +228,7 @@
// to avoid confusing the zip library
static bool get_canonical_path(const char* orig, char* out, int len);
public:
+ static int crc32(int crc, const char* buf, int len);
static bool update_class_path_entry_list(const char *path,
bool check_for_duplicates,
bool throw_exception=true);
--- hotspot/src/share/vm/classfile/stackMapFrame.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/classfile/stackMapFrame.cpp 2014-10-16 18:26:51.016957871 +0200
@@ -148,7 +148,7 @@
VerificationType* from, VerificationType* to, int32_t len, TRAPS) const {
int32_t i = 0;
for (i = 0; i < len; i++) {
- if (!to[i].is_assignable_from(from[i], verifier(), THREAD)) {
+ if (!to[i].is_assignable_from(from[i], verifier(), false, THREAD)) {
break;
}
}
@@ -245,7 +245,7 @@
}
VerificationType top = _stack[--_stack_size];
bool subtype = type.is_assignable_from(
- top, verifier(), CHECK_(VerificationType::bogus_type()));
+ top, verifier(), false, CHECK_(VerificationType::bogus_type()));
if (!subtype) {
verifier()->verify_error(
ErrorContext::bad_type(_offset, stack_top_ctx(),
@@ -265,7 +265,7 @@
return VerificationType::bogus_type();
}
bool subtype = type.is_assignable_from(_locals[index],
- verifier(), CHECK_(VerificationType::bogus_type()));
+ verifier(), false, CHECK_(VerificationType::bogus_type()));
if (!subtype) {
verifier()->verify_error(
ErrorContext::bad_type(_offset,
@@ -288,14 +288,14 @@
"get long/double overflows locals");
return;
}
- bool subtype = type1.is_assignable_from(_locals[index], verifier(), CHECK);
+ bool subtype = type1.is_assignable_from(_locals[index], verifier(), false, CHECK);
if (!subtype) {
verifier()->verify_error(
ErrorContext::bad_type(_offset,
TypeOrigin::local(index, this), TypeOrigin::implicit(type1)),
"Bad local variable type");
} else {
- subtype = type2.is_assignable_from(_locals[index + 1], verifier(), CHECK);
+ subtype = type2.is_assignable_from(_locals[index + 1], verifier(), false, CHECK);
if (!subtype) {
/* Unreachable? All local store routines convert a split long or double
* into a TOP during the store. So we should never end up seeing an
--- hotspot/src/share/vm/classfile/stackMapFrame.hpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/classfile/stackMapFrame.hpp 2014-10-16 18:26:51.017957848 +0200
@@ -234,7 +234,7 @@
if (_stack_size != 0) {
VerificationType top = _stack[_stack_size - 1];
bool subtype = type.is_assignable_from(
- top, verifier(), CHECK_(VerificationType::bogus_type()));
+ top, verifier(), false, CHECK_(VerificationType::bogus_type()));
if (subtype) {
--_stack_size;
return top;
@@ -249,9 +249,9 @@
assert(type2.is_long() || type2.is_double(), "must be long/double_2");
if (_stack_size >= 2) {
VerificationType top1 = _stack[_stack_size - 1];
- bool subtype1 = type1.is_assignable_from(top1, verifier(), CHECK);
+ bool subtype1 = type1.is_assignable_from(top1, verifier(), false, CHECK);
VerificationType top2 = _stack[_stack_size - 2];
- bool subtype2 = type2.is_assignable_from(top2, verifier(), CHECK);
+ bool subtype2 = type2.is_assignable_from(top2, verifier(), false, CHECK);
if (subtype1 && subtype2) {
_stack_size -= 2;
return;
--- hotspot/src/share/vm/classfile/verificationType.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/classfile/verificationType.cpp 2014-10-16 18:26:51.017957848 +0200
@@ -42,7 +42,8 @@
}
bool VerificationType::is_reference_assignable_from(
- const VerificationType& from, ClassVerifier* context, TRAPS) const {
+ const VerificationType& from, ClassVerifier* context,
+ bool from_field_is_protected, TRAPS) const {
instanceKlassHandle klass = context->current_class();
if (from.is_null()) {
// null is assignable to any reference
@@ -62,9 +63,11 @@
Handle(THREAD, klass->protection_domain()), true, CHECK_false);
KlassHandle this_class(THREAD, obj);
- if (this_class->is_interface()) {
- // We treat interfaces as java.lang.Object, including
- // java.lang.Cloneable and java.io.Serializable
+ if (this_class->is_interface() && (!from_field_is_protected ||
+ from.name() != vmSymbols::java_lang_Object())) {
+ // If we are not trying to access a protected field or method in
+ // java.lang.Object then we treat interfaces as java.lang.Object,
+ // including java.lang.Cloneable and java.io.Serializable.
return true;
} else if (from.is_object()) {
Klass* from_class = SystemDictionary::resolve_or_fail(
@@ -76,7 +79,8 @@
VerificationType comp_this = get_component(context, CHECK_false);
VerificationType comp_from = from.get_component(context, CHECK_false);
if (!comp_this.is_bogus() && !comp_from.is_bogus()) {
- return comp_this.is_assignable_from(comp_from, context, CHECK_false);
+ return comp_this.is_assignable_from(comp_from, context,
+ from_field_is_protected, CHECK_false);
}
}
return false;
--- hotspot/src/share/vm/classfile/verificationType.hpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/classfile/verificationType.hpp 2014-10-16 18:26:51.017957848 +0200
@@ -265,7 +265,8 @@
// is assignable to another. Returns true if one can assign 'from' to
// this.
bool is_assignable_from(
- const VerificationType& from, ClassVerifier* context, TRAPS) const {
+ const VerificationType& from, ClassVerifier* context,
+ bool from_field_is_protected, TRAPS) const {
if (equals(from) || is_bogus()) {
return true;
} else {
@@ -286,7 +287,9 @@
return from.is_integer();
default:
if (is_reference() && from.is_reference()) {
- return is_reference_assignable_from(from, context, CHECK_false);
+ return is_reference_assignable_from(from, context,
+ from_field_is_protected,
+ CHECK_false);
} else {
return false;
}
@@ -308,7 +311,8 @@
private:
bool is_reference_assignable_from(
- const VerificationType&, ClassVerifier*, TRAPS) const;
+ const VerificationType&, ClassVerifier*, bool from_field_is_protected,
+ TRAPS) const;
};
#endif // SHARE_VM_CLASSFILE_VERIFICATIONTYPE_HPP
--- hotspot/src/share/vm/classfile/verifier.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/classfile/verifier.cpp 2014-10-16 18:26:51.018957825 +0200
@@ -1737,7 +1737,7 @@
VerificationType throwable =
VerificationType::reference_type(vmSymbols::java_lang_Throwable());
bool is_subclass = throwable.is_assignable_from(
- catch_type, this, CHECK_VERIFY(this));
+ catch_type, this, false, CHECK_VERIFY(this));
if (!is_subclass) {
// 4286534: should throw VerifyError according to recent spec change
verify_error(ErrorContext::bad_type(handler_pc,
@@ -2192,7 +2192,7 @@
stack_object_type = current_type();
}
is_assignable = target_class_type.is_assignable_from(
- stack_object_type, this, CHECK_VERIFY(this));
+ stack_object_type, this, false, CHECK_VERIFY(this));
if (!is_assignable) {
verify_error(ErrorContext::bad_type(bci,
current_frame->stack_top_ctx(),
@@ -2219,7 +2219,7 @@
// It's protected access, check if stack object is assignable to
// current class.
is_assignable = current_type().is_assignable_from(
- stack_object_type, this, CHECK_VERIFY(this));
+ stack_object_type, this, true, CHECK_VERIFY(this));
if (!is_assignable) {
verify_error(ErrorContext::bad_type(bci,
current_frame->stack_top_ctx(),
@@ -2492,7 +2492,7 @@
instanceKlassHandle mh(THREAD, m->method_holder());
if (m->is_protected() && !mh->is_same_class_package(_klass())) {
bool assignable = current_type().is_assignable_from(
- objectref_type, this, CHECK_VERIFY(this));
+ objectref_type, this, true, CHECK_VERIFY(this));
if (!assignable) {
verify_error(ErrorContext::bad_type(bci,
TypeOrigin::cp(new_class_index, objectref_type),
@@ -2667,11 +2667,11 @@
bool have_imr_indirect = cp->tag_at(index).value() == JVM_CONSTANT_InterfaceMethodref;
if (!current_class()->is_anonymous()) {
subtype = ref_class_type.is_assignable_from(
- current_type(), this, CHECK_VERIFY(this));
+ current_type(), this, false, CHECK_VERIFY(this));
} else {
VerificationType host_klass_type =
VerificationType::reference_type(current_class()->host_klass()->name());
- subtype = ref_class_type.is_assignable_from(host_klass_type, this, CHECK_VERIFY(this));
+ subtype = ref_class_type.is_assignable_from(host_klass_type, this, false, CHECK_VERIFY(this));
// If invokespecial of IMR, need to recheck for same or
// direct interface relative to the host class
@@ -2715,7 +2715,7 @@
VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
VerificationType hosttype =
VerificationType::reference_type(current_class()->host_klass()->name());
- bool subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
+ bool subtype = hosttype.is_assignable_from(top, this, false, CHECK_VERIFY(this));
if (!subtype) {
verify_error( ErrorContext::bad_type(current_frame->offset(),
current_frame->stack_top_ctx(),
@@ -2740,7 +2740,7 @@
// It's protected access, check if stack object is
// assignable to current class.
bool is_assignable = current_type().is_assignable_from(
- stack_object_type, this, CHECK_VERIFY(this));
+ stack_object_type, this, true, CHECK_VERIFY(this));
if (!is_assignable) {
if (ref_class_type.name() == vmSymbols::java_lang_Object()
&& stack_object_type.is_array()
@@ -2923,7 +2923,7 @@
"Method expects a return value");
return;
}
- bool match = return_type.is_assignable_from(type, this, CHECK_VERIFY(this));
+ bool match = return_type.is_assignable_from(type, this, false, CHECK_VERIFY(this));
if (!match) {
verify_error(ErrorContext::bad_type(bci,
current_frame->stack_top_ctx(), TypeOrigin::signature(return_type)),
--- hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp 2014-10-16 18:26:51.019957802 +0200
@@ -2733,10 +2733,12 @@
}
}
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
- assert(fl->count() == 0, "Precondition.");
- assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
- "Precondition");
+// Used by par_get_chunk_of_blocks() for the chunks from the
+// indexed_free_lists. Looks for a chunk with size that is a multiple
+// of "word_sz" and if found, splits it into "word_sz" chunks and add
+// to the free list "fl". "n" is the maximum number of chunks to
+// be added to "fl".
+bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
// We'll try all multiples of word_sz in the indexed set, starting with
// word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
@@ -2817,11 +2819,15 @@
Mutex::_no_safepoint_check_flag);
ssize_t births = _indexedFreeList[word_sz].split_births() + num;
_indexedFreeList[word_sz].set_split_births(births);
- return;
+ return true;
}
}
+ return found;
}
- // Otherwise, we'll split a block from the dictionary.
+}
+
+FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
+
FreeChunk* fc = NULL;
FreeChunk* rem_fc = NULL;
size_t rem;
@@ -2832,16 +2838,12 @@
fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
FreeBlockDictionary<FreeChunk>::atLeast);
if (fc != NULL) {
- _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
- dictionary()->dict_census_update(fc->size(),
- true /*split*/,
- false /*birth*/);
break;
} else {
n--;
}
}
- if (fc == NULL) return;
+ if (fc == NULL) return NULL;
// Otherwise, split up that block.
assert((ssize_t)n >= 1, "Control point invariant");
assert(fc->is_free(), "Error: should be a free block");
@@ -2863,10 +2865,14 @@
// dictionary and return, leaving "fl" empty.
if (n == 0) {
returnChunkToDictionary(fc);
- assert(fl->count() == 0, "We never allocated any blocks");
- return;
+ return NULL;
}
+ _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
+ dictionary()->dict_census_update(fc->size(),
+ true /*split*/,
+ false /*birth*/);
+
// First return the remainder, if any.
// Note that we hold the lock until we decide if we're going to give
// back the remainder to the dictionary, since a concurrent allocation
@@ -2899,7 +2905,24 @@
_indexedFreeList[rem].return_chunk_at_head(rem_fc);
smallSplitBirth(rem);
}
- assert((ssize_t)n > 0 && fc != NULL, "Consistency");
+ assert(n * word_sz == fc->size(),
+ err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
+ SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
+ fc->size(), n, word_sz));
+ return fc;
+}
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
+
+ FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
+
+ if (fc == NULL) {
+ return;
+ }
+
+ size_t n = fc->size() / word_sz;
+
+ assert((ssize_t)n > 0, "Consistency");
// Now do the splitting up.
// Must do this in reverse order, so that anybody attempting to
// access the main chunk sees it as a single free block until we
@@ -2947,6 +2970,20 @@
assert(fl->tail()->next() == NULL, "List invariant.");
}
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
+ assert(fl->count() == 0, "Precondition.");
+ assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+ "Precondition");
+
+ if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+ // Got it
+ return;
+ }
+
+ // Otherwise, we'll split a block from the dictionary.
+ par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
--- hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp 2014-10-16 18:26:51.020957780 +0200
@@ -172,6 +172,20 @@
// list of size "word_sz", and must now be decremented.
void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+ // Used by par_get_chunk_of_blocks() for the chunks from the
+ // indexed_free_lists.
+ bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+ // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
+ // evenly splittable into "n" "word_sz" chunks. Returns that
+ // evenly splittable chunk. May split a larger chunk to get the
+ // evenly splittable chunk.
+ FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+ // Used by par_get_chunk_of_blocks() for the chunks from the
+ // dictionary.
+ void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
// Allocation helper functions
// Allocate using a strategy that takes from the indexed free lists
// first. This allocation strategy assumes a companion sweeping
--- hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2014-10-16 18:26:51.022957734 +0200
@@ -2343,6 +2343,7 @@
case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
case GCCause::_g1_humongous_allocation: return true;
+ case GCCause::_update_allocation_context_stats_inc: return true;
default: return false;
}
}
--- hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp 2014-10-16 18:26:51.022957734 +0200
@@ -95,8 +95,9 @@
assert(!_should_initiate_conc_mark ||
((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
(_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
- _gc_cause == GCCause::_g1_humongous_allocation),
- "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
+ _gc_cause == GCCause::_g1_humongous_allocation ||
+ _gc_cause == GCCause::_update_allocation_context_stats_inc),
+ "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
--- hotspot/src/share/vm/gc_interface/gcCause.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/gc_interface/gcCause.cpp 2014-10-16 18:26:51.023957711 +0200
@@ -54,7 +54,8 @@
case _wb_young_gc:
return "WhiteBox Initiated Young GC";
- case _update_allocation_context_stats:
+ case _update_allocation_context_stats_inc:
+ case _update_allocation_context_stats_full:
return "Update Allocation Context Stats";
case _no_gc:
--- hotspot/src/share/vm/gc_interface/gcCause.hpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/gc_interface/gcCause.hpp 2014-10-16 18:26:51.023957711 +0200
@@ -47,7 +47,8 @@
_heap_inspection,
_heap_dump,
_wb_young_gc,
- _update_allocation_context_stats,
+ _update_allocation_context_stats_inc,
+ _update_allocation_context_stats_full,
/* implementation independent, but reserved for GC use */
_no_gc,
--- hotspot/src/share/vm/interpreter/linkResolver.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/interpreter/linkResolver.cpp 2014-10-16 18:26:51.024957688 +0200
@@ -246,6 +246,12 @@
// Ignore overpasses so statics can be found during resolution
Method* result_oop = klass->uncached_lookup_method(name, signature, Klass::skip_overpass);
+ if (klass->oop_is_array()) {
+ // Only consider klass and super klass for arrays
+ result = methodHandle(THREAD, result_oop);
+ return;
+ }
+
// JDK 8, JVMS 5.4.3.4: Interface method resolution should
// ignore static and non-public methods of java.lang.Object,
// like clone, finalize, registerNatives.
@@ -290,6 +296,11 @@
result = methodHandle(THREAD, super_klass->uncached_lookup_method(name, signature, Klass::normal));
}
+ if (klass->oop_is_array()) {
+ // Only consider klass and super klass for arrays
+ return;
+ }
+
if (result.is_null()) {
Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
if (default_methods != NULL) {
@@ -546,7 +557,7 @@
// 2. lookup method in resolved klass and its super klasses
lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, true, false, CHECK);
- if (resolved_method.is_null()) { // not found in the class hierarchy
+ if (resolved_method.is_null() && !resolved_klass->oop_is_array()) { // not found in the class hierarchy
// 3. lookup method in all the interfaces implemented by the resolved klass
lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
@@ -559,6 +570,7 @@
CLEAR_PENDING_EXCEPTION;
}
}
+ }
if (resolved_method.is_null()) {
// 4. method lookup failed
@@ -569,7 +581,6 @@
method_signature),
nested_exception);
}
- }
// 5. access checks, access checking may be turned off when calling from within the VM.
if (check_access) {
@@ -634,9 +645,11 @@
// JDK8: also look for static methods
lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, false, true, CHECK);
- if (resolved_method.is_null()) {
+ if (resolved_method.is_null() && !resolved_klass->oop_is_array()) {
// lookup method in all the super-interfaces
lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+ }
+
if (resolved_method.is_null()) {
// no method found
ResourceMark rm(THREAD);
@@ -645,7 +658,6 @@
method_name,
method_signature));
}
- }
if (check_access) {
// JDK8 adds non-public interface methods, and accessability check requirement
@@ -776,7 +788,7 @@
}
// Resolve instance field
- KlassHandle sel_klass(THREAD, InstanceKlass::cast(resolved_klass())->find_field(field, sig, &fd));
+ KlassHandle sel_klass(THREAD, resolved_klass->find_field(field, sig, &fd));
// check if field exists; i.e., if a klass containing the field def has been selected
if (sel_klass.is_null()) {
ResourceMark rm(THREAD);
--- hotspot/src/share/vm/memory/filemap.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/memory/filemap.cpp 2014-10-16 18:26:51.024957688 +0200
@@ -314,7 +314,6 @@
fail_continue("The shared archive file has the wrong version.");
return false;
}
- _file_offset = (long)n;
size_t info_size = _header->_paths_misc_info_size;
_paths_misc_info = NEW_C_HEAP_ARRAY_RETURN_NULL(char, info_size, mtClass);
@@ -330,6 +329,14 @@
return false;
}
+ size_t len = lseek(fd, 0, SEEK_END);
+ struct FileMapInfo::FileMapHeader::space_info* si =
+ &_header->_space[MetaspaceShared::mc];
+ if (si->_file_offset >= len || len - si->_file_offset < si->_used) {
+ fail_continue("The shared archive file has been truncated.");
+ return false;
+ }
+
_file_offset += (long)n;
return true;
}
@@ -430,6 +437,7 @@
si->_capacity = capacity;
si->_read_only = read_only;
si->_allow_exec = allow_exec;
+ si->_crc = ClassLoader::crc32(0, base, (jint)size);
write_bytes_aligned(base, (int)size);
}
@@ -454,14 +462,15 @@
// Align file position to an allocation unit boundary.
void FileMapInfo::align_file_position() {
- long new_file_offset = align_size_up(_file_offset, os::vm_allocation_granularity());
+ size_t new_file_offset = align_size_up(_file_offset,
+ os::vm_allocation_granularity());
if (new_file_offset != _file_offset) {
_file_offset = new_file_offset;
if (_file_open) {
// Seek one byte back from the target and write a byte to insure
// that the written file is the correct length.
_file_offset -= 1;
- if (lseek(_fd, _file_offset, SEEK_SET) < 0) {
+ if (lseek(_fd, (long)_file_offset, SEEK_SET) < 0) {
fail_stop("Unable to seek.", NULL);
}
char zero = 0;
@@ -568,6 +577,19 @@
return base;
}
+bool FileMapInfo::verify_region_checksum(int i) {
+ if (!VerifySharedSpaces) {
+ return true;
+ }
+ const char* buf = _header->_space[i]._base;
+ size_t sz = _header->_space[i]._used;
+ int crc = ClassLoader::crc32(0, buf, (jint)sz);
+ if (crc != _header->_space[i]._crc) {
+ fail_continue("Checksum verification failed.");
+ return false;
+ }
+ return true;
+}
// Unmap a memory region in the address space.
@@ -628,15 +650,33 @@
return true;
}
-bool FileMapInfo::FileMapHeader::validate() {
- if (_version != current_version()) {
- FileMapInfo::fail_continue("The shared archive file is the wrong version.");
- return false;
+int FileMapInfo::FileMapHeader::compute_crc() {
+ char* header = data();
+ // start computing from the field after _crc
+ char* buf = (char*)&_crc + sizeof(int);
+ size_t sz = data_size() - (buf - header);
+ int crc = ClassLoader::crc32(0, buf, (jint)sz);
+ return crc;
+}
+
+int FileMapInfo::compute_header_crc() {
+ return _header->compute_crc();
}
+
+bool FileMapInfo::FileMapHeader::validate() {
if (_magic != (int)0xf00baba2) {
FileMapInfo::fail_continue("The shared archive file has a bad magic number.");
return false;
}
+ if (VerifySharedSpaces && compute_crc() != _crc) {
+ fail_continue("Header checksum verification failed.");
+ return false;
+ }
+ if (_version != current_version()) {
+ FileMapInfo::fail_continue("The shared archive file is the wrong version.");
+
+ return false;
+ }
char header_version[JVM_IDENT_MAX];
get_header_version(header_version);
if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
--- hotspot/src/share/vm/memory/filemap.hpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/memory/filemap.hpp 2014-10-16 18:26:51.024957688 +0200
@@ -61,7 +61,7 @@
bool _file_open;
int _fd;
- long _file_offset;
+ size_t _file_offset;
private:
static SharedClassPathEntry* _classpath_entry_table;
@@ -87,12 +87,14 @@
}
int _magic; // identify file type.
+ int _crc; // header crc checksum.
int _version; // (from enum, above.)
size_t _alignment; // how shared archive should be aligned
int _obj_alignment; // value of ObjectAlignmentInBytes
struct space_info {
- int _file_offset; // sizeof(this) rounded to vm page size
+ int _crc; // crc checksum of the current space
+ size_t _file_offset; // sizeof(this) rounded to vm page size
char* _base; // copy-on-write base address
size_t _capacity; // for validity checking
size_t _used; // for setting space top on read
@@ -135,6 +137,7 @@
virtual bool validate();
virtual void populate(FileMapInfo* info, size_t alignment);
+ int compute_crc();
};
FileMapHeader * _header;
@@ -153,6 +156,8 @@
~FileMapInfo();
static int current_version() { return _current_version; }
+ int compute_header_crc();
+ void set_header_crc(int crc) { _header->_crc = crc; }
void populate_header(size_t alignment);
bool validate_header();
void invalidate();
@@ -181,6 +186,7 @@
void write_bytes_aligned(const void* buffer, int count);
char* map_region(int i);
void unmap_region(int i);
+ bool verify_region_checksum(int i);
void close();
bool is_open() { return _file_open; }
ReservedSpace reserve_shared_memory();
--- hotspot/src/share/vm/memory/metaspaceShared.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/memory/metaspaceShared.cpp 2014-10-16 18:26:51.025957665 +0200
@@ -607,6 +607,7 @@
// Pass 2 - write data.
mapinfo->open_for_write();
+ mapinfo->set_header_crc(mapinfo->compute_header_crc());
mapinfo->write_header();
mapinfo->write_space(MetaspaceShared::ro, _loader_data->ro_metaspace(), true);
mapinfo->write_space(MetaspaceShared::rw, _loader_data->rw_metaspace(), false);
@@ -936,9 +937,13 @@
// Map each shared region
if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
+ mapinfo->verify_region_checksum(ro) &&
(_rw_base = mapinfo->map_region(rw)) != NULL &&
+ mapinfo->verify_region_checksum(rw) &&
(_md_base = mapinfo->map_region(md)) != NULL &&
+ mapinfo->verify_region_checksum(md) &&
(_mc_base = mapinfo->map_region(mc)) != NULL &&
+ mapinfo->verify_region_checksum(mc) &&
(image_alignment == (size_t)max_alignment()) &&
mapinfo->validate_classpath_entry_table()) {
// Success (no need to do anything)
--- hotspot/src/share/vm/oops/arrayKlass.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/oops/arrayKlass.cpp 2014-10-16 18:26:51.025957665 +0200
@@ -64,6 +64,13 @@
return NULL;
}
+// find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined
+Klass* ArrayKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
+ // There are no fields in an array klass but look to the super class (Object)
+ assert(super(), "super klass must be present");
+ return super()->find_field(name, sig, fd);
+}
+
Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
// There are no methods in an array klass but the super class (Object) has some
assert(super(), "super klass must be present");
--- hotspot/src/share/vm/oops/arrayKlass.hpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/oops/arrayKlass.hpp 2014-10-16 18:26:51.026957643 +0200
@@ -28,6 +28,7 @@
#include "memory/universe.hpp"
#include "oops/klass.hpp"
+class fieldDescriptor;
class klassVtable;
// ArrayKlass is the abstract baseclass for all array classes
@@ -85,6 +86,9 @@
virtual oop multi_allocate(int rank, jint* sizes, TRAPS);
objArrayOop allocate_arrayArray(int n, int length, TRAPS);
+ // find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined
+ Klass* find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
+
// Lookup operations
Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
--- hotspot/src/share/vm/oops/klass.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/oops/klass.cpp 2014-10-16 18:26:51.026957643 +0200
@@ -130,6 +130,15 @@
return is_subclass_of(k);
}
+Klass* Klass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
+#ifdef ASSERT
+ tty->print_cr("Error: find_field called on a klass oop."
+ " Likely error: reflection method does not correctly"
+ " wrap return value in a mirror object.");
+#endif
+ ShouldNotReachHere();
+ return NULL;
+}
Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
#ifdef ASSERT
--- hotspot/src/share/vm/oops/klass.hpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/oops/klass.hpp 2014-10-16 18:26:51.026957643 +0200
@@ -90,6 +90,7 @@
class klassVtable;
class ParCompactionManager;
class KlassSizeStats;
+class fieldDescriptor;
class Klass : public Metadata {
friend class VMStructs;
@@ -441,6 +442,7 @@
virtual void initialize(TRAPS);
// lookup operation for MethodLookupCache
friend class MethodLookupCache;
+ virtual Klass* find_field(Symbol* name, Symbol* signature, fieldDescriptor* fd) const;
virtual Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
public:
Method* lookup_method(Symbol* name, Symbol* signature) const {
--- hotspot/src/share/vm/prims/jvm.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/prims/jvm.cpp 2014-10-16 18:26:51.027957620 +0200
@@ -808,6 +808,7 @@
return (jclass) JNIHandles::make_local(env, k->java_mirror());
JVM_END
+// Not used; JVM_FindClassFromCaller replaces this.
JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name,
jboolean init, jobject loader,
jboolean throwError))
@@ -834,6 +835,42 @@
return result;
JVM_END
+// Find a class with this name in this loader, using the caller's protection domain.
+JVM_ENTRY(jclass, JVM_FindClassFromCaller(JNIEnv* env, const char* name,
+ jboolean init, jobject loader,
+ jclass caller))
+ JVMWrapper2("JVM_FindClassFromCaller %s throws ClassNotFoundException", name);
+ // Java libraries should ensure that name is never null...
+ if (name == NULL || (int)strlen(name) > Symbol::max_length()) {
+ // It's impossible to create this class; the name cannot fit
+ // into the constant pool.
+ THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), name);
+ }
+
+ TempNewSymbol h_name = SymbolTable::new_symbol(name, CHECK_NULL);
+
+ oop loader_oop = JNIHandles::resolve(loader);
+ oop from_class = JNIHandles::resolve(caller);
+ oop protection_domain = NULL;
+ // If loader is null, shouldn't call ClassLoader.checkPackageAccess; otherwise get
+ // NPE. Put it in another way, the bootstrap class loader has all permission and
+ // thus no checkPackageAccess equivalence in the VM class loader.
+ // The caller is also passed as NULL by the java code if there is no security
+ // manager to avoid the performance cost of getting the calling class.
+ if (from_class != NULL && loader_oop != NULL) {
+ protection_domain = java_lang_Class::as_Klass(from_class)->protection_domain();
+ }
+
+ Handle h_loader(THREAD, loader_oop);
+ Handle h_prot(THREAD, protection_domain);
+ jclass result = find_class_from_class_loader(env, h_name, init, h_loader,
+ h_prot, false, THREAD);
+
+ if (TraceClassResolution && result != NULL) {
+ trace_class_resolution(java_lang_Class::as_Klass(JNIHandles::resolve_non_null(result)));
+ }
+ return result;
+JVM_END
JVM_ENTRY(jclass, JVM_FindClassFromClass(JNIEnv *env, const char *name,
jboolean init, jclass from))
@@ -4007,10 +4044,15 @@
// Shared JNI/JVM entry points //////////////////////////////////////////////////////////////
-jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init, Handle loader, Handle protection_domain, jboolean throwError, TRAPS) {
+jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init,
+ Handle loader, Handle protection_domain,
+ jboolean throwError, TRAPS) {
// Security Note:
// The Java level wrapper will perform the necessary security check allowing
- // us to pass the NULL as the initiating class loader.
+ // us to pass the NULL as the initiating class loader. The VM is responsible for
+ // the checkPackageAccess relative to the initiating class loader via the
+ // protection_domain. The protection_domain is passed as NULL by the java code
+ // if there is no security manager in 3-arg Class.forName().
Klass* klass = SystemDictionary::resolve_or_fail(name, loader, protection_domain, throwError != 0, CHECK_NULL);
KlassHandle klass_handle(THREAD, klass);
--- hotspot/src/share/vm/prims/jvm.h 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/prims/jvm.h 2014-10-16 18:26:51.028957597 +0200
@@ -420,6 +420,19 @@
JVM_FindClassFromBootLoader(JNIEnv *env, const char *name);
/*
+ * Find a class from a given class loader. Throws ClassNotFoundException.
+ * name: name of class
+ * init: whether initialization is done
+ * loader: class loader to look up the class. This may not be the same as the caller's
+ * class loader.
+ * caller: initiating class. The initiating class may be null when a security
+ * manager is not installed.
+ */
+JNIEXPORT jclass JNICALL
+JVM_FindClassFromCaller(JNIEnv *env, const char *name, jboolean init,
+ jobject loader, jclass caller);
+
+/*
* Find a class from a given class.
*/
JNIEXPORT jclass JNICALL
--- hotspot/src/share/vm/runtime/arguments.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/runtime/arguments.cpp 2014-10-16 18:26:51.029957574 +0200
@@ -2455,6 +2455,10 @@
warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
}
+#ifdef COMPILER1
+ status &= verify_interval(SafepointPollOffset, 0, os::vm_page_size() - BytesPerWord, "SafepointPollOffset");
+#endif
+
return status;
}
@@ -3708,6 +3712,11 @@
return JNI_ENOMEM;
}
+ // Set up VerifySharedSpaces
+ if (FLAG_IS_DEFAULT(VerifySharedSpaces) && SharedArchiveFile != NULL) {
+ VerifySharedSpaces = true;
+ }
+
// Delay warning until here so that we've had a chance to process
// the -XX:-PrintWarnings flag
if (needs_hotspotrc_warning) {
--- hotspot/src/share/vm/runtime/globals.hpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/runtime/globals.hpp 2014-10-16 18:26:51.030957551 +0200
@@ -1166,11 +1166,11 @@
"Prevent spurious or premature wakeups from object.wait " \
"(Solaris only)") \
\
- product(intx, NativeMonitorTimeout, -1, "(Unstable)") \
+ experimental(intx, NativeMonitorTimeout, -1, "(Unstable)") \
\
- product(intx, NativeMonitorFlags, 0, "(Unstable)") \
+ experimental(intx, NativeMonitorFlags, 0, "(Unstable)") \
\
- product(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \
+ experimental(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \
\
develop(bool, UsePthreads, false, \
"Use pthread-based instead of libthread-based synchronization " \
@@ -3787,6 +3787,10 @@
product(bool, UseSharedSpaces, true, \
"Use shared spaces for metadata") \
\
+ product(bool, VerifySharedSpaces, false, \
+ "Verify shared spaces (false for default archive, true for " \
+ "archive specified by -XX:SharedArchiveFile)") \
+ \
product(bool, RequireSharedSpaces, false, \
"Require shared spaces for metadata") \
\
--- hotspot/src/share/vm/runtime/reflection.cpp 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/src/share/vm/runtime/reflection.cpp 2014-10-16 18:26:51.031957528 +0200
@@ -484,7 +484,7 @@
ik = InstanceKlass::cast(hc);
// There's no way to make a host class loop short of patching memory.
- // Therefore there cannot be a loop here unles there's another bug.
+ // Therefore there cannot be a loop here unless there's another bug.
// Still, let's check for it.
assert(--inf_loop_check > 0, "no host_klass loop");
}
@@ -553,7 +553,8 @@
if (access.is_protected()) {
if (!protected_restriction) {
// See if current_class (or outermost host class) is a subclass of field_class
- if (host_class->is_subclass_of(field_class)) {
+ // An interface may not access protected members of j.l.Object
+ if (!host_class->is_interface() && host_class->is_subclass_of(field_class)) {
if (access.is_static() || // static fields are ok, see 6622385
current_class == resolved_class ||
field_class == resolved_class ||
--- hotspot/test/compiler/osr/TestRangeCheck.java 1970-01-01 01:00:00.000000000 +0100
+++ hotspot/test/compiler/osr/TestRangeCheck.java 2014-10-16 18:26:51.031957528 +0200
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestRangeCheck
+ * @bug 8054883
+ * @summary Tests that range check is not skipped
+ */
+
+public class TestRangeCheck {
+ public static void main(String args[]) {
+ try {
+ test();
+ throw new AssertionError("Expected ArrayIndexOutOfBoundsException was not thrown");
+ } catch (ArrayIndexOutOfBoundsException e) {
+ System.out.println("Expected ArrayIndexOutOfBoundsException was thrown");
+ }
+ }
+
+ private static void test() {
+ int arr[] = new int[1];
+ int result = 1;
+
+ // provoke OSR compilation
+ for (int i = 0; i < Integer.MAX_VALUE; i++) {
+ }
+
+ if (result > 0 && arr[~result] > 0) {
+ arr[~result] = 0;
+ }
+ }
+}
--- hotspot/test/gc/class_unloading/AllocateBeyondMetaspaceSize.java 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/test/gc/class_unloading/AllocateBeyondMetaspaceSize.java 1970-01-01 01:00:00.000000000 +0100
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-import sun.hotspot.WhiteBox;
-
-class AllocateBeyondMetaspaceSize {
- public static Object dummy;
-
- public static void main(String [] args) {
- if (args.length != 2) {
- throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
- }
-
- long metaspaceSize = Long.parseLong(args[0]);
- long youngGenSize = Long.parseLong(args[1]);
-
- run(metaspaceSize, youngGenSize);
- }
-
- private static void run(long metaspaceSize, long youngGenSize) {
- WhiteBox wb = WhiteBox.getWhiteBox();
-
- long allocationBeyondMetaspaceSize = metaspaceSize * 2;
- long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
-
- triggerYoungGC(youngGenSize);
-
- wb.freeMetaspace(null, metaspace, metaspace);
- }
-
- private static void triggerYoungGC(long youngGenSize) {
- long approxAllocSize = 32 * 1024;
- long numAllocations = 2 * youngGenSize / approxAllocSize;
-
- for (long i = 0; i < numAllocations; i++) {
- dummy = new byte[(int)approxAllocSize];
- }
- }
-}
--- hotspot/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java 2014-10-16 18:26:51.032957506 +0200
@@ -26,7 +26,7 @@
* @key gc
* @bug 8049831
* @library /testlibrary /testlibrary/whitebox
- * @build TestCMSClassUnloadingEnabledHWM AllocateBeyondMetaspaceSize
+ * @build TestCMSClassUnloadingEnabledHWM
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run driver TestCMSClassUnloadingEnabledHWM
* @summary Test that -XX:-CMSClassUnloadingEnabled will trigger a Full GC when more than MetaspaceSize metadata is allocated.
@@ -34,9 +34,11 @@
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;
-
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.Arrays;
+import sun.hotspot.WhiteBox;
public class TestCMSClassUnloadingEnabledHWM {
private static long MetaspaceSize = 32 * 1024 * 1024;
@@ -47,15 +49,18 @@
"-Xbootclasspath/a:.",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
+ "-Xmx128m",
+ "-XX:CMSMaxAbortablePrecleanTime=1",
+ "-XX:CMSWaitDuration=50",
"-XX:MetaspaceSize=" + MetaspaceSize,
"-Xmn" + YoungGenSize,
"-XX:+UseConcMarkSweepGC",
"-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
"-XX:+PrintHeapAtGC",
"-XX:+PrintGCDetails",
- "AllocateBeyondMetaspaceSize",
- "" + MetaspaceSize,
- "" + YoungGenSize);
+ "-XX:+PrintGCTimeStamps",
+ TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(),
+ "" + MetaspaceSize);
return new OutputAnalyzer(pb.start());
}
@@ -87,5 +92,37 @@
testWithCMSClassUnloading();
testWithoutCMSClassUnloading();
}
+
+ public static class AllocateBeyondMetaspaceSize {
+ public static void main(String [] args) throws Exception {
+ if (args.length != 1) {
+ throw new IllegalArgumentException("Usage: <MetaspaceSize>");
+ }
+
+ WhiteBox wb = WhiteBox.getWhiteBox();
+
+ // Allocate past the MetaspaceSize limit.
+ long metaspaceSize = Long.parseLong(args[0]);
+ long allocationBeyondMetaspaceSize = metaspaceSize * 2;
+ long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+ // Wait for at least one GC to occur. The caller will parse the log files produced.
+ GarbageCollectorMXBean cmsGCBean = getCMSGCBean();
+ while (cmsGCBean.getCollectionCount() == 0) {
+ Thread.sleep(100);
+ }
+
+ wb.freeMetaspace(null, metaspace, metaspace);
+ }
+
+ private static GarbageCollectorMXBean getCMSGCBean() {
+ for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) {
+ if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) {
+ return gcBean;
+ }
+ }
+ return null;
+ }
+ }
}
--- hotspot/test/gc/class_unloading/TestG1ClassUnloadingHWM.java 2014-10-13 11:53:11.000000000 +0200
+++ hotspot/test/gc/class_unloading/TestG1ClassUnloadingHWM.java 2014-10-16 18:26:51.032957506 +0200
@@ -26,7 +26,7 @@
* @key gc
* @bug 8049831
* @library /testlibrary /testlibrary/whitebox
- * @build TestG1ClassUnloadingHWM AllocateBeyondMetaspaceSize
+ * @build TestG1ClassUnloadingHWM
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run driver TestG1ClassUnloadingHWM
* @summary Test that -XX:-ClassUnloadingWithConcurrentMark will trigger a Full GC when more than MetaspaceSize metadata is allocated.
@@ -34,9 +34,9 @@
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;
-
import java.util.ArrayList;
import java.util.Arrays;
+import sun.hotspot.WhiteBox;
public class TestG1ClassUnloadingHWM {
private static long MetaspaceSize = 32 * 1024 * 1024;
@@ -53,7 +53,7 @@
"-XX:" + (enableUnloading ? "+" : "-") + "ClassUnloadingWithConcurrentMark",
"-XX:+PrintHeapAtGC",
"-XX:+PrintGCDetails",
- "AllocateBeyondMetaspaceSize",
+ TestG1ClassUnloadingHWM.AllocateBeyondMetaspaceSize.class.getName(),
"" + MetaspaceSize,
"" + YoungGenSize);
return new OutputAnalyzer(pb.start());
@@ -87,5 +87,36 @@
testWithG1ClassUnloading();
testWithoutG1ClassUnloading();
}
+
+ public static class AllocateBeyondMetaspaceSize {
+ public static Object dummy;
+
+ public static void main(String [] args) throws Exception {
+ if (args.length != 2) {
+ throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
+ }
+
+ WhiteBox wb = WhiteBox.getWhiteBox();
+
+ // Allocate past the MetaspaceSize limit
+ long metaspaceSize = Long.parseLong(args[0]);
+ long allocationBeyondMetaspaceSize = metaspaceSize * 2;
+ long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+ long youngGenSize = Long.parseLong(args[1]);
+ triggerYoungGCs(youngGenSize);
+
+ wb.freeMetaspace(null, metaspace, metaspace);
+ }
+
+ public static void triggerYoungGCs(long youngGenSize) {
+ long approxAllocSize = 32 * 1024;
+ long numAllocations = 2 * youngGenSize / approxAllocSize;
+
+ for (long i = 0; i < numAllocations; i++) {
+ dummy = new byte[(int)approxAllocSize];
+ }
+ }
+ }
}