Udiff: src/hotspot/share/memory/metaspace.cpp



rev 49010 : [mq]: metaspace-coalesc-patch
rev 49011 : [mq]: metaspace-coal-2


@@ -54,62 +54,100 @@
 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
 
 // Set this constant to enable slow integrity checking of the free chunk lists
-const bool metaspace_slow_verify = false;
+const bool metaspace_slow_verify = DEBUG_ONLY(true) NOT_DEBUG(false);
+
+// Helper function that does a bunch of checks for a chunk.
+DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
+
+// Given a Metachunk, update its in-use information (both in the
+// chunk and the occupancy map).
+static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
 
 size_t const allocation_from_dictionary_limit = 4 * K;
 
 MetaWord* last_allocated = 0;
 
 size_t Metaspace::_compressed_class_space_size;
 const MetaspaceTracer* Metaspace::_tracer = NULL;
 
 DEBUG_ONLY(bool Metaspace::_frozen = false;)
 
-// Used in declarations in SpaceManager and ChunkManager
-enum ChunkIndex {
-  ZeroIndex = 0,
-  SpecializedIndex = ZeroIndex,
-  SmallIndex = SpecializedIndex + 1,
-  MediumIndex = SmallIndex + 1,
-  HumongousIndex = MediumIndex + 1,
-  NumberOfFreeLists = 3,
-  NumberOfInUseLists = 4
-};
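A note on the new initializer: DEBUG_ONLY and NOT_DEBUG are HotSpot's standard conditional-compilation macros from utilities/macros.hpp, so the flag is now true in debug builds and false in product builds instead of always false. A minimal self-contained sketch of the pattern (the real macro definitions live in macros.hpp):

    // Sketch of the DEBUG_ONLY / NOT_DEBUG pattern. HotSpot debug builds
    // define ASSERT; product builds do not.
    #ifdef ASSERT
      #define DEBUG_ONLY(code) code
      #define NOT_DEBUG(code)
    #else
      #define DEBUG_ONLY(code)
      #define NOT_DEBUG(code) code
    #endif

    // Expands to "true" in a debug build and "false" in a product build.
    const bool metaspace_slow_verify = DEBUG_ONLY(true) NOT_DEBUG(false);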

-// Helper, returns a descriptive name for the given index.
-static const char* chunk_size_name(ChunkIndex index) {
-  switch (index) {
-    case SpecializedIndex:
-      return "specialized";
-    case SmallIndex:
-      return "small";
-    case MediumIndex:
-      return "medium";
-    case HumongousIndex:
-      return "humongous";
-    default:
-      return "Invalid index";
-  }
-}

 enum ChunkSizes {    // in words.
   ClassSpecializedChunk = 128,
   SpecializedChunk = 128,
   ClassSmallChunk = 256,
   SmallChunk = 512,
   ClassMediumChunk = 4 * K,
   MediumChunk = 8 * K
 };
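For orientation: these constants are word counts, not bytes. A self-contained snippet showing the resulting byte sizes of the non-class chunks, assuming a 64-bit VM (BytesPerWord == 8):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t K = 1024;
      const size_t BytesPerWord = 8;        // assumes a 64-bit VM
      const size_t SpecializedChunk = 128;
      const size_t SmallChunk = 512;
      const size_t MediumChunk = 8 * K;
      printf("specialized: %zu bytes\n", SpecializedChunk * BytesPerWord); // 1024
      printf("small:       %zu bytes\n", SmallChunk * BytesPerWord);       // 4096
      printf("medium:      %zu bytes\n", MediumChunk * BytesPerWord);      // 65536
      return 0;
    }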

+// Returns size of this chunk type.
+size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
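The function body is truncated in this rendering. Judging from the name and the ChunkSizes enum above, a body of roughly this shape would be expected (an illustrative sketch, not necessarily the patch's exact code):

    size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
      assert(chunktype != HumongousIndex, "Humongous chunks have no fixed size");
      switch (chunktype) {
        case SpecializedIndex: return is_class ? ClassSpecializedChunk : SpecializedChunk;
        case SmallIndex:       return is_class ? ClassSmallChunk       : SmallChunk;
        case MediumIndex:      return is_class ? ClassMediumChunk      : MediumChunk;
        default:               ShouldNotReachHere(); return 0;
      }
    }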

+static ChunkIndex prev_chunk_index(ChunkIndex i) {


@@ -134,10 +172,13 @@
 //   SpecializedChunk
 //   SmallChunk
 //   MediumChunk
 ChunkList _free_chunks[NumberOfFreeLists];


@@ -176,10 +217,26 @@
     locked_verify_free_chunks_count();
   }
 }
 
 void verify_free_chunks_count();


@@ -188,29 +245,35 @@
   void locked_get_statistics(ChunkManagerStatistics* stat) const;
   void get_statistics(ChunkManagerStatistics* stat) const;
   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 
- public:
-  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
-      : _free_chunks_total(0), _free_chunks_count(0) {
-    _free_chunks[SpecializedIndex].set_size(specialized_size);
-    _free_chunks[SmallIndex].set_size(small_size);
-    _free_chunks[MediumIndex].set_size(medium_size);
+  ChunkManager(bool is_class)
+      : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
+    _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
+    _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
+    _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
   }
 
-  // add or delete (return) a chunk to the global freelist.
+  // Add or delete (return) a chunk to the global freelist.
   Metachunk* chunk_freelist_allocate(size_t word_size);
 
   // Map a size to a list index assuming that there are lists
   // for special, small, medium, and humongous chunks.
   ChunkIndex list_index(size_t size);
 
   // Map a given index to the chunk size.
   size_t size_by_index(ChunkIndex index) const;
 
+  bool is_class() const { return _is_class; }
+
+  // Convenience accessors.
+  size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
+  size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
+  size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
+
   // Take a chunk from the ChunkManager. The chunk is expected to be in
   // the chunk manager (the freelist if non-humongous, the dictionary if
   // humongous).
   void remove_chunk(Metachunk* chunk);
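The practical effect of the constructor change: call sites no longer pass three sizes that had to be kept mutually consistent, they pass only the space kind, and the manager derives its list sizes itself. Hypothetical usage, mirroring the call-site change further down in this diff:

    ChunkManager* non_class_mgr = new ChunkManager(false /* non-class metaspace */);
    ChunkManager* class_mgr     = new ChunkManager(true  /* compressed class space */);

    // The list sizes are now derived from the kind instead of being passed in:
    assert(non_class_mgr->medium_chunk_word_size() == MediumChunk, "sanity");
    assert(class_mgr->medium_chunk_word_size() == ClassMediumChunk, "sanity");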

@@ -491,10 +855,14 @@
   void mangle();
 #endif

 void print_on(outputStream* st) const;
 void print_map(outputStream* st, bool is_class) const;
+


@@ -513,11 +881,12 @@

   return false;
 }

 // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
+VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :


@@ -529,118 +898,121 @@
     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
   }
 }
 
 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
+  DEBUG_ONLY(this->verify();)
   Metachunk* chunk = first_chunk();
   Metachunk* invalid_chunk = (Metachunk*) top();
   while (chunk < invalid_chunk ) {
     assert(chunk->is_tagged_free(), "Should be tagged free");
     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
     chunk_manager->remove_chunk(chunk);
+    DEBUG_ONLY(chunk->remove_sentinel();)
     assert(chunk->next() == NULL &&
            chunk->prev() == NULL,
            "Was not removed from its list");
     chunk = (Metachunk*) next;
   }
 }
 
 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
-  // Format:
-  //
-  // . .. . . ..
-  // SSxSSMMMMMMMMMMMMMMMMsssXX
-  // 112114444444444444444
-  // . .. . . ..
-  // SSxSSMMMMMMMMMMMMMMMMsssXX
-  // 112114444444444444444

   if (bottom() == top()) {
     return;
   }
 
-  // First line: dividers for every med-chunk-sized interval
-  // Second line: a dot for the start of a chunk
-  // Third line: a letter per chunk type (x,s,m,h), uppercase if in use.

   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;

   int line_len = 100;
   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
   line_len = (int)(section_len / spec_chunk_size);

+#endif
+
     p += spec_chunk_size;
     pos ++;
   }
   if (pos > 0) {

 #ifdef ASSERT
 uintx VirtualSpaceNode::container_count_slow() {
   uintx count = 0;
   Metachunk* chunk = first_chunk();
   Metachunk* invalid_chunk = (Metachunk*) top();
   while (chunk < invalid_chunk ) {
     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();


@@ -649,10 +1021,61 @@
   }
   return count;
 }
 #endif

+// Verify all chunks in this list node.
+void VirtualSpaceNode::verify() {
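The body is not visible here. By analogy with container_count_slow() above, a chunk walk like this would be a natural shape for it; a sketch under that assumption, reusing the do_verify_chunk() helper declared earlier in the patch:

    // Illustrative sketch only: walk all chunks from first_chunk() to top()
    // and check each one, the way container_count_slow() walks them.
    void VirtualSpaceNode::verify() {
      Metachunk* chunk = first_chunk();
      Metachunk* invalid_chunk = (Metachunk*) top();
      while (chunk < invalid_chunk) {
        DEBUG_ONLY(do_verify_chunk(chunk);)
        chunk = (Metachunk*) ((MetaWord*)chunk + chunk->word_size());
      }
    }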


@@ -920,12 +1343,10 @@
   Metachunk* get_new_chunk(size_t chunk_word_size);

   // Block allocation and deallocation.
 
   // Allocates a block from the current chunk
   MetaWord* allocate(size_t word_size);


@@ -1076,10 +1497,13 @@

// VirtualSpaceNode methods

 VirtualSpaceNode::~VirtualSpaceNode() {
   _rs.release();

 #ifdef ASSERT
   size_t word_size = sizeof(*this) / BytesPerWord;
   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 #endif
 }


@@ -1099,10 +1523,102 @@

 // Allocates the chunk from the virtual space only.
 // This interface is also used internally for debugging.  Not all
 // chunks removed here are necessarily used for allocation.
 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {


@@ -1124,11 +1640,24 @@

   // Take the space (bump top on the current virtual space).
   inc_top(chunk_word_size);

// Initialize the chunk


@@ -1143,21 +1672,26 @@
   }
 
   size_t commit = MIN2(preferred_bytes, uncommitted);
   bool result = virtual_space()->expand_by(commit, false);
+  if (result) {
+    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
+              (is_class() ? "class" : "non-class"), commit);
+  } else {
+    log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
+              (is_class() ? "class" : "non-class"), commit);
+  }
+
   assert(result, "Failed to commit memory");
 
   return result;
 }
 
 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
   Metachunk* result = take_from_committed(chunk_word_size);
-  if (result != NULL) {
-    inc_container_count();
-  }
   return result;
 }
 
 bool VirtualSpaceNode::initialize() {

@@ -1193,10 +1727,14 @@
          "Reserved size was not set properly " SIZE_FORMAT
          " != " SIZE_FORMAT, reserved()->word_size(),
          _rs.size() / BytesPerWord);
 }


@@ -1277,10 +1815,143 @@

   // Chunk has been removed from the chunks free list, update counters.
   account_for_removed_chunk(chunk);
 }

+bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
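This function is the centerpiece of the patch; its body is elided in this rendering. The underlying idea: when a chunk is returned to the freelist, check whether the enclosing area, aligned to the larger target chunk size, consists entirely of free chunks, and if so merge them into one larger free chunk. A self-contained sketch of that test, simplified to a uniform granule size and a plain free-granule bitmap (the real code works on chunks of mixed sizes and an occupancy map):

    #include <cstddef>
    #include <vector>

    // Can the granules around 'granule_index' be merged into one chunk
    // spanning 'granules_per_target' granules? Only if the whole
    // target-aligned area is free.
    bool can_coalesce(const std::vector<bool>& granule_is_free,
                      size_t granule_index, size_t granules_per_target) {
      // Start of the enclosing, target-size-aligned area.
      const size_t start = granule_index - (granule_index % granules_per_target);
      for (size_t i = start; i < start + granules_per_target; i++) {
        if (!granule_is_free[i]) {
          return false;   // an in-use neighbor blocks the merge
        }
      }
      return true;
    }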


@@ -1295,10 +1966,12 @@
     DEBUG_ONLY(vsl->verify_container_count();)
     next_vsl = vsl->next();
     // Don't free the current virtual space since it will likely
     // be needed soon.
     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {


@@ -1356,18 +2029,26 @@
   vsn->retire(cm);
 }
 
 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
   DEBUG_ONLY(verify_container_count();)
+  assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
     ChunkIndex index = (ChunkIndex)i;
     size_t chunk_size = chunk_manager->size_by_index(index);
 
     while (free_words_in_vs() >= chunk_size) {
       Metachunk* chunk = get_chunk_vs(chunk_size);
-      assert(chunk != NULL, "allocation should have been successful");


@@ -1392,11 +2073,11 @@
   _reserved_words(0),
   _committed_words(0),
   _virtual_space_count(0) {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);


@@ -1424,11 +2105,11 @@
   // Reserve the space
   size_t vs_byte_size = vs_word_size * BytesPerWord;
   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

// Allocate the meta virtual space and initialize it.


@@ -1481,28 +2162,38 @@
 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
   assert(min_words <= preferred_words, "Invalid arguments");


@@ -1522,10 +2213,28 @@
   }
   return false;
 }
 
+// Given a chunk, calculate the largest possible padding space which
+// could be required when allocating it.
+static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
+  const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
+  if (chunk_type != HumongousIndex) {
+    // Normal, non-humongous chunks are allocated at chunk size
+    // boundaries, so the largest padding space required would be that
+    // minus the smallest chunk size.
+    const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
+    return chunk_word_size - smallest_chunk_size;
+  } else {
+    // Humongous chunks are allocated at smallest-chunksize
+    // boundaries, so there is no padding required.
+    return 0;
+  }
+}
+
+
 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
 
   // Allocate a chunk out of the current virtual space.
   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
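A quick worked example for the function above, using the non-class sizes from the ChunkSizes enum (numbers computed here for illustration; they are not part of the patch):

    // Non-class medium chunk: allocation happens on 8 K-word boundaries,
    // so the worst case is a boundary-to-boundary gap minus the smallest
    // chunk that could still be carved out of it:
    //   largest_possible_padding_size_for_chunk(8 * K, false)
    //     == 8192 - 128 == 8064 words (63 KB on a 64-bit VM).
    // A humongous request is only aligned to the smallest chunk size:
    //   largest_possible_padding_size_for_chunk(9000, false) == 0.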

@@ -1534,11 +2243,15 @@
 }

   // The expand amount is currently only determined by the requested sizes
   // and not how much committed memory is left in the current virtual space.


@@ -1674,17 +2387,21 @@
 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
   // Check if the compressed class space is full.
   if (is_class && Metaspace::using_class_space()) {
     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {

}


@@ -1698,10 +2415,13 @@
                             capacity_until_gc, committed_bytes);

   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
   size_t left_until_GC = capacity_until_gc - committed_bytes;
   size_t left_to_commit = MIN2(left_until_GC, left_until_max);

}

void MetaspaceGC::compute_new_size() {


@@ -1946,10 +2666,21 @@
 }

 void ChunkManager::locked_verify() {
   locked_verify_free_chunks_count();
   locked_verify_free_chunks_total();

 void ChunkManager::locked_print_free_chunks(outputStream* st) {
   assert_lock_strong(SpaceManager::expand_lock());
   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,


@@ -2017,10 +2748,139 @@
   ChunkList* free_list = find_free_chunks_list(word_size);
   assert(free_list != NULL, "Sanity check");

 chunk = free_list->head();

@@ -2046,14 +2906,16 @@
   chunk->set_next(NULL);
   chunk->set_prev(NULL);

   // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
   // work.

 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
   assert_lock_strong(SpaceManager::expand_lock());


@@ -2087,19 +2949,21 @@
   return chunk;
 }

 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
   assert_lock_strong(SpaceManager::expand_lock());


@@ -2114,15 +2978,28 @@
     _humongous_dictionary.return_chunk(chunk);
     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT "  (word size " SIZE_FORMAT ") to freelist.",
         chunk_size_name(index), p2i(chunk), chunk->word_size());
   }
   chunk->container()->dec_container_count();


@@ -2587,10 +3464,15 @@
                 sum_capacity_in_chunks_in_use(), allocated_chunks_words());

   MutexLockerEx fcl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);


@@ -2710,49 +3592,10 @@
   }
   return next;
 }
 
-/*
- * The policy is to allocate up to _small_chunk_limit small chunks
- * after which only medium chunks are allocated.  This is done to
- * reduce fragmentation.  In some cases, this can result in a lot
- * of small chunks being allocated to the point where it's not
- * possible to expand.  If this happens, there may be no medium chunks
- * available and OOME would be thrown.  Instead of doing that,
- * if the allocation request size fits in a small chunk, an attempt
- * will be made to allocate a small chunk.
- */
-MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
-  size_t raw_word_size = get_allocation_word_size(word_size);


@@ -2806,12 +3649,12 @@
   // being in the dictionary alters a chunk.
   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
       Metachunk* curr = chunks_in_use(i);
       while (curr != NULL) {


@@ -3648,11 +4491,11 @@
     // The reserved space size may be bigger because of alignment, esp with UseLargePages
     assert(rs.size() >= CompressedClassSpaceSize,
            SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
     assert(using_class_space(), "Must be using class space");
     _class_space_list = new VirtualSpaceList(rs);

}


@@ -3755,11 +4598,11 @@
     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
     word_size = align_up(word_size, Metaspace::reserve_alignment_words());
 
     // Initialize the list of virtual spaces.
     _space_list = new VirtualSpaceList(word_size);
-    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
+    _chunk_manager_metadata = new ChunkManager(false /*metaspace*/);
 
     if (!_space_list->initialization_succeeded()) {
       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
     }

@@ -3955,23 +4798,12 @@
       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
     }
   }
 
   if (result == NULL) {
-    SpaceManager* sm;
-    if (is_class_space_allocation(mdtype)) {
-      sm = loader_data->metaspace_non_null()->class_vsm();
-    } else {
-      sm = loader_data->metaspace_non_null()->vsm();
-    }


@@ -4097,10 +4929,28 @@
     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
     class_vsm()->dump(out);
   }
 }

+#ifdef ASSERT
+static void do_verify_chunk(Metachunk* chunk) {
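Only the signature is visible here. Given the sentinel handling elsewhere in this diff (chunk->remove_sentinel() in purge()), checks of roughly this shape would be expected; this is a sketch, and the contains() and is_valid_sentinel() helpers are assumptions, not confirmed by this page:

    static void do_verify_chunk(Metachunk* chunk) {
      guarantee(chunk != NULL, "Chunk must not be NULL");
      // The chunk must lie inside its containing VirtualSpaceNode.
      VirtualSpaceNode* const vsn = chunk->container();
      guarantee(vsn->contains((MetaWord*)chunk), "Chunk outside its container");
      // Debug builds stamp each chunk header with a sentinel value.
      DEBUG_ONLY(guarantee(chunk->is_valid_sentinel(), "Bad chunk sentinel");)
    }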


@@ -4187,20 +5037,20 @@
   // The chunk sizes must be multiples of each other, or this will fail
   STATIC_ASSERT(MediumChunk % SmallChunk == 0);
   STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);

 { // No committed memory in VSN

@@ -4210,12 +5060,12 @@

 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
 // This doesn't work for systems with vm_page_size >= 16K.
 if (page_chunks < MediumChunk) {
   // 4 pages of VSN is committed, some is used by chunks

@@ -4231,12 +5081,12 @@
     assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
     assert(cm.sum_free_chunks() == words_left, "sizes should add up");
   }
 
   { // Half of VSN is committed, a humongous chunk is used
-    ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
-    VirtualSpaceNode vsn(vsn_test_size_bytes);
+    ChunkManager cm(false);
+    VirtualSpaceNode vsn(false, vsn_test_size_bytes);
     vsn.initialize();
     vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
     vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
     vsn.retire(&cm);

@@ -4263,11 +5113,11 @@
            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")",
(uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

 static void test_is_available_positive() {
   // Reserve some memory.


@@ -4281,11 +5131,11 @@ assert_is_available_positive(expand_word_size); }

 static void test_is_available_negative() {
   // Reserve some memory.


@@ -4296,11 +5146,11 @@ assert_is_available_negative(two_times_commit_word_size); }

 static void test_is_available_overflow() {
   // Reserve some memory.


@@ -4321,19 +5171,14 @@
     TestVirtualSpaceNodeTest::test_is_available_negative();
     TestVirtualSpaceNodeTest::test_is_available_overflow();
   }
 };
 
-void TestVirtualSpaceNode_test() {
-  TestVirtualSpaceNodeTest::test();
-  TestVirtualSpaceNodeTest::test_is_available();
-}

 // The following test is placed here instead of a gtest / unittest file
 // because the ChunkManager class is only available in this file.
 void ChunkManager_test_list_index() {


@@ -4366,270 +5211,10 @@
 #endif // !PRODUCT
 
 #ifdef ASSERT
 
-// ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
-// returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
-// content.
-class ChunkManagerReturnTestImpl : public CHeapObj<mtInternal> {


@@ -4676,5 +5261,41 @@
 void SpaceManager_test_adjust_initial_chunk_size() {
   SpaceManagerTest::test_adjust_initial_chunk_size();
 }

 #endif // ASSERT
 
+struct chunkmanager_statistics_t {
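The struct body is cut off here. Judging by the name and its placement next to the other test hooks, it presumably carries per-index chunk counts out to the whitebox/gtest layer; a plausible layout (an assumption, not confirmed by this page):

    struct chunkmanager_statistics_t {
      int num_specialized_chunks;
      int num_small_chunks;
      int num_medium_chunks;
      int num_humongous_chunks;
    };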
