// Support for concurrent collection policy decisions.
bool CompactibleFreeListSpace::should_concurrent_collect() const {
  // In the future we might want to add in fragmentation stats --
  // including erosion of the "mountain" into this decision as well.
  return !adaptive_freelists() && linearAllocationWouldFail();
}

bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  return _smallLinearAllocBlock._word_size == 0;
}

HeapWord*
CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  assert_locked();
  assert(size >= MinChunkSize, "minimum chunk size");
  assert(size < _smallLinearAllocBlock._allocation_size_limit,
         "maximum from smallLinearAllocBlock");
  return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
}

HeapWord*
CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock* blk,
                                                       size_t size) {
  assert_locked();
  assert(size >= MinChunkSize, "too small");
  HeapWord* res = NULL;
  // Try to do linear allocation from blk, making sure that the block
  // is non-empty before going any further.
  if (blk->_word_size == 0) {
    // We have probably been unable to fill this either in the prologue or
    // when it was exhausted at the last linear allocation. Bail out until
    // next time.
    assert(blk->_ptr == NULL, "consistency check");
    return NULL;
  }
  assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  res = getChunkFromLinearAllocBlockRemainder(blk, size);
  if (res != NULL) return res;
  // About to exhaust this linear allocation block.
  if (blk->_word_size == size) {   // exactly satisfied
    res = blk->_ptr;
    _bt.allocated(res, blk->_word_size);
  } else if (size + MinChunkSize <= blk->_refillSize) {
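    // The current remainder cannot satisfy the request, but a block refilled
    // to its target _refillSize words could, while still leaving at least a
    // MinChunkSize tail; so recycle the remainder and refill below.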
    size_t sz = blk->_word_size;
    // Update _unallocated_block if the size is such that the chunk would be
    // returned to the indexed free lists. All other chunks in the indexed
    // free lists are allocated from the dictionary, so _unallocated_block
    // has already been adjusted for them. Do it here so that the cost is
    // incurred for all chunks added back to the indexed free lists.
    if (sz < SmallForDictionary) {
      _bt.allocated(blk->_ptr, sz);
    }
    // Return the chunk that isn't big enough, and then refill below.
    addChunkToFreeLists(blk->_ptr, sz);
    split_birth(sz);
    // Don't keep statistics on adding back a chunk from a LinAB.
  } else {
    // A refilled block would not satisfy the request.
    return NULL;
  }
  blk->_ptr = NULL; blk->_word_size = 0;
  refillLinearAllocBlock(blk);
  assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
         "block was replenished");
  if (res != NULL) {
    split_birth(size);
    repairLinearAllocBlock(blk);
  } else if (blk->_ptr != NULL) {
    res = blk->_ptr;
    size_t blk_size = blk->_word_size;
    blk->_word_size -= size;
    blk->_ptr += size;
    split_birth(size);
    repairLinearAllocBlock(blk);
    // Update BOT last so that other (parallel) GC threads see a consistent
    // view of the BOT and free blocks.
    // Above must occur before BOT is updated below.
    OrderAccess::storestore();
    _bt.split_block(res, blk_size, size);  // adjust block offset table
  }
  return res;
}

HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
                                                  LinearAllocBlock* blk,
                                                  size_t size) {
  assert_locked();
  assert(size >= MinChunkSize, "too small");
  HeapWord* res = NULL;
  // This is the common case. Keep it simple.
  if (blk->_word_size >= size + MinChunkSize) {
    assert(blk->_ptr != NULL, "consistency check");
    res = blk->_ptr;
    // Note that the BOT is up-to-date for the linAB before allocation. It
    // indicates the start of the linAB. The split_block() updates the
    // BOT for the linAB after the allocation (indicates the start of the
    // next chunk to be allocated).
    size_t blk_size = blk->_word_size;
    blk->_word_size -= size;
    blk->_ptr += size;
    split_birth(size);
    repairLinearAllocBlock(blk);
    // Update BOT last so that other (parallel) GC threads see a consistent
    // view of the BOT and free blocks.
    // Above must occur before BOT is updated below.
    OrderAccess::storestore();
    _bt.split_block(res, blk_size, size);  // adjust block offset table
    _bt.allocated(res, size);
  }
  return res;
}
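
// ---------------------------------------------------------------------------
// Illustrative sketch only (not HotSpot code): a minimal, self-contained
// stand-in for the bump-pointer policy used above -- satisfy a request from
// the block's remainder only if at least (size + MinChunkSize) words would be
// left over, otherwise report failure so the caller can recycle the remainder
// and refill. All names here (ToyLinAB, toy_carve, kToyMinChunkWords) are
// hypothetical and exist only for this example.
#include <cstddef>
#include <cstdio>

struct ToyLinAB {
  size_t* _ptr;        // start of the free remainder, in word-sized units
  size_t  _word_size;  // words left in the remainder
};

static const size_t kToyMinChunkWords = 4;  // assumed stand-in for MinChunkSize

static size_t* toy_carve(ToyLinAB* blk, size_t size) {
  // Mirrors the "common case" of getChunkFromLinearAllocBlockRemainder():
  // only allocate if a legal (>= MinChunkSize) remainder would be left.
  if (blk->_word_size < size + kToyMinChunkWords) {
    return NULL;             // caller must recycle the remainder and refill
  }
  size_t* res = blk->_ptr;   // hand out the front of the block
  blk->_ptr       += size;   // bump past the allocation
  blk->_word_size -= size;   // shrink the remainder
  return res;
}

// Example use: carve an 8-word chunk from a 32-word block; a later 24-word
// request fails because it would not leave room for a legal remainder.
int main() {
  static size_t storage[32];
  ToyLinAB blk = { storage, 32 };
  size_t* a = toy_carve(&blk, 8);    // succeeds, 24 words remain
  size_t* b = toy_carve(&blk, 24);   // fails: 24 + 4 > 24
  std::printf("a=%p b=%p remaining=%zu\n", (void*)a, (void*)b, blk._word_size);
  return 0;
}
// ---------------------------------------------------------------------------
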
// We should be conservative in starting a collection cycle. To
// start too eagerly runs the risk of collecting too often in the
// extreme. To collect too rarely falls back on full collections,
// which works, even if not optimum in terms of concurrent work.
// As a workaround for too-eager collecting, use the flag
// UseCMSInitiatingOccupancyOnly. This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
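// (For example, running with -XX:+UseCMSInitiatingOccupancyOnly and
// -XX:CMSInitiatingOccupancyFraction=70 makes the occupancy test below the
// sole trigger, starting a cycle only once this generation is about 70% full.)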
// We want to start a new collection cycle if any of the following
// conditions hold:
// . our current occupancy exceeds the configured initiating occupancy
//   for this generation, or
// . we recently needed to expand this space and have not, since that
//   expansion, done a collection of this generation, or
// . the underlying space believes that it may be a good idea to initiate
//   a concurrent collection (this may be based on criteria such as the
//   following: the space uses linear allocation and linear allocation is
//   going to fail, or there is believed to be excessive fragmentation in
//   the generation, etc. ...), or
// [. (currently done by CMSCollector::shouldConcurrentCollect() only for
//   the case of the old generation; see CR 6543076):
//   we may be approaching a point at which allocation requests may fail
//   because we will be out of sufficient free space given allocation rate
//   estimates.]
bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
  assert_lock_strong(freelistLock());
  if (occupancy() > initiating_occupancy()) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
                          short_name(), occupancy(), initiating_occupancy());
    }
    return true;
  }
  if (UseCMSInitiatingOccupancyOnly) {
    return false;
  }
  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print(" %s: collect because expanded for allocation ",
                          short_name());
    }
    return true;
  }
  if (_cmsSpace->should_concurrent_collect()) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
                          short_name());
    }
    return true;
  }
  return false;
}