8345655: Move reservation code out of ReservedSpace
Reviewed-by: azafari, jsjolen
This commit is contained in:
parent d50b725ac0
commit 73b5dbaec3
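
The hunks below all follow one migration pattern: call sites stop constructing ReservedSpace directly (and stop calling ReservedSpace::release()) and instead go through the new MemoryReserver API. A minimal before/after sketch of that pattern, distilled from the hunks in this commit (the failure handling shown is illustrative; each call site keeps its own):

// Before: the ReservedSpace constructor performed the reservation itself.
ReservedSpace rs(size, alignment, os::vm_page_size());

// After: the reservation is requested from MemoryReserver; ReservedSpace only describes the result.
ReservedSpace rs = MemoryReserver::reserve(size, alignment, os::vm_page_size());
if (!rs.is_reserved()) {
  // call-site specific failure handling
}
// Releasing likewise goes through the reserver:
if (rs.is_reserved()) {
  MemoryReserver::release(rs);
}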
@@ -47,6 +47,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allStatic.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/memRegion.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedKlass.inline.hpp"
@@ -193,7 +194,7 @@ ArchiveBuilder::~ArchiveBuilder() {
delete _klasses;
delete _symbols;
if (_shared_rs.is_reserved()) {
_shared_rs.release();
MemoryReserver::release(_shared_rs);
}
}

@@ -347,7 +348,9 @@ size_t ArchiveBuilder::estimate_archive_size() {

address ArchiveBuilder::reserve_buffer() {
size_t buffer_size = estimate_archive_size();
ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size());
ReservedSpace rs = MemoryReserver::reserve(buffer_size,
MetaspaceShared::core_region_alignment(),
os::vm_page_size());
if (!rs.is_reserved()) {
log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
MetaspaceShared::unrecoverable_writing_error();

@@ -29,6 +29,8 @@
#include "cds/dumpAllocStats.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/virtualspace.hpp"
#include "oops/array.hpp"
#include "oops/klass.hpp"
#include "runtime/os.hpp"

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "classfile/compactHashtable.hpp"
#include "memory/allStatic.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "oops/array.hpp"
#include "oops/oop.hpp"
#include "utilities/exceptions.hpp"

@@ -49,6 +49,7 @@ class ClassFileStream;
class ClassLoaderData;
class ClassPathEntry;
class outputStream;
class ReservedSpace;

class SharedClassPathEntry : public MetaspaceObj {
enum {
@@ -481,7 +482,6 @@ public:
void unmap_region(int i);
void close();
bool is_open() { return _file_open; }
ReservedSpace reserve_shared_memory();

// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private.

@@ -62,6 +62,7 @@
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
@@ -282,7 +283,7 @@ void MetaspaceShared::initialize_for_static_dump() {
SharedBaseAddress = (size_t)_requested_base_address;

size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
_symbol_rs = ReservedSpace(symbol_rs_size, mtClassShared);
_symbol_rs = MemoryReserver::reserve(symbol_rs_size, mtClassShared);
if (!_symbol_rs.is_reserved()) {
log_error(cds)("Unable to reserve memory for symbols: " SIZE_FORMAT " bytes.", symbol_rs_size);
MetaspaceShared::unrecoverable_writing_error();
@@ -1266,7 +1267,9 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
if (use_requested_addr) {
assert(!total_space_rs.is_reserved(), "Should not be reserved for Windows");
log_info(cds)("Windows mmap workaround: releasing archive space.");
archive_space_rs.release();
MemoryReserver::release(archive_space_rs);
// Mark as not reserved
archive_space_rs = {};
}
}
MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
@@ -1438,8 +1441,10 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
"Archive base address unaligned: " PTR_FORMAT ", needs alignment: %zu.",
p2i(base_address), archive_space_alignment);

archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
os::vm_page_size(), (char*)base_address);
archive_space_rs = MemoryReserver::reserve((char*)base_address,
archive_space_size,
archive_space_alignment,
os::vm_page_size());
if (archive_space_rs.is_reserved()) {
assert(base_address == nullptr ||
(address)archive_space_rs.base() == base_address, "Sanity");
@@ -1505,10 +1510,14 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
// caller will not split the combined space for mapping, instead read the archive data
// via sequential file IO.
address ccs_base = base_address + archive_space_size + gap_size;
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
os::vm_page_size(), (char*)base_address);
class_space_rs = ReservedSpace(class_space_size, class_space_alignment,
os::vm_page_size(), (char*)ccs_base);
archive_space_rs = MemoryReserver::reserve((char*)base_address,
archive_space_size,
archive_space_alignment,
os::vm_page_size());
class_space_rs = MemoryReserver::reserve((char*)ccs_base,
class_space_size,
class_space_alignment,
os::vm_page_size());
}
if (!archive_space_rs.is_reserved() || !class_space_rs.is_reserved()) {
release_reserved_spaces(total_space_rs, archive_space_rs, class_space_rs);
@@ -1519,8 +1528,10 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
MemTracker::record_virtual_memory_tag(class_space_rs.base(), mtClass);
} else {
if (use_archive_base_addr && base_address != nullptr) {
total_space_rs = ReservedSpace(total_range_size, base_address_alignment,
os::vm_page_size(), (char*) base_address);
total_space_rs = MemoryReserver::reserve((char*) base_address,
total_range_size,
base_address_alignment,
os::vm_page_size());
} else {
// We did not manage to reserve at the preferred address, or were instructed to relocate. In that
// case we reserve wherever possible, but the start address needs to be encodable as narrow Klass
@@ -1568,15 +1579,18 @@ void MetaspaceShared::release_reserved_spaces(ReservedSpace& total_space_rs,
ReservedSpace& class_space_rs) {
if (total_space_rs.is_reserved()) {
log_debug(cds)("Released shared space (archive + class) " INTPTR_FORMAT, p2i(total_space_rs.base()));
total_space_rs.release();
MemoryReserver::release(total_space_rs);
total_space_rs = {};
} else {
if (archive_space_rs.is_reserved()) {
log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
archive_space_rs.release();
MemoryReserver::release(archive_space_rs);
archive_space_rs = {};
}
if (class_space_rs.is_reserved()) {
log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
class_space_rs.release();
MemoryReserver::release(class_space_rs);
class_space_rs = {};
}
}
}

@@ -27,6 +27,7 @@

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/virtualspace.hpp"
#include "oops/oop.hpp"
#include "utilities/macros.hpp"

@@ -44,6 +44,7 @@
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
@@ -318,7 +319,7 @@ void CodeCache::initialize_heaps() {
FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);

ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
ReservedSpace rs = reserve_heap_memory(cache_size, ps);

// Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
@@ -348,11 +349,12 @@ size_t CodeCache::page_size(bool aligned, size_t min_pages) {
os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
// Align and reserve space for code cache
const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
const size_t rs_size = align_up(size, rs_align);
ReservedCodeSpace rs(rs_size, rs_align, rs_ps);

ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
if (!rs.is_reserved()) {
vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
rs_size/K));
@@ -1130,7 +1132,7 @@ void CodeCache::initialize() {
// If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely
// users want to use the largest available page.
const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8;
ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
ReservedSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages));
// Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
add_heap(rs, "CodeCache", CodeBlobType::All);

@@ -79,6 +79,7 @@ class OopClosure;
class ShenandoahParallelCodeHeapIterator;
class NativePostCallNop;
class DeoptimizationScope;
class ReservedSpace;

#ifdef LINUX
#define DEFAULT_PERFMAP_FILENAME "/tmp/perf-%p.map"
@@ -122,7 +123,7 @@ class CodeCache : AllStatic {
static CodeHeap* get_code_heap(CodeBlobType code_blob_type); // Returns the CodeHeap for the given CodeBlobType
// Returns the name of the VM option to set the size of the corresponding CodeHeap
static const char* get_code_heap_flag_name(CodeBlobType code_blob_type);
static ReservedCodeSpace reserve_heap_memory(size_t size, size_t rs_ps); // Reserves one continuous chunk of memory for the CodeHeaps
static ReservedSpace reserve_heap_memory(size_t size, size_t rs_ps); // Reserves one continuous chunk of memory for the CodeHeaps

// Iteration
static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap

@@ -98,6 +98,7 @@
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/iterator.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -1212,8 +1213,21 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
size_t size,
size_t translation_factor) {
size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);

// When a page size is given we don't want to mix large
// and normal pages. If the size is not a multiple of the
// page size it will be aligned up to achieve this.
size_t alignment = os::vm_allocation_granularity();
if (preferred_page_size != os::vm_page_size()) {
alignment = MAX2(preferred_page_size, alignment);
size = align_up(size, alignment);
}

// Allocate a new reserved space, preferring to use large pages.
ReservedSpace rs(size, preferred_page_size);
ReservedSpace rs = MemoryReserver::reserve(size,
alignment,
preferred_page_size);

size_t page_size = rs.page_size();
G1RegionToSpaceMapper* result =
G1RegionToSpaceMapper::create_mapper(rs,
@@ -1288,7 +1302,7 @@ jint G1CollectedHeap::initialize() {
initialize_reserved_region(heap_rs);

// Create the barrier set for the entire reserved region.
G1CardTable* ct = new G1CardTable(heap_rs.region());
G1CardTable* ct = new G1CardTable(_reserved);
G1BarrierSet* bs = new G1BarrierSet(ct);
bs->initialize();
assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
@@ -1440,7 +1454,7 @@ jint G1CollectedHeap::initialize() {

G1InitLogger::print();

FullGCForwarding::initialize(heap_rs.region());
FullGCForwarding::initialize(_reserved);

return JNI_OK;
}

@@ -26,7 +26,6 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/virtualspace.hpp"

G1CMBitMap::G1CMBitMap() : MarkBitMap(), _listener() {
_listener.set_bitmap(this);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,10 @@
#define SHARE_GC_G1_G1PAGEBASEDVIRTUALSPACE_HPP

#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.hpp"

class ReservedSpace;
class WorkerThreads;

// Virtual space management helper for a virtual space with an OS page allocation

@@ -28,7 +28,7 @@
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/shared/gc_globals.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/virtualspace.hpp"
#include "memory/reservedSpace.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"

@@ -29,6 +29,7 @@
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"

class ReservedSpace;
class WorkerThreads;

class G1MappingChangedListener {

@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "nmt/memTracker.hpp"
#include "memory/memoryReserver.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"
@@ -47,7 +47,7 @@ void ObjectStartArray::initialize(MemRegion reserved_region) {

// Do not use large-pages for the backing store. The one large page region
// will be used for the heap proper.
ReservedSpace backing_store(bytes_to_reserve, mtGC);
ReservedSpace backing_store = MemoryReserver::reserve(bytes_to_reserve, mtGC);
if (!backing_store.is_reserved()) {
vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
}

@@ -26,6 +26,7 @@
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "memory/memoryReserver.hpp"
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
@@ -42,11 +43,14 @@ ParMarkBitMap::initialize(MemRegion covered_region)
const size_t raw_bytes = words * sizeof(idx_t);
const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
const size_t rs_align = MAX2(page_sz, granularity);

_reserved_byte_size = align_up(raw_bytes, rs_align);

ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
rs_align,
page_sz);

const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
const size_t used_page_sz = rs.page_size();
os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes,
rs.base(), rs.size(), used_page_sz);
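
Note for this hunk (and the matching ones in psParallelCompact.cpp and cardTable.cpp further down): the old code passed an alignment of 0 whenever the page size was the default page size, while the new MemoryReserver path always passes MAX2(page_sz, granularity). This matches the argument checks in the new memoryReserver.cpp later in this diff, which (as a sketch of the invariant, paraphrased from that file) require roughly:

// alignment must be a power of two and at least os::vm_allocation_granularity()
assert(alignment >= os::vm_allocation_granularity(), "Must be set");
assert(is_power_of_2(alignment), "not a power of 2");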
@@ -68,7 +72,9 @@ ParMarkBitMap::initialize(MemRegion covered_region)
delete _virtual_space;
_virtual_space = nullptr;
// Release memory reserved in the space.
rs.release();
if (rs.is_reserved()) {
MemoryReserver::release(rs);
}
}
return false;
}

@@ -45,8 +45,8 @@
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
@@ -74,7 +74,7 @@ jint ParallelScavengeHeap::initialize() {
ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, GenAlignment);
assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

PSCardTable* card_table = new PSCardTable(heap_rs.region());
PSCardTable* card_table = new PSCardTable(_reserved);
card_table->initialize(old_rs.base(), young_rs.base());

CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
@@ -130,7 +130,7 @@ jint ParallelScavengeHeap::initialize() {

ParallelInitLogger::print();

FullGCForwarding::initialize(heap_rs.region());
FullGCForwarding::initialize(_reserved);

return JNI_OK;
}

@@ -44,6 +44,7 @@ class MemoryPool;
class PSAdaptiveSizePolicy;
class PSCardTable;
class PSHeapSummary;
class ReservedSpace;

// ParallelScavengeHeap is the implementation of CollectedHeap for Parallel GC.
//

@@ -33,6 +33,8 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

class ReservedSpace;

class PSOldGen : public CHeapObj<mtGC> {
friend class VMStructs;
private:

@@ -70,6 +70,7 @@
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -240,11 +241,14 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
const size_t raw_bytes = count * element_size;
const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
const size_t rs_align = MAX2(page_sz, granularity);

_reserved_byte_size = align_up(raw_bytes, rs_align);

ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
rs_align,
page_sz);

const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
rs.size(), page_sz);

@@ -257,7 +261,10 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
}
delete vspace;
// Release memory reserved in the space.
rs.release();
if (rs.is_reserved()) {
MemoryReserver::release(rs);
rs = {};
}
}

return nullptr;

@@ -24,7 +24,7 @@

#include "precompiled.hpp"
#include "gc/parallel/psVirtualspace.hpp"
#include "memory/virtualspace.hpp"
#include "memory/reservedSpace.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#define SHARE_GC_PARALLEL_PSVIRTUALSPACE_HPP

#include "memory/allocation.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/virtualspace.hpp"

// VirtualSpace for the parallel scavenge collector.

@@ -31,6 +31,8 @@
#include "gc/parallel/psVirtualspace.hpp"
#include "gc/parallel/spaceCounters.hpp"

class ReservedSpace;

class PSYoungGen : public CHeapObj<mtGC> {
friend class VMStructs;
friend class ParallelScavengeHeap;

@@ -47,6 +47,7 @@
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"

@@ -54,6 +54,7 @@ class DefNewGeneration;
class GCMemoryManager;
class ContiguousSpace;
class OopClosure;
class ReservedSpace;

class Generation: public CHeapObj<mtGC> {
friend class VMStructs;

@@ -28,8 +28,8 @@
#include "gc/shared/collectedHeap.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
@@ -47,7 +47,7 @@ SerialBlockOffsetTable::SerialBlockOffsetTable(MemRegion reserved,
size_t init_word_size):
_reserved(reserved) {
size_t size = compute_size(reserved.word_size());
ReservedSpace rs(size, mtGC);
ReservedSpace rs = MemoryReserver::reserve(size, mtGC);
if (!rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
}

@@ -63,6 +63,7 @@
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
@@ -189,7 +190,7 @@ jint SerialHeap::initialize() {
ReservedSpace young_rs = heap_rs.first_part(MaxNewSize, GenAlignment);
ReservedSpace old_rs = heap_rs.last_part(MaxNewSize, GenAlignment);

_rem_set = new CardTableRS(heap_rs.region());
_rem_set = new CardTableRS(_reserved);
_rem_set->initialize(young_rs.base(), old_rs.base());

CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);

@@ -29,7 +29,7 @@
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "memory/memoryReserver.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
@@ -80,15 +80,14 @@ void CardTable::initialize(void* region0_start, void* region1_start) {
HeapWord* low_bound = _whole_heap.start();
HeapWord* high_bound = _whole_heap.end();

const size_t rs_align = _page_size == os::vm_page_size() ? 0 :
MAX2(_page_size, os::vm_allocation_granularity());
ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size);
const size_t rs_align = MAX2(_page_size, os::vm_allocation_granularity());
ReservedSpace rs = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size);

MemTracker::record_virtual_memory_tag((address)heap_rs.base(), mtGC);
MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);

os::trace_page_sizes("Card Table", num_bytes, num_bytes,
heap_rs.base(), heap_rs.size(), _page_size);
if (!heap_rs.is_reserved()) {
rs.base(), rs.size(), _page_size);
if (!rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for the "
"card marking array");
}
@@ -97,7 +96,7 @@ void CardTable::initialize(void* region0_start, void* region1_start) {
// then add it to _byte_map_base, i.e.
//
// _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
_byte_map = (CardValue*) heap_rs.base();
_byte_map = (CardValue*) rs.base();
_byte_map_base = _byte_map - (uintptr_t(low_bound) >> _card_shift);
assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
assert(byte_for(high_bound-1) <= &_byte_map[last_valid_index()], "Checking end of map");

@@ -30,7 +30,6 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/javaThread.hpp"

@@ -45,6 +45,7 @@
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "gc/shared/generationCounters.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/perfData.hpp"

void GenerationCounters::initialize(const char* name, int ordinal, int spaces,

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,11 @@
#ifndef SHARE_GC_SHARED_GENERATIONCOUNTERS_HPP
#define SHARE_GC_SHARED_GENERATIONCOUNTERS_HPP

#include "memory/virtualspace.hpp"
#include "memory/allocation.hpp"
#include "runtime/perfDataTypes.hpp"

class VirtualSpace;

// A GenerationCounter is a holder class for performance counters
// that track a generation

@@ -26,8 +26,10 @@
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/init.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/reservedSpace.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/init.hpp"

void ShenandoahCardTable::initialize() {
size_t num_cards = cards_required(_whole_heap.word_size());
@@ -41,9 +43,9 @@ void ShenandoahCardTable::initialize() {
HeapWord* high_bound = _whole_heap.end();

// ReservedSpace constructor would assert rs_align >= os::vm_page_size().
const size_t rs_align = _page_size == os::vm_page_size() ? 0 : MAX2(_page_size, granularity);
const size_t rs_align = MAX2(_page_size, granularity);

ReservedSpace write_space(_byte_map_size, rs_align, _page_size);
ReservedSpace write_space = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size);
initialize(write_space);

// The assembler store_check code will do an unsigned shift of the oop,
@@ -58,7 +60,7 @@ void ShenandoahCardTable::initialize() {
_write_byte_map = _byte_map;
_write_byte_map_base = _byte_map_base;

ReservedSpace read_space(_byte_map_size, rs_align, _page_size);
ReservedSpace read_space = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size);
initialize(read_space);

_read_byte_map = (CardValue*) read_space.base();

@@ -32,8 +32,8 @@
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/atomic.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :

@@ -26,11 +26,12 @@
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP

#include "memory/allocation.hpp"
#include "memory/virtualspace.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/virtualspace.hpp"

class ShenandoahCollectionSet : public CHeapObj<mtGC> {
friend class ShenandoahHeap;

@@ -28,7 +28,6 @@

#include "cds/archiveHeapWriter.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/fullGCForwarding.hpp"
@@ -86,9 +85,10 @@
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "memory/allocation.hpp"
#include "memory/allocation.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "nmt/mallocTracker.hpp"
@@ -156,6 +156,19 @@ public:
}
};

static ReservedSpace reserve(size_t size, size_t preferred_page_size) {
// When a page size is given we don't want to mix large
// and normal pages. If the size is not a multiple of the
// page size it will be aligned up to achieve this.
size_t alignment = os::vm_allocation_granularity();
if (preferred_page_size != os::vm_page_size()) {
alignment = MAX2(preferred_page_size, alignment);
size = align_up(size, alignment);
}

return MemoryReserver::reserve(size, alignment, preferred_page_size);
}

jint ShenandoahHeap::initialize() {
//
// Figure out heap sizing
@@ -281,7 +294,7 @@ jint ShenandoahHeap::initialize() {
"Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
_bitmap_bytes_per_slice, bitmap_page_size);

ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
ReservedSpace bitmap = reserve(_bitmap_size, bitmap_page_size);
os::trace_page_sizes_for_requested_size("Mark Bitmap",
bitmap_size_orig, bitmap_page_size,
bitmap.base(),
@@ -301,7 +314,7 @@ jint ShenandoahHeap::initialize() {
_marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);

if (ShenandoahVerify) {
ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
ReservedSpace verify_bitmap = reserve(_bitmap_size, bitmap_page_size);
os::trace_page_sizes_for_requested_size("Verify Bitmap",
bitmap_size_orig, bitmap_page_size,
verify_bitmap.base(),
@@ -319,7 +332,7 @@ jint ShenandoahHeap::initialize() {
// Reserve aux bitmap for use in object_iterate(). We don't commit it here.
size_t aux_bitmap_page_size = bitmap_page_size;

ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
ReservedSpace aux_bitmap = reserve(_bitmap_size, aux_bitmap_page_size);
os::trace_page_sizes_for_requested_size("Aux Bitmap",
bitmap_size_orig, aux_bitmap_page_size,
aux_bitmap.base(),
@@ -337,7 +350,7 @@ jint ShenandoahHeap::initialize() {
size_t region_storage_size = align_up(region_storage_size_orig,
MAX2(region_page_size, os::vm_allocation_granularity()));

ReservedSpace region_storage(region_storage_size, region_page_size);
ReservedSpace region_storage = reserve(region_storage_size, region_page_size);
os::trace_page_sizes_for_requested_size("Region Storage",
region_storage_size_orig, region_page_size,
region_storage.base(),
@@ -363,7 +376,7 @@ jint ShenandoahHeap::initialize() {
for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
char* req_addr = (char*)addr;
assert(is_aligned(req_addr, cset_align), "Should be aligned");
cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size);
if (cset_rs.is_reserved()) {
assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
@@ -372,7 +385,7 @@ jint ShenandoahHeap::initialize() {
}

if (_collection_set == nullptr) {
cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size());
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
}
os::trace_page_sizes_for_requested_size("Collection Set",
@@ -2738,4 +2751,3 @@ void ShenandoahHeap::log_heap_status(const char* msg) const {
global_generation()->log_status(msg);
}
}

@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/virtualspace.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/globals.hpp"
@@ -97,14 +98,16 @@ JfrVirtualMemorySegment::JfrVirtualMemorySegment() :

JfrVirtualMemorySegment::~JfrVirtualMemorySegment() {
decommit();
_rs.release();
if (_rs.is_reserved()) {
MemoryReserver::release(_rs);
}
}

bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes) {
assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
_rs = ReservedSpace(reservation_size_request_bytes,
os::vm_allocation_granularity(),
os::vm_page_size());
_rs = MemoryReserver::reserve(reservation_size_request_bytes,
os::vm_allocation_granularity(),
os::vm_page_size());
if (!_rs.is_reserved()) {
return false;
}

@@ -24,7 +24,7 @@

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "nmt/memTracker.hpp"
#include "memory/memoryReserver.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
@@ -223,7 +223,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

// reserve space for _segmap
ReservedSpace seg_rs(reserved_segments_size, mtCode);
ReservedSpace seg_rs = MemoryReserver::reserve(reserved_segments_size, mtCode);
if (!_segmap.initialize(seg_rs, committed_segments_size)) {
return false;
}

@@ -31,6 +31,8 @@
#include "runtime/atomic.hpp"
#include "utilities/macros.hpp"

class ReservedSpace;

// Blocks

class HeapBlock {

src/hotspot/share/memory/memoryReserver.cpp (new file, 693 lines)
@@ -0,0 +1,693 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "memory/memoryReserver.hpp"
#include "oops/compressedOops.hpp"
#include "oops/markWord.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"

static void sanity_check_size_and_alignment(size_t size, size_t alignment) {
assert(size > 0, "Precondition");

DEBUG_ONLY(const size_t granularity = os::vm_allocation_granularity());
assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");

assert(alignment >= granularity, "Must be set");
assert(is_power_of_2(alignment), "not a power of 2");
assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
}

static void sanity_check_page_size(size_t page_size) {
assert(page_size >= os::vm_page_size(), "Invalid page size");
assert(is_power_of_2(page_size), "Invalid page size");
}

static void sanity_check_arguments(size_t size, size_t alignment, size_t page_size) {
sanity_check_size_and_alignment(size, alignment);
sanity_check_page_size(page_size);
}

static bool large_pages_requested() {
return UseLargePages &&
(!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes));
}

static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
if (large_pages_requested()) {
// Compressed oops logging.
log_debug(gc, heap, coops)("Reserve regular memory without large pages");
// JVM style warning that we did not succeed in using large pages.
char msg[128];
jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory using large pages. "
"req_addr: " PTR_FORMAT " bytes: " SIZE_FORMAT,
req_addr, bytes);
warning("%s", msg);
}
}

static bool use_explicit_large_pages(size_t page_size) {
return !os::can_commit_large_page_memory() &&
page_size != os::vm_page_size();
}

static char* reserve_memory_inner(char* requested_address,
size_t size,
size_t alignment,
bool exec,
MemTag mem_tag) {
// If the memory was requested at a particular address, use
// os::attempt_reserve_memory_at() to avoid mapping over something
// important. If the reservation fails, return null.
if (requested_address != nullptr) {
assert(is_aligned(requested_address, alignment),
"Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
p2i(requested_address), alignment);
return os::attempt_reserve_memory_at(requested_address, size, exec, mem_tag);
}

// Optimistically assume that the OS returns an aligned base pointer.
// When reserving a large address range, most OSes seem to align to at
// least 64K.
char* base = os::reserve_memory(size, exec, mem_tag);
if (is_aligned(base, alignment)) {
return base;
}

// Base not aligned, retry.
if (!os::release_memory(base, size)) {
fatal("os::release_memory failed");
}

// Map using the requested alignment.
return os::reserve_memory_aligned(size, alignment, exec);
}

ReservedSpace MemoryReserver::reserve_memory(char* requested_address,
size_t size,
size_t alignment,
bool exec,
MemTag mem_tag) {
char* base = reserve_memory_inner(requested_address, size, alignment, exec, mem_tag);

if (base != nullptr) {
return ReservedSpace(base, size, alignment, os::vm_page_size(), exec, false /* special */);
}

// Failed
return {};
}

ReservedSpace MemoryReserver::reserve_memory_special(char* requested_address,
size_t size,
size_t alignment,
size_t page_size,
bool exec) {
log_trace(pagesize)("Attempt special mapping: size: " SIZE_FORMAT "%s, "
"alignment: " SIZE_FORMAT "%s",
byte_size_in_exact_unit(size), exact_unit_for_byte_size(size),
byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));

char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);

if (base != nullptr) {
assert(is_aligned(base, alignment),
"reserve_memory_special() returned an unaligned address, "
"base: " PTR_FORMAT " alignment: " SIZE_FORMAT_X,
p2i(base), alignment);

return ReservedSpace(base, size, alignment, page_size, exec, true /* special */);
}

// Failed
return {};
}

ReservedSpace MemoryReserver::reserve(char* requested_address,
size_t size,
size_t alignment,
size_t page_size,
bool executable,
MemTag mem_tag) {
sanity_check_arguments(size, alignment, page_size);

// Reserve the memory.

// There are basically three different cases that we need to handle:
// 1. Mapping backed by a file
// 2. Mapping backed by explicit large pages
// 3. Mapping backed by normal pages or transparent huge pages
// The first two have restrictions that requires the whole mapping to be
// committed up front. To record this the ReservedSpace is marked 'special'.

// == Case 1 ==
// This case is contained within the HeapReserver

// == Case 2 ==
if (use_explicit_large_pages(page_size)) {
// System can't commit large pages i.e. use transparent huge pages and
// the caller requested large pages. To satisfy this request we use
// explicit large pages and these have to be committed up front to ensure
// no reservations are lost.
do {
ReservedSpace reserved = reserve_memory_special(requested_address, size, alignment, page_size, executable);
if (reserved.is_reserved()) {
// Successful reservation using large pages.
return reserved;
}
page_size = os::page_sizes().next_smaller(page_size);
} while (page_size > os::vm_page_size());

// Failed to reserve explicit large pages, do proper logging.
log_on_large_pages_failure(requested_address, size);
// Now fall back to normal reservation.
assert(page_size == os::vm_page_size(), "inv");
}

// == Case 3 ==
return reserve_memory(requested_address, size, alignment, executable, mem_tag);
}

ReservedSpace MemoryReserver::reserve(char* requested_address,
size_t size,
size_t alignment,
size_t page_size,
MemTag mem_tag) {
return reserve(requested_address,
size,
alignment,
page_size,
!ExecMem,
mem_tag);
}

ReservedSpace MemoryReserver::reserve(size_t size,
size_t alignment,
size_t page_size,
MemTag mem_tag) {
return reserve(nullptr /* requested_address */,
size,
alignment,
page_size,
mem_tag);
}

ReservedSpace MemoryReserver::reserve(size_t size,
MemTag mem_tag) {
// Want to use large pages where possible. If the size is
// not large page aligned the mapping will be a mix of
// large and normal pages.
size_t page_size = os::page_size_for_region_unaligned(size, 1);
size_t alignment = os::vm_allocation_granularity();

return reserve(size,
alignment,
page_size,
mem_tag);
}

bool MemoryReserver::release(const ReservedSpace& reserved) {
assert(reserved.is_reserved(), "Precondition");

if (reserved.special()) {
return os::release_memory_special(reserved.base(), reserved.size());
} else {
return os::release_memory(reserved.base(), reserved.size());
}
}
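
Taken together, reserve() and release() above form the core API that the call sites earlier in this diff now use. The overloads chain into one another; a compact summary of that chain (a reading aid distilled from the definitions above, not new code):

// reserve(size, mem_tag)
//   -> reserve(size, os::vm_allocation_granularity(), os::page_size_for_region_unaligned(size, 1), mem_tag)
//   -> reserve(nullptr /* requested_address */, size, alignment, page_size, mem_tag)
//   -> reserve(requested_address, size, alignment, page_size, !ExecMem, mem_tag)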
|
||||
|
||||
static char* map_memory_to_file(char* requested_address,
|
||||
size_t size,
|
||||
size_t alignment,
|
||||
int fd,
|
||||
MemTag mem_tag) {
|
||||
// If the memory was requested at a particular address, use
|
||||
// os::attempt_reserve_memory_at() to avoid mapping over something
|
||||
// important. If the reservation fails, return null.
|
||||
if (requested_address != nullptr) {
|
||||
assert(is_aligned(requested_address, alignment),
|
||||
"Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
|
||||
p2i(requested_address), alignment);
|
||||
return os::attempt_map_memory_to_file_at(requested_address, size, fd, mem_tag);
|
||||
}
|
||||
|
||||
// Optimistically assume that the OS returns an aligned base pointer.
|
||||
// When reserving a large address range, most OSes seem to align to at
|
||||
// least 64K.
|
||||
char* base = os::map_memory_to_file(size, fd);
|
||||
if (is_aligned(base, alignment)) {
|
||||
return base;
|
||||
}
|
||||
|
||||
|
||||
// Base not aligned, retry.
|
||||
if (!os::unmap_memory(base, size)) {
|
||||
fatal("os::unmap_memory failed");
|
||||
}
|
||||
|
||||
// Map using the requested alignment.
|
||||
return os::map_memory_to_file_aligned(size, alignment, fd, mem_tag);
|
||||
}
|
||||
|
||||
ReservedSpace FileMappedMemoryReserver::reserve(char* requested_address,
|
||||
size_t size,
|
||||
size_t alignment,
|
||||
int fd,
|
||||
MemTag mem_tag) {
|
||||
sanity_check_size_and_alignment(size, alignment);
|
||||
|
||||
char* base = map_memory_to_file(requested_address, size, alignment, fd, mem_tag);
|
||||
|
||||
if (base != nullptr) {
|
||||
return ReservedSpace(base, size, alignment, os::vm_page_size(), !ExecMem, true /* special */);
|
||||
}
|
||||
|
||||
// Failed
|
||||
return {};
|
||||
}
|
||||
|
||||
ReservedSpace CodeMemoryReserver::reserve(size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size) {
|
||||
return MemoryReserver::reserve(nullptr /* requested_address */,
|
||||
size,
|
||||
alignment,
|
||||
page_size,
|
||||
ExecMem,
|
||||
mtCode);
|
||||
}
|
||||
|
||||
ReservedHeapSpace HeapReserver::Instance::reserve_uncompressed_oops_heap(size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size) {
|
||||
ReservedSpace reserved = reserve_memory(size, alignment, page_size);
|
||||
|
||||
if (reserved.is_reserved()) {
|
||||
return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
|
||||
}
|
||||
|
||||
// Failed
|
||||
return {};
|
||||
}
|
||||
|
||||
|
||||
static int maybe_create_file(const char* heap_allocation_directory) {
|
||||
if (heap_allocation_directory == nullptr) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int fd = os::create_file_for_heap(heap_allocation_directory);
|
||||
if (fd == -1) {
|
||||
vm_exit_during_initialization(
|
||||
err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
|
||||
}
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
HeapReserver::Instance::Instance(const char* heap_allocation_directory)
|
||||
: _fd(maybe_create_file(heap_allocation_directory)) {}
|
||||
|
||||
HeapReserver::Instance::~Instance() {
|
||||
if (_fd != -1) {
|
||||
::close(_fd);
|
||||
}
|
||||
}
|
||||
|
||||
ReservedSpace HeapReserver::Instance::reserve_memory(size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size,
|
||||
char* requested_address) {
|
||||
|
||||
// There are basically three different cases that we need to handle below:
|
||||
// 1. Mapping backed by a file
|
||||
// 2. Mapping backed by explicit large pages
|
||||
// 3. Mapping backed by normal pages or transparent huge pages
|
||||
// The first two have restrictions that requires the whole mapping to be
|
||||
// committed up front. To record this the ReservedSpace is marked 'special'.
|
||||
|
||||
// == Case 1 ==
|
||||
if (_fd != -1) {
|
||||
// When there is a backing file directory for this space then whether
|
||||
// large pages are allocated is up to the filesystem of the backing file.
|
||||
// So UseLargePages is not taken into account for this reservation.
|
||||
//
|
||||
// If requested, let the user know that explicit large pages can't be used.
|
||||
if (use_explicit_large_pages(page_size) && large_pages_requested()) {
|
||||
log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
|
||||
}
|
||||
|
||||
// Always return, not possible to fall back to reservation not using a file.
|
||||
return FileMappedMemoryReserver::reserve(requested_address, size, alignment, _fd, mtJavaHeap);
|
||||
}
|
||||
|
||||
// == Case 2 & 3 ==
|
||||
return MemoryReserver::reserve(requested_address, size, alignment, page_size, mtJavaHeap);
|
||||
}
|
||||
|
||||
// Compressed oop support is not relevant in 32bit builds.
|
||||
#ifdef _LP64
|
||||
|
||||
void HeapReserver::Instance::release(const ReservedSpace& reserved) {
|
||||
if (reserved.is_reserved()) {
|
||||
if (_fd == -1) {
|
||||
if (reserved.special()) {
|
||||
os::release_memory_special(reserved.base(), reserved.size());
|
||||
} else{
|
||||
os::release_memory(reserved.base(), reserved.size());
|
||||
}
|
||||
} else {
|
||||
os::unmap_memory(reserved.base(), reserved.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
|
||||
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
|
||||
// might still fulfill the wishes of the caller.
|
||||
// Assures the memory is aligned to 'alignment'.
|
||||
ReservedSpace HeapReserver::Instance::try_reserve_memory(size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size,
|
||||
char* requested_address) {
|
||||
// Try to reserve the memory for the heap.
|
||||
log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
|
||||
" heap of size " SIZE_FORMAT_X,
|
||||
p2i(requested_address),
|
||||
size);
|
||||
|
||||
ReservedSpace reserved = reserve_memory(size, alignment, page_size, requested_address);
|
||||
|
||||
if (reserved.is_reserved()) {
|
||||
// Check alignment constraints.
|
||||
assert(reserved.alignment() == alignment, "Unexpected");
|
||||
assert(is_aligned(reserved.base(), alignment), "Unexpected");
|
||||
return reserved;
|
||||
}
|
||||
|
||||
// Failed
|
||||
return {};
|
||||
}
|
||||
|
||||
ReservedSpace HeapReserver::Instance::try_reserve_range(char *highest_start,
|
||||
char *lowest_start,
|
||||
size_t attach_point_alignment,
|
||||
char *aligned_heap_base_min_address,
|
||||
char *upper_bound,
|
||||
size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size) {
|
||||
const size_t attach_range = highest_start - lowest_start;
|
||||
// Cap num_attempts at possible number.
|
||||
// At least one is possible even for 0 sized attach range.
|
||||
const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
|
||||
const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
|
||||
|
||||
const size_t stepsize = (attach_range == 0) ? // Only one try.
|
||||
(size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
|
||||
|
||||
// Try attach points from top to bottom.
|
||||
for (char* attach_point = highest_start;
|
||||
attach_point >= lowest_start && attach_point <= highest_start; // Avoid wrap around.
|
||||
attach_point -= stepsize) {
|
||||
ReservedSpace reserved = try_reserve_memory(size, alignment, page_size, attach_point);
|
||||
|
||||
if (reserved.is_reserved()) {
|
||||
if (reserved.base() >= aligned_heap_base_min_address &&
|
||||
size <= (uintptr_t)(upper_bound - reserved.base())) {
|
||||
// Got a successful reservation.
|
||||
return reserved;
|
||||
}
|
||||
|
||||
release(reserved);
|
||||
}
|
||||
}
|
||||
|
||||
// Failed
|
||||
return {};
|
||||
}
|
||||
|
||||
#define SIZE_64K ((uint64_t) UCONST64( 0x10000))
|
||||
#define SIZE_256M ((uint64_t) UCONST64( 0x10000000))
|
||||
#define SIZE_32G ((uint64_t) UCONST64( 0x800000000))
|
||||
|
||||
// Helper for heap allocation. Returns an array with addresses
|
||||
// (OS-specific) which are suited for disjoint base mode. Array is
|
||||
// null terminated.
|
||||
static char** get_attach_addresses_for_disjoint_mode() {
|
||||
static uint64_t addresses[] = {
|
||||
2 * SIZE_32G,
|
||||
3 * SIZE_32G,
|
||||
4 * SIZE_32G,
|
||||
8 * SIZE_32G,
|
||||
10 * SIZE_32G,
|
||||
1 * SIZE_64K * SIZE_32G,
|
||||
2 * SIZE_64K * SIZE_32G,
|
||||
3 * SIZE_64K * SIZE_32G,
|
||||
4 * SIZE_64K * SIZE_32G,
|
||||
16 * SIZE_64K * SIZE_32G,
|
||||
32 * SIZE_64K * SIZE_32G,
|
||||
34 * SIZE_64K * SIZE_32G,
|
||||
0
|
||||
};
|
||||
|
||||
// Sort out addresses smaller than HeapBaseMinAddress. This assumes
|
||||
// the array is sorted.
|
||||
uint i = 0;
|
||||
while (addresses[i] != 0 &&
|
||||
(addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
|
||||
i++;
|
||||
}
|
||||
uint start = i;
|
||||
|
||||
// Avoid more steps than requested.
|
||||
i = 0;
|
||||
while (addresses[start+i] != 0) {
|
||||
if (i == HeapSearchSteps) {
|
||||
addresses[start+i] = 0;
|
||||
break;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
|
||||
return (char**) &addresses[start];
|
||||
}

// Create protection page at the beginning of the space.
static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
  assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
  assert(reserved.is_reserved(), "should only be called on a reserved memory area");

  if (reserved.end() > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
      // Protect memory at the base of the allocated region.
      if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / " INTX_FORMAT " bytes",
                                 p2i(reserved.base()),
                                 noaccess_prefix);
      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
    } else {
      CompressedOops::set_use_implicit_null_checks(false);
    }
  }

  return reserved.last_part(noaccess_prefix);
}
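// Illustrative sketch (not part of this change): establish_noaccess_prefix() hands
// back reserved.last_part(noaccess_prefix), i.e. the reservation minus its protected
// head. The arithmetic of last_part() (see reservedSpace.hpp later in this commit)
// reduces to the following standalone example with made-up sizes.
#include <cstddef>
#include <cstdio>

struct Range { char* base; size_t size; };

static Range last_part(Range r, size_t split_offset) {
  // Same shape as ReservedSpace::last_part(): start later, shrink by the same amount.
  return Range{ r.base + split_offset, r.size - split_offset };
}

int main() {
  char backing[16];                          // stands in for a real reservation
  Range reserved{ backing, sizeof(backing) };
  Range heap = last_part(reserved, 4);       // 4 stands in for the noaccess prefix
  printf("prefix bytes: %zu, heap size: %zu\n",
         (size_t)(heap.base - reserved.base), heap.size);
  return 0;
}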

ReservedHeapSpace HeapReserver::Instance::reserve_compressed_oops_heap(const size_t size, size_t alignment, size_t page_size) {
  const size_t noaccess_prefix_size = lcm(os::vm_page_size(), alignment);
  const size_t granularity = os::vm_allocation_granularity();

  assert(size + noaccess_prefix_size <= OopEncodingHeapMax, "can not allocate compressed oop heap for this size");
  assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");

  assert(alignment >= os::vm_page_size(), "alignment too small");
  assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
  assert(is_power_of_2(alignment), "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  // AIX is the only platform that uses System V shm for reserving virtual memory.
  // In this case, the required alignment of the allocated size (64K) and the alignment
  // of possible start points of the memory region (256M) differ.
  // This is not reflected by os_allocation_granularity().
  // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
  const size_t os_attach_point_alignment =
      AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
      NOT_AIX(os::vm_allocation_granularity());

  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char* aligned_heap_base_min_address = align_up((char*)HeapBaseMinAddress, alignment);
  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
      noaccess_prefix_size : 0;

  ReservedSpace reserved{};

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
    reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
    if (reserved.base() != aligned_heap_base_min_address) { // Enforce this exact address.
      release(reserved);
      reserved = {};
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (!reserved.is_reserved()) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested. Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

      // Calc address range within we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
      reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                                   aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    char *zerobased_max = (char *)OopEncodingHeapMax;

    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretical possible.
        ((!reserved.is_reserved()) ||                            // No previous try succeeded.
         (reserved.end() > zerobased_max))) {                    // Unscaled delivered an arbitrary address.

      // Release previous reservation
      release(reserved);

      // Calc address range within we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful about size being guaranteed to be less
      // than UnscaledOopHeapMax due to type constraints.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                                   aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
    }

    // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size;

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while ((addresses[i] != nullptr) &&   // End of array not yet reached.
           ((!reserved.is_reserved()) ||  // No previous try succeeded.
            (reserved.end() > zerobased_max && // Not zerobased or unscaled address.
             // Not disjoint address.
             !CompressedOops::is_disjoint_heap_base_address((address)reserved.base())))) {

      // Release previous reservation
      release(reserved);

      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (!reserved.is_reserved()) {
      log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
      assert(alignment >= os::vm_page_size(), "Unexpected");
      reserved = reserve_memory(size + noaccess_prefix, alignment, page_size);
    }
  }

  // No more reserve attempts

  if (reserved.is_reserved()) {
    // Successfully found and reserved memory for the heap.

    if (reserved.size() > size) {
      // We reserved heap memory with a noaccess prefix.

      assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at arbitrary address.
      reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
      assert(reserved.size() == size, "Prefix should be gone");
      return ReservedHeapSpace(reserved, noaccess_prefix);
    }

    // We reserved heap memory without a noaccess prefix.
    return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
  }

  // Failed
  return {};
}

#endif // _LP64

ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
  if (UseCompressedOops) {
#ifdef _LP64
    return reserve_compressed_oops_heap(size, alignment, page_size);
#endif
  } else {
    return reserve_uncompressed_oops_heap(size, alignment, page_size);
  }
}

ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {
  sanity_check_arguments(size, alignment, page_size);

  assert(alignment != 0, "Precondition");
  assert(is_aligned(size, alignment), "Precondition");

  Instance instance(heap_allocation_directory);

  return instance.reserve_heap(size, alignment, page_size);
}
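// Usage sketch (not part of this change): how a caller inside HotSpot obtains the
// Java heap through the new entry point. Universe::reserve_heap() further down in
// this commit is the real call site; the helper name and error handling here are
// made up, and size/alignment are assumed to satisfy the asserted preconditions.
#include "memory/memoryReserver.hpp"
#include "runtime/java.hpp"

static ReservedHeapSpace reserve_java_heap_sketch(size_t heap_size,
                                                  size_t alignment,
                                                  size_t page_size) {
  // nullptr: no AllocateHeapAt directory, so the heap is not file-backed.
  ReservedHeapSpace rhs = HeapReserver::reserve(heap_size, alignment, page_size,
                                                nullptr /* heap_allocation_directory */);
  if (!rhs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the Java heap");
  }
  return rhs;
}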

src/hotspot/share/memory/memoryReserver.hpp (new file, 147 lines)
@ -0,0 +1,147 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/

#ifndef SHARE_MEMORY_MEMORYRESERVER_HPP
#define SHARE_MEMORY_MEMORYRESERVER_HPP

#include "memory/allStatic.hpp"
#include "memory/reservedSpace.hpp"
#include "nmt/memTag.hpp"
#include "utilities/globalDefinitions.hpp"

class MemoryReserver : AllStatic {
  static ReservedSpace reserve_memory(char* requested_address,
                                      size_t size,
                                      size_t alignment,
                                      bool exec,
                                      MemTag mem_tag);

  static ReservedSpace reserve_memory_special(char* requested_address,
                                              size_t size,
                                              size_t alignment,
                                              size_t page_size,
                                              bool exec);

 public:
  // Final destination
  static ReservedSpace reserve(char* requested_address,
                               size_t size,
                               size_t alignment,
                               size_t page_size,
                               bool executable,
                               MemTag mem_tag);

  // Convenience overloads

  static ReservedSpace reserve(char* requested_address,
                               size_t size,
                               size_t alignment,
                               size_t page_size,
                               MemTag mem_tag = mtNone);

  static ReservedSpace reserve(size_t size,
                               size_t alignment,
                               size_t page_size,
                               MemTag mem_tag = mtNone);

  static ReservedSpace reserve(size_t size,
                               MemTag mem_tag);

  // Release reserved memory
  static bool release(const ReservedSpace& reserved);
};

class CodeMemoryReserver : AllStatic {
 public:
  static ReservedSpace reserve(size_t size,
                               size_t alignment,
                               size_t page_size);
};

class FileMappedMemoryReserver : AllStatic {
 public:
  static ReservedSpace reserve(char* requested_address,
                               size_t size,
                               size_t alignment,
                               int fd,
                               MemTag mem_tag);
};

class HeapReserver : AllStatic {
  class Instance {
    const int _fd;

    NONCOPYABLE(Instance);

    ReservedSpace reserve_memory(size_t size,
                                 size_t alignment,
                                 size_t page_size,
                                 char* requested_address = nullptr);

    void release(const ReservedSpace& reserved);

    // CompressedOops support
#ifdef _LP64

    ReservedSpace try_reserve_memory(size_t size,
                                     size_t alignment,
                                     size_t page_size,
                                     char* requested_address);

    ReservedSpace try_reserve_range(char *highest_start,
                                    char *lowest_start,
                                    size_t attach_point_alignment,
                                    char *aligned_heap_base_min_address,
                                    char *upper_bound,
                                    size_t size,
                                    size_t alignment,
                                    size_t page_size);

    ReservedHeapSpace reserve_compressed_oops_heap(size_t size,
                                                   size_t alignment,
                                                   size_t page_size);

#endif // _LP64

    ReservedHeapSpace reserve_uncompressed_oops_heap(size_t size,
                                                     size_t alignment,
                                                     size_t page_size);

   public:
    Instance(const char* heap_allocation_directory);
    ~Instance();

    ReservedHeapSpace reserve_heap(size_t size,
                                   size_t alignment,
                                   size_t page_size);
  }; // Instance

 public:
  static ReservedHeapSpace reserve(size_t size,
                                   size_t alignment,
                                   size_t page_size,
                                   const char* heap_allocation_directory);
};

#endif // SHARE_MEMORY_MEMORYRESERVER_HPP
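// Usage sketch (not part of the new header): reserve an anonymous range with the
// convenience overload and release it again. Mirrors the call sites converted
// elsewhere in this commit (e.g. MetaspaceTestContext); the helper name is made up.
#include "memory/memoryReserver.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"

static void reserve_and_release_sketch(size_t bytes) {
  // The reserver expects the size to fit the requested alignment.
  const size_t alignment = os::vm_allocation_granularity();
  const size_t size      = align_up(bytes, alignment);

  ReservedSpace rs = MemoryReserver::reserve(size, alignment, os::vm_page_size());
  if (!rs.is_reserved()) {
    return; // caller-specific error handling would go here
  }
  // ... commit and use the range, e.g. via VirtualSpace ...
  MemoryReserver::release(rs);
}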
|
@ -32,6 +32,7 @@
|
||||
#include "logging/log.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "memory/classLoaderMetaspace.hpp"
|
||||
#include "memory/memoryReserver.hpp"
|
||||
#include "memory/metaspace.hpp"
|
||||
#include "memory/metaspace/chunkHeaderPool.hpp"
|
||||
#include "memory/metaspace/chunkManager.hpp"
|
||||
@ -57,11 +58,11 @@
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/init.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "utilities/copy.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/formatBuffer.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "virtualspace.hpp"
|
||||
|
||||
using metaspace::ChunkManager;
|
||||
using metaspace::CommitLimiter;
|
||||
@ -597,17 +598,20 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
|
||||
}
|
||||
|
||||
// Wrap resulting range in ReservedSpace
|
||||
ReservedSpace rs;
|
||||
if (result != nullptr) {
|
||||
log_debug(metaspace, map)("Mapped at " PTR_FORMAT, p2i(result));
|
||||
assert(is_aligned(result, Metaspace::reserve_alignment()), "Alignment too small for metaspace");
|
||||
rs = ReservedSpace::space_for_range(result, size, Metaspace::reserve_alignment(),
|
||||
os::vm_page_size(), false, false);
|
||||
|
||||
return ReservedSpace(result,
|
||||
size,
|
||||
Metaspace::reserve_alignment(),
|
||||
os::vm_page_size(),
|
||||
!ExecMem,
|
||||
false /* special */);
|
||||
} else {
|
||||
log_debug(metaspace, map)("Failed to map.");
|
||||
rs = ReservedSpace();
|
||||
return {};
|
||||
}
|
||||
return rs;
|
||||
}
|
||||
#endif // _LP64
|
||||
|
||||
@ -760,8 +764,12 @@ void Metaspace::global_initialize() {
|
||||
"(must be aligned to " SIZE_FORMAT_X ").",
|
||||
CompressedClassSpaceBaseAddress, Metaspace::reserve_alignment()));
|
||||
}
|
||||
rs = ReservedSpace(size, Metaspace::reserve_alignment(),
|
||||
os::vm_page_size() /* large */, (char*)base);
|
||||
|
||||
rs = MemoryReserver::reserve((char*)base,
|
||||
size,
|
||||
Metaspace::reserve_alignment(),
|
||||
os::vm_page_size());
|
||||
|
||||
if (rs.is_reserved()) {
|
||||
log_info(metaspace)("Successfully forced class space address to " PTR_FORMAT, p2i(base));
|
||||
} else {
|
||||
@ -1021,4 +1029,3 @@ bool Metaspace::is_in_shared_metaspace(const void* ptr) {
|
||||
bool Metaspace::is_in_nonclass_metaspace(const void* ptr) {
|
||||
return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
|
||||
}
|
||||
|
||||
|
@ -26,7 +26,6 @@
|
||||
#define SHARE_MEMORY_METASPACE_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/virtualspace.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "utilities/exceptions.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
@ -36,6 +35,7 @@ class MetaspaceShared;
|
||||
class MetaspaceTracer;
|
||||
class Mutex;
|
||||
class outputStream;
|
||||
class ReservedSpace;
|
||||
|
||||
////////////////// Metaspace ///////////////////////
|
||||
|
||||
|
@ -28,10 +28,10 @@
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/metaspace/counters.hpp"
|
||||
#include "memory/virtualspace.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
class outputStream;
|
||||
class ReservedSpace;
|
||||
|
||||
namespace metaspace {
|
||||
|
||||
|
@ -24,6 +24,7 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "memory/memoryReserver.hpp"
|
||||
#include "memory/metaspace/chunkManager.hpp"
|
||||
#include "memory/metaspace/metaspaceArena.hpp"
|
||||
#include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp"
|
||||
@ -82,7 +83,7 @@ MetaspaceTestContext::MetaspaceTestContext(const char* name, size_t commit_limit
|
||||
reserve_limit, Metaspace::reserve_alignment_words());
|
||||
if (reserve_limit > 0) {
|
||||
// have reserve limit -> non-expandable context
|
||||
_rs = ReservedSpace(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), os::vm_page_size());
|
||||
_rs = MemoryReserver::reserve(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), os::vm_page_size());
|
||||
_context = MetaspaceContext::create_nonexpandable_context(name, _rs, &_commit_limiter);
|
||||
} else {
|
||||
// no reserve limit -> expandable vslist
|
||||
@ -96,7 +97,7 @@ MetaspaceTestContext::~MetaspaceTestContext() {
|
||||
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
|
||||
delete _context;
|
||||
if (_rs.is_reserved()) {
|
||||
_rs.release();
|
||||
MemoryReserver::release(_rs);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -31,7 +31,7 @@
|
||||
#include "memory/metaspace/commitLimiter.hpp"
|
||||
#include "memory/metaspace/counters.hpp"
|
||||
#include "memory/metaspace/metaspaceContext.hpp"
|
||||
#include "memory/virtualspace.hpp"
|
||||
#include "memory/reservedSpace.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
// This is just convenience classes for metaspace-related tests
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/memoryReserver.hpp"
|
||||
#include "memory/metaspace.hpp"
|
||||
#include "memory/metaspace/chunkHeaderPool.hpp"
|
||||
#include "memory/metaspace/chunklevel.hpp"
|
||||
@ -253,9 +254,10 @@ VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
|
||||
SizeCounter* commit_words_counter)
|
||||
{
|
||||
DEBUG_ONLY(assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);)
|
||||
ReservedSpace rs(word_size * BytesPerWord,
|
||||
Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
|
||||
os::vm_page_size());
|
||||
|
||||
ReservedSpace rs = MemoryReserver::reserve(word_size * BytesPerWord,
|
||||
Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
|
||||
os::vm_page_size());
|
||||
if (!rs.is_reserved()) {
|
||||
vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
|
||||
}
|
||||
@ -286,7 +288,9 @@ VirtualSpaceNode::~VirtualSpaceNode() {
|
||||
UL(debug, ": dies.");
|
||||
|
||||
if (_owns_rs) {
|
||||
_rs.release();
|
||||
if (_rs.is_reserved()) {
|
||||
MemoryReserver::release(_rs);
|
||||
}
|
||||
}
|
||||
|
||||
// Update counters in vslist
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2020 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -32,7 +32,7 @@
|
||||
#include "memory/metaspace/counters.hpp"
|
||||
#include "memory/metaspace/metaspaceSettings.hpp"
|
||||
#include "memory/metaspace/rootChunkArea.hpp"
|
||||
#include "memory/virtualspace.hpp"
|
||||
#include "memory/reservedSpace.hpp"
|
||||
#include "utilities/bitMap.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
src/hotspot/share/memory/reservedSpace.cpp (new file, 37 lines)
@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/

#include "precompiled.hpp"
#include "memory/reservedSpace.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"

#ifdef ASSERT
void ReservedSpace::sanity_checks() {
  assert(is_aligned(_base, os::vm_allocation_granularity()), "Unaligned base");
  assert(is_aligned(_base, _alignment), "Unaligned base");
  assert(is_aligned(_size, os::vm_page_size()), "Unaligned size");
  assert(os::page_sizes().contains(_page_size), "Invalid pagesize");
}
#endif
src/hotspot/share/memory/reservedSpace.hpp (new file, 159 lines)
@ -0,0 +1,159 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_MEMORY_RESERVEDSPACE_HPP
|
||||
#define SHARE_MEMORY_RESERVEDSPACE_HPP
|
||||
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
// ReservedSpace is a data structure for describing a reserved contiguous address range.
|
||||
|
||||
class ReservedSpace {
|
||||
char* _base;
|
||||
size_t _size;
|
||||
size_t _alignment;
|
||||
size_t _page_size;
|
||||
bool _executable;
|
||||
bool _special;
|
||||
|
||||
void sanity_checks() NOT_DEBUG_RETURN;
|
||||
|
||||
public:
|
||||
// Constructor for non-reserved memory.
|
||||
ReservedSpace()
|
||||
: _base(nullptr),
|
||||
_size(0),
|
||||
_alignment(0),
|
||||
_page_size(0),
|
||||
_executable(false),
|
||||
_special(false) {}
|
||||
|
||||
// Main constructor
|
||||
ReservedSpace(char* base,
|
||||
size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size,
|
||||
bool executable,
|
||||
bool special)
|
||||
: _base(base),
|
||||
_size(size),
|
||||
_alignment(alignment),
|
||||
_page_size(page_size),
|
||||
_executable(executable),
|
||||
_special(special) {
|
||||
sanity_checks();
|
||||
}
|
||||
|
||||
bool is_reserved() const {
|
||||
return _base != nullptr;
|
||||
}
|
||||
|
||||
char* base() const {
|
||||
return _base;
|
||||
}
|
||||
|
||||
size_t size() const {
|
||||
return _size;
|
||||
}
|
||||
|
||||
char* end() const {
|
||||
return _base + _size;
|
||||
}
|
||||
|
||||
size_t alignment() const {
|
||||
return _alignment;
|
||||
}
|
||||
|
||||
size_t page_size() const {
|
||||
return _page_size;
|
||||
}
|
||||
|
||||
bool executable() const {
|
||||
return _executable;
|
||||
}
|
||||
|
||||
bool special() const {
|
||||
return _special;
|
||||
}
|
||||
|
||||
ReservedSpace partition(size_t offset, size_t partition_size, size_t alignment) const {
|
||||
assert(offset + partition_size <= size(), "partition failed");
|
||||
|
||||
char* const partition_base = base() + offset;
|
||||
assert(is_aligned(partition_base, alignment), "partition base must be aligned");
|
||||
|
||||
return ReservedSpace(partition_base,
|
||||
partition_size,
|
||||
alignment,
|
||||
_page_size,
|
||||
_executable,
|
||||
_special);
|
||||
}
|
||||
|
||||
ReservedSpace partition(size_t offset, size_t partition_size) const {
|
||||
return partition(offset, partition_size, _alignment);
|
||||
}
|
||||
|
||||
ReservedSpace first_part(size_t split_offset, size_t alignment) const {
|
||||
return partition(0, split_offset, alignment);
|
||||
}
|
||||
|
||||
ReservedSpace first_part(size_t split_offset) const {
|
||||
return first_part(split_offset, _alignment);
|
||||
}
|
||||
|
||||
ReservedSpace last_part (size_t split_offset, size_t alignment) const {
|
||||
return partition(split_offset, _size - split_offset, alignment);
|
||||
}
|
||||
|
||||
ReservedSpace last_part (size_t split_offset) const {
|
||||
return last_part(split_offset, _alignment);
|
||||
}
|
||||
};
|
||||
|
||||
// Class encapsulating behavior specific to memory reserved for the Java heap.
|
||||
class ReservedHeapSpace : public ReservedSpace {
|
||||
private:
|
||||
const size_t _noaccess_prefix;
|
||||
|
||||
public:
|
||||
// Constructor for non-reserved memory.
|
||||
ReservedHeapSpace()
|
||||
: ReservedSpace(),
|
||||
_noaccess_prefix() {}
|
||||
|
||||
ReservedHeapSpace(const ReservedSpace& reserved, size_t noaccess_prefix)
|
||||
: ReservedSpace(reserved),
|
||||
_noaccess_prefix(noaccess_prefix) {}
|
||||
|
||||
size_t noaccess_prefix() const { return _noaccess_prefix; }
|
||||
|
||||
// Returns the base to be used for compression, i.e. so that null can be
|
||||
// encoded safely and implicit null checks can work.
|
||||
char* compressed_oop_base() const { return base() - _noaccess_prefix; }
|
||||
};
|
||||
|
||||
#endif // SHARE_MEMORY_RESERVEDSPACE_HPP
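// Usage sketch (not part of the new header): splitting one reservation into two
// adjacent parts with first_part()/last_part(), as generation and CDS code does.
// The helper and its asserts are illustrative only.
#include "memory/reservedSpace.hpp"

static void split_sketch(const ReservedSpace& rs, size_t split_offset) {
  assert(is_aligned(split_offset, rs.alignment()), "sketch assumes an aligned split");
  ReservedSpace lower = rs.first_part(split_offset); // [base, base + split_offset)
  ReservedSpace upper = rs.last_part(split_offset);  // [base + split_offset, end)
  assert(lower.end() == upper.base(), "parts are adjacent");
  assert(lower.size() + upper.size() == rs.size(), "parts cover the whole range");
}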
|
@ -50,6 +50,7 @@
|
||||
#include "gc/shared/tlab_globals.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "memory/memoryReserver.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
#include "memory/metaspaceClosure.hpp"
|
||||
#include "memory/metaspaceCounters.hpp"
|
||||
@ -956,11 +957,18 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
|
||||
}
|
||||
|
||||
// Now create the space.
|
||||
ReservedHeapSpace total_rs(total_reserved, alignment, page_size, AllocateHeapAt);
|
||||
ReservedHeapSpace rhs = HeapReserver::reserve(total_reserved, alignment, page_size, AllocateHeapAt);
|
||||
|
||||
if (rhs.is_reserved()) {
|
||||
assert(total_reserved == rhs.size(), "must be exactly of required size");
|
||||
assert(is_aligned(rhs.base(),alignment),"must be exactly of required alignment");
|
||||
|
||||
assert(markWord::encode_pointer_as_mark(rhs.base()).decode_pointer() == rhs.base(),
|
||||
"area must be distinguishable from marks for mark-sweep");
|
||||
assert(markWord::encode_pointer_as_mark(&rhs.base()[rhs.size()]).decode_pointer() ==
|
||||
&rhs.base()[rhs.size()],
|
||||
"area must be distinguishable from marks for mark-sweep");
|
||||
|
||||
if (total_rs.is_reserved()) {
|
||||
assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0),
|
||||
"must be exactly of required size and alignment");
|
||||
// We are good.
|
||||
|
||||
if (AllocateHeapAt != nullptr) {
|
||||
@ -968,12 +976,12 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
|
||||
}
|
||||
|
||||
if (UseCompressedOops) {
|
||||
CompressedOops::initialize(total_rs);
|
||||
CompressedOops::initialize(rhs);
|
||||
}
|
||||
|
||||
Universe::calculate_verify_data((HeapWord*)total_rs.base(), (HeapWord*)total_rs.end());
|
||||
Universe::calculate_verify_data((HeapWord*)rhs.base(), (HeapWord*)rhs.end());
|
||||
|
||||
return total_rs;
|
||||
return rhs;
|
||||
}
|
||||
|
||||
vm_exit_during_initialization(
|
||||
@ -982,7 +990,6 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
|
||||
|
||||
// satisfy compiler
|
||||
ShouldNotReachHere();
|
||||
return ReservedHeapSpace(0, 0, os::vm_page_size());
|
||||
}
|
||||
|
||||
OopStorage* Universe::vm_weak() {
|
||||
|
@ -26,6 +26,7 @@
|
||||
#define SHARE_MEMORY_UNIVERSE_HPP
|
||||
|
||||
#include "gc/shared/verifyOption.hpp"
|
||||
#include "memory/reservedSpace.hpp"
|
||||
#include "oops/array.hpp"
|
||||
#include "oops/oopHandle.hpp"
|
||||
#include "runtime/handles.hpp"
|
||||
@ -42,7 +43,6 @@
|
||||
class CollectedHeap;
|
||||
class DeferredObjAllocEvent;
|
||||
class OopStorage;
|
||||
class ReservedHeapSpace;
|
||||
class SerializeClosure;
|
||||
|
||||
class Universe: AllStatic {
|
||||
@ -51,7 +51,6 @@ class Universe: AllStatic {
|
||||
friend class oopDesc;
|
||||
friend class ClassLoader;
|
||||
friend class SystemDictionary;
|
||||
friend class ReservedHeapSpace;
|
||||
friend class VMStructs;
|
||||
friend class VM_PopulateDumpSharedSpace;
|
||||
friend class Metaspace;
|
||||
|
@ -23,648 +23,13 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "memory/reservedSpace.hpp"
|
||||
#include "memory/virtualspace.hpp"
|
||||
#include "nmt/memTracker.hpp"
|
||||
#include "oops/compressedKlass.hpp"
|
||||
#include "oops/compressedOops.hpp"
|
||||
#include "oops/markWord.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/formatBuffer.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
// ReservedSpace
|
||||
|
||||
// Dummy constructor
|
||||
ReservedSpace::ReservedSpace() : _base(nullptr), _size(0), _noaccess_prefix(0),
|
||||
_alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
|
||||
}
|
||||
|
||||
ReservedSpace::ReservedSpace(size_t size, MemTag mem_tag) : _fd_for_heap(-1) {
|
||||
// Want to use large pages where possible. If the size is
|
||||
// not large page aligned the mapping will be a mix of
|
||||
// large and normal pages.
|
||||
size_t page_size = os::page_size_for_region_unaligned(size, 1);
|
||||
size_t alignment = os::vm_allocation_granularity();
|
||||
initialize(size, alignment, page_size, nullptr, false, mem_tag);
|
||||
}
|
||||
|
||||
ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
|
||||
// When a page size is given we don't want to mix large
|
||||
// and normal pages. If the size is not a multiple of the
|
||||
// page size it will be aligned up to achieve this.
|
||||
size_t alignment = os::vm_allocation_granularity();
|
||||
if (preferred_page_size != os::vm_page_size()) {
|
||||
alignment = MAX2(preferred_page_size, alignment);
|
||||
size = align_up(size, alignment);
|
||||
}
|
||||
initialize(size, alignment, preferred_page_size, nullptr, false);
|
||||
}
|
||||
|
||||
ReservedSpace::ReservedSpace(size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size,
|
||||
char* requested_address) : _fd_for_heap(-1) {
|
||||
initialize(size, alignment, page_size, requested_address, false);
|
||||
}
|
||||
|
||||
ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t page_size,
|
||||
bool special, bool executable) : _fd_for_heap(-1) {
|
||||
assert((size % os::vm_allocation_granularity()) == 0,
|
||||
"size not allocation aligned");
|
||||
initialize_members(base, size, alignment, page_size, special, executable);
|
||||
}
|
||||
|
||||
// Helper method
|
||||
static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable, MemTag mem_tag) {
|
||||
if (fd != -1) {
|
||||
return os::attempt_map_memory_to_file_at(base, size, fd);
|
||||
}
|
||||
return os::attempt_reserve_memory_at(base, size, executable, mem_tag);
|
||||
}
|
||||
|
||||
// Helper method
|
||||
static char* map_or_reserve_memory(size_t size, int fd, bool executable, MemTag mem_tag) {
|
||||
if (fd != -1) {
|
||||
return os::map_memory_to_file(size, fd);
|
||||
}
|
||||
return os::reserve_memory(size, executable, mem_tag);
|
||||
}
|
||||
|
||||
// Helper method
|
||||
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd, bool executable) {
|
||||
if (fd != -1) {
|
||||
return os::map_memory_to_file_aligned(size, alignment, fd);
|
||||
}
|
||||
return os::reserve_memory_aligned(size, alignment, executable);
|
||||
}
|
||||
|
||||
// Helper method
|
||||
static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
|
||||
if (is_file_mapped) {
|
||||
if (!os::unmap_memory(base, size)) {
|
||||
fatal("os::unmap_memory failed");
|
||||
}
|
||||
} else if (!os::release_memory(base, size)) {
|
||||
fatal("os::release_memory failed");
|
||||
}
|
||||
}
|
||||
|
||||
// Helper method
|
||||
static bool failed_to_reserve_as_requested(char* base, char* requested_address) {
|
||||
if (base == requested_address || requested_address == nullptr) {
|
||||
return false; // did not fail
|
||||
}
|
||||
|
||||
if (base != nullptr) {
|
||||
// Different reserve address may be acceptable in other cases
|
||||
// but for compressed oops heap should be at requested address.
|
||||
assert(UseCompressedOops, "currently requested address used only for compressed oops");
|
||||
log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool use_explicit_large_pages(size_t page_size) {
|
||||
return !os::can_commit_large_page_memory() &&
|
||||
page_size != os::vm_page_size();
|
||||
}
|
||||
|
||||
static bool large_pages_requested() {
|
||||
return UseLargePages &&
|
||||
(!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes));
|
||||
}
|
||||
|
||||
static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
|
||||
if (large_pages_requested()) {
|
||||
// Compressed oops logging.
|
||||
log_debug(gc, heap, coops)("Reserve regular memory without large pages");
|
||||
// JVM style warning that we did not succeed in using large pages.
|
||||
char msg[128];
|
||||
jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory using large pages. "
|
||||
"req_addr: " PTR_FORMAT " bytes: " SIZE_FORMAT,
|
||||
req_addr, bytes);
|
||||
warning("%s", msg);
|
||||
}
|
||||
}
|
||||
|
||||
static char* reserve_memory(char* requested_address, const size_t size,
|
||||
const size_t alignment, int fd, bool exec, MemTag mem_tag) {
|
||||
char* base;
|
||||
// If the memory was requested at a particular address, use
|
||||
// os::attempt_reserve_memory_at() to avoid mapping over something
|
||||
// important. If the reservation fails, return null.
|
||||
if (requested_address != nullptr) {
|
||||
assert(is_aligned(requested_address, alignment),
|
||||
"Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
|
||||
p2i(requested_address), alignment);
|
||||
base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec, mem_tag);
|
||||
} else {
|
||||
// Optimistically assume that the OS returns an aligned base pointer.
|
||||
// When reserving a large address range, most OSes seem to align to at
|
||||
// least 64K.
|
||||
base = map_or_reserve_memory(size, fd, exec, mem_tag);
|
||||
// Check alignment constraints. This is only needed when there is
|
||||
// no requested address.
|
||||
if (!is_aligned(base, alignment)) {
|
||||
// Base not aligned, retry.
|
||||
unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/);
|
||||
// Map using the requested alignment.
|
||||
base = map_or_reserve_memory_aligned(size, alignment, fd, exec);
|
||||
}
|
||||
}
|
||||
|
||||
return base;
|
||||
}
|
||||
|
||||
static char* reserve_memory_special(char* requested_address, const size_t size,
|
||||
const size_t alignment, const size_t page_size, bool exec) {
|
||||
|
||||
log_trace(pagesize)("Attempt special mapping: size: " SIZE_FORMAT "%s, "
|
||||
"alignment: " SIZE_FORMAT "%s",
|
||||
byte_size_in_exact_unit(size), exact_unit_for_byte_size(size),
|
||||
byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));
|
||||
|
||||
char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);
|
||||
if (base != nullptr) {
|
||||
// Check alignment constraints.
|
||||
assert(is_aligned(base, alignment),
|
||||
"reserve_memory_special() returned an unaligned address, base: " PTR_FORMAT
|
||||
" alignment: " SIZE_FORMAT_X,
|
||||
p2i(base), alignment);
|
||||
}
|
||||
return base;
|
||||
}
|
||||
|
||||
void ReservedSpace::clear_members() {
|
||||
initialize_members(nullptr, 0, 0, 0, false, false);
|
||||
}
|
||||
|
||||
void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment,
|
||||
size_t page_size, bool special, bool executable) {
|
||||
_base = base;
|
||||
_size = size;
|
||||
_alignment = alignment;
|
||||
_page_size = page_size;
|
||||
_special = special;
|
||||
_executable = executable;
|
||||
_noaccess_prefix = 0;
|
||||
}
|
||||
|
||||
void ReservedSpace::reserve(size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size,
|
||||
char* requested_address,
|
||||
bool executable,
|
||||
MemTag mem_tag) {
|
||||
assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment");
|
||||
|
||||
// There are basically three different cases that we need to handle below:
|
||||
// 1. Mapping backed by a file
|
||||
// 2. Mapping backed by explicit large pages
|
||||
// 3. Mapping backed by normal pages or transparent huge pages
|
||||
// The first two have restrictions that requires the whole mapping to be
|
||||
// committed up front. To record this the ReservedSpace is marked 'special'.
|
||||
|
||||
// == Case 1 ==
|
||||
if (_fd_for_heap != -1) {
|
||||
// When there is a backing file directory for this space then whether
|
||||
// large pages are allocated is up to the filesystem of the backing file.
|
||||
// So UseLargePages is not taken into account for this reservation.
|
||||
char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable, mem_tag);
|
||||
if (base != nullptr) {
|
||||
initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
|
||||
}
|
||||
// Always return, not possible to fall back to reservation not using a file.
|
||||
return;
|
||||
}
|
||||
|
||||
// == Case 2 ==
|
||||
if (use_explicit_large_pages(page_size)) {
|
||||
// System can't commit large pages i.e. use transparent huge pages and
|
||||
// the caller requested large pages. To satisfy this request we use
|
||||
// explicit large pages and these have to be committed up front to ensure
|
||||
// no reservations are lost.
|
||||
do {
|
||||
char* base = reserve_memory_special(requested_address, size, alignment, page_size, executable);
|
||||
if (base != nullptr) {
|
||||
// Successful reservation using large pages.
|
||||
initialize_members(base, size, alignment, page_size, true, executable);
|
||||
return;
|
||||
}
|
||||
page_size = os::page_sizes().next_smaller(page_size);
|
||||
} while (page_size > os::vm_page_size());
|
||||
|
||||
// Failed to reserve explicit large pages, do proper logging.
|
||||
log_on_large_pages_failure(requested_address, size);
|
||||
// Now fall back to normal reservation.
|
||||
assert(page_size == os::vm_page_size(), "inv");
|
||||
}
|
||||
|
||||
// == Case 3 ==
|
||||
char* base = reserve_memory(requested_address, size, alignment, -1, executable, mem_tag);
|
||||
if (base != nullptr) {
|
||||
// Successful mapping.
|
||||
initialize_members(base, size, alignment, page_size, false, executable);
|
||||
}
|
||||
}
|
||||
|
||||
void ReservedSpace::initialize(size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size,
|
||||
char* requested_address,
|
||||
bool executable,
|
||||
MemTag mem_tag) {
|
||||
const size_t granularity = os::vm_allocation_granularity();
|
||||
assert((size & (granularity - 1)) == 0,
|
||||
"size not aligned to os::vm_allocation_granularity()");
|
||||
assert((alignment & (granularity - 1)) == 0,
|
||||
"alignment not aligned to os::vm_allocation_granularity()");
|
||||
assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
|
||||
"not a power of 2");
|
||||
assert(page_size >= os::vm_page_size(), "Invalid page size");
|
||||
assert(is_power_of_2(page_size), "Invalid page size");
|
||||
|
||||
clear_members();
|
||||
|
||||
if (size == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Adjust alignment to not be 0.
|
||||
alignment = MAX2(alignment, os::vm_page_size());
|
||||
|
||||
// Reserve the memory.
|
||||
reserve(size, alignment, page_size, requested_address, executable, mem_tag);
|
||||
|
||||
// Check that the requested address is used if given.
|
||||
if (failed_to_reserve_as_requested(_base, requested_address)) {
|
||||
// OS ignored the requested address, release the reservation.
|
||||
release();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment) {
|
||||
assert(partition_size <= size(), "partition failed");
|
||||
ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable());
|
||||
return result;
|
||||
}
|
||||
|
||||
ReservedSpace ReservedSpace::last_part(size_t partition_size, size_t alignment) {
|
||||
assert(partition_size <= size(), "partition failed");
|
||||
ReservedSpace result(base() + partition_size, size() - partition_size,
|
||||
alignment, page_size(), special(), executable());
|
||||
return result;
|
||||
}
|
||||
|
||||
ReservedSpace ReservedSpace::partition(size_t offset, size_t partition_size, size_t alignment) {
|
||||
assert(offset + partition_size <= size(), "partition failed");
|
||||
ReservedSpace result(base() + offset, partition_size, alignment, page_size(), special(), executable());
|
||||
return result;
|
||||
}
|
||||
|
||||
void ReservedSpace::release() {
|
||||
if (is_reserved()) {
|
||||
char *real_base = _base - _noaccess_prefix;
|
||||
const size_t real_size = _size + _noaccess_prefix;
|
||||
if (special()) {
|
||||
if (_fd_for_heap != -1) {
|
||||
os::unmap_memory(real_base, real_size);
|
||||
} else {
|
||||
os::release_memory_special(real_base, real_size);
|
||||
}
|
||||
} else{
|
||||
os::release_memory(real_base, real_size);
|
||||
}
|
||||
clear_members();
|
||||
}
|
||||
}
|
||||
|
||||
// Put a ReservedSpace over an existing range
|
||||
ReservedSpace ReservedSpace::space_for_range(char* base, size_t size, size_t alignment,
|
||||
size_t page_size, bool special, bool executable) {
|
||||
assert(is_aligned(base, os::vm_allocation_granularity()), "Unaligned base");
|
||||
assert(is_aligned(size, os::vm_page_size()), "Unaligned size");
|
||||
assert(os::page_sizes().contains(page_size), "Invalid pagesize");
|
||||
ReservedSpace space;
|
||||
space.initialize_members(base, size, alignment, page_size, special, executable);
|
||||
return space;
|
||||
}
|
||||
|
||||
// Compressed oop support is not relevant in 32bit builds.
|
||||
#ifdef _LP64
|
||||
|
||||
static size_t noaccess_prefix_size(size_t alignment) {
|
||||
return lcm(os::vm_page_size(), alignment);
|
||||
}
|
||||
|
||||
void ReservedHeapSpace::establish_noaccess_prefix() {
|
||||
assert(_alignment >= os::vm_page_size(), "must be at least page size big");
|
||||
_noaccess_prefix = noaccess_prefix_size(_alignment);
|
||||
|
||||
if (base() && base() + _size > (char *)OopEncodingHeapMax) {
|
||||
if (true
|
||||
WIN64_ONLY(&& !UseLargePages)
|
||||
AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
|
||||
// Protect memory at the base of the allocated region.
|
||||
// If special, the page was committed (only matters on windows)
|
||||
if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
|
||||
fatal("cannot protect protection page");
|
||||
}
|
||||
log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
|
||||
PTR_FORMAT " / " INTX_FORMAT " bytes",
|
||||
p2i(_base),
|
||||
_noaccess_prefix);
|
||||
assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
|
||||
} else {
|
||||
CompressedOops::set_use_implicit_null_checks(false);
|
||||
}
|
||||
}
|
||||
|
||||
_base += _noaccess_prefix;
|
||||
_size -= _noaccess_prefix;
|
||||
assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
|
||||
}
|
||||
|
||||
// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
|
||||
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
|
||||
// might still fulfill the wishes of the caller.
|
||||
// Assures the memory is aligned to 'alignment'.
|
||||
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
|
||||
void ReservedHeapSpace::try_reserve_heap(size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size,
|
||||
char* requested_address) {
|
||||
if (_base != nullptr) {
|
||||
// We tried before, but we didn't like the address delivered.
|
||||
release();
|
||||
}
|
||||
|
||||
// Try to reserve the memory for the heap.
|
||||
log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
|
||||
" heap of size " SIZE_FORMAT_X,
|
||||
p2i(requested_address),
|
||||
size);
|
||||
|
||||
reserve(size, alignment, page_size, requested_address, false, mtJavaHeap);
|
||||
|
||||
// Check alignment constraints.
|
||||
if (is_reserved() && !is_aligned(_base, _alignment)) {
|
||||
// Base not aligned, retry.
|
||||
release();
|
||||
}
|
||||
}
|
||||
|
||||
void ReservedHeapSpace::try_reserve_range(char *highest_start,
|
||||
char *lowest_start,
|
||||
size_t attach_point_alignment,
|
||||
char *aligned_heap_base_min_address,
|
||||
char *upper_bound,
|
||||
size_t size,
|
||||
size_t alignment,
|
||||
size_t page_size) {
|
||||
const size_t attach_range = highest_start - lowest_start;
|
||||
// Cap num_attempts at possible number.
|
||||
// At least one is possible even for 0 sized attach range.
|
||||
const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
|
||||
const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
|
||||
|
||||
const size_t stepsize = (attach_range == 0) ? // Only one try.
|
||||
(size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
|
||||
|
||||
// Try attach points from top to bottom.
|
||||
char* attach_point = highest_start;
|
||||
while (attach_point >= lowest_start &&
|
||||
attach_point <= highest_start && // Avoid wrap around.
|
||||
((_base == nullptr) ||
|
||||
(_base < aligned_heap_base_min_address || size > (uintptr_t)(upper_bound - _base)))) {
|
||||
try_reserve_heap(size, alignment, page_size, attach_point);
|
||||
attach_point -= stepsize;
|
||||
}
|
||||
}
|
||||
|
||||
#define SIZE_64K ((uint64_t) UCONST64( 0x10000))
|
||||
#define SIZE_256M ((uint64_t) UCONST64( 0x10000000))
|
||||
#define SIZE_32G ((uint64_t) UCONST64( 0x800000000))
|
||||
|
||||
// Helper for heap allocation. Returns an array with addresses
|
||||
// (OS-specific) which are suited for disjoint base mode. Array is
|
||||
// null terminated.
|
||||
static char** get_attach_addresses_for_disjoint_mode() {
|
||||
static uint64_t addresses[] = {
|
||||
2 * SIZE_32G,
|
||||
3 * SIZE_32G,
|
||||
4 * SIZE_32G,
|
||||
8 * SIZE_32G,
|
||||
10 * SIZE_32G,
|
||||
1 * SIZE_64K * SIZE_32G,
|
||||
2 * SIZE_64K * SIZE_32G,
|
||||
3 * SIZE_64K * SIZE_32G,
|
||||
4 * SIZE_64K * SIZE_32G,
|
||||
16 * SIZE_64K * SIZE_32G,
|
||||
32 * SIZE_64K * SIZE_32G,
|
||||
34 * SIZE_64K * SIZE_32G,
|
||||
0
|
||||
};
|
||||
|
||||
// Sort out addresses smaller than HeapBaseMinAddress. This assumes
|
||||
// the array is sorted.
|
||||
uint i = 0;
|
||||
while (addresses[i] != 0 &&
|
||||
(addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
|
||||
i++;
|
||||
}
|
||||
uint start = i;
|
||||
|
||||
// Avoid more steps than requested.
|
||||
i = 0;
|
||||
while (addresses[start+i] != 0) {
|
||||
if (i == HeapSearchSteps) {
|
||||
addresses[start+i] = 0;
|
||||
break;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
|
||||
return (char**) &addresses[start];
|
||||
}
|
||||
|
||||
void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) {
|
||||
guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
|
||||
"can not allocate compressed oop heap for this size");
|
||||
guarantee(alignment == MAX2(alignment, os::vm_page_size()), "alignment too small");
|
||||
|
||||
const size_t granularity = os::vm_allocation_granularity();
|
||||
assert((size & (granularity - 1)) == 0,
|
||||
"size not aligned to os::vm_allocation_granularity()");
|
||||
assert((alignment & (granularity - 1)) == 0,
|
||||
"alignment not aligned to os::vm_allocation_granularity()");
|
||||
assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
|
||||
"not a power of 2");
|
||||
|
||||
// The necessary attach point alignment for generated wish addresses.
|
||||
// This is needed to increase the chance of attaching for mmap and shmat.
|
||||
// AIX is the only platform that uses System V shm for reserving virtual memory.
|
||||
// In this case, the required alignment of the allocated size (64K) and the alignment
|
||||
// of possible start points of the memory region (256M) differ.
|
||||
// This is not reflected by os_allocation_granularity().
|
||||
// The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
|
||||
const size_t os_attach_point_alignment =
|
||||
AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
|
||||
NOT_AIX(os::vm_allocation_granularity());
|
||||
|
||||
const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
|
||||
|
||||
char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
|
||||
size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
|
||||
noaccess_prefix_size(alignment) : 0;
|
||||
|
||||
// Attempt to alloc at user-given address.
|
||||
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
|
||||
try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
|
||||
if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
|
||||
release();
|
||||
}
|
||||
}
|
||||
|
||||
// Keep heap at HeapBaseMinAddress.
|
||||
if (_base == nullptr) {
|
||||
|
||||
// Try to allocate the heap at addresses that allow efficient oop compression.
|
||||
// Different schemes are tried, in order of decreasing optimization potential.
|
||||
//
|
||||
// For this, try_reserve_heap() is called with the desired heap base addresses.
|
||||
// A call into the os layer to allocate at a given address can return memory
|
||||
// at a different address than requested. Still, this might be memory at a useful
|
||||
// address. try_reserve_heap() always returns this allocated memory, as only here
|
||||
// the criteria for a good heap are checked.
|
||||
|
||||
// Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
// Give it several tries from top of range to bottom.
if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

// Calc address range within we try to attach (range of possible start addresses).
char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment,
aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
}

// zerobased: Attempt to allocate in the lower 32G.
char *zerobased_max = (char *)OopEncodingHeapMax;

// Give it several tries from top of range to bottom.
if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretical possible.
((_base == nullptr) || // No previous try succeeded.
(_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address.

// Calc address range within we try to attach (range of possible start addresses).
char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
// Need to be careful about size being guaranteed to be less
// than UnscaledOopHeapMax due to type constraints.
char *lowest_start = aligned_heap_base_min_address;
uint64_t unscaled_end = UnscaledOopHeapMax - size;
if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
lowest_start = MAX2(lowest_start, (char*)unscaled_end);
}
lowest_start = align_up(lowest_start, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment,
aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
}

// Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
// implement null checks.
noaccess_prefix = noaccess_prefix_size(alignment);

// Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
char** addresses = get_attach_addresses_for_disjoint_mode();
int i = 0;
while ((addresses[i] != nullptr) && // End of array not yet reached.
((_base == nullptr) || // No previous try succeeded.
(_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
!CompressedOops::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
char* const attach_point = addresses[i];
assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
try_reserve_heap(size + noaccess_prefix, alignment, page_size, attach_point);
i++;
}

// Last, desperate try without any placement.
if (_base == nullptr) {
log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
initialize(size + noaccess_prefix, alignment, page_size, nullptr, false, mtJavaHeap);
}
}
}

#endif // _LP64

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) : ReservedSpace() {

if (size == 0) {
return;
}

if (heap_allocation_directory != nullptr) {
_fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
if (_fd_for_heap == -1) {
vm_exit_during_initialization(
err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
}
// When there is a backing file directory for this space then whether
// large pages are allocated is up to the filesystem of the backing file.
// If requested, let the user know that explicit large pages can't be used.
if (use_explicit_large_pages(page_size) && large_pages_requested()) {
log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
}
}

// Heap size should be aligned to alignment, too.
guarantee(is_aligned(size, alignment), "set by caller");

if (UseCompressedOops) {
#ifdef _LP64
initialize_compressed_heap(size, alignment, page_size);
if (_size > size) {
// We allocated heap with noaccess prefix.
// It can happen we get a zerobased/unscaled heap with noaccess prefix,
// if we had to try at arbitrary address.
establish_noaccess_prefix();
}
#else
ShouldNotReachHere();
#endif // _LP64
} else {
initialize(size, alignment, page_size, nullptr, false, mtJavaHeap);
}

assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
"area must be distinguishable from marks for mark-sweep");
assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
"area must be distinguishable from marks for mark-sweep");

if (_fd_for_heap != -1) {
::close(_fd_for_heap);
}
}

MemRegion ReservedHeapSpace::region() const {
return MemRegion((HeapWord*)base(), (HeapWord*)end());
}

// Reserve space for code segment. Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
size_t rs_align,
size_t rs_page_size) : ReservedSpace() {
initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true, mtCode);
}
#include "utilities/debug.hpp"
#include "utilities/ostream.hpp"

// VirtualSpace

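Note on the placement policy in the code above: it first tries an unscaled heap (fits below 4 GB), then a zerobased heap (fits below 32 GB), and only then falls back to a non-zero base with a noaccess prefix. The following is a minimal standalone sketch of that ordering only; the constants and the main() driver are illustrative stand-ins for UnscaledOopHeapMax and OopEncodingHeapMax, not part of this change.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t unscaled_max  = UINT64_C(4)  << 30;  // stand-in for UnscaledOopHeapMax (4 GB)
  const uint64_t zerobased_max = UINT64_C(32) << 30;  // stand-in for OopEncodingHeapMax (32 GB)
  const uint64_t heap_base = UINT64_C(2) << 30;       // example HeapBaseMinAddress
  const uint64_t heap_size = UINT64_C(1) << 30;       // example heap size

  if (heap_base + heap_size <= unscaled_max) {
    std::puts("1. try unscaled: oop == address, no base, no shift");
  }
  if (heap_base + heap_size <= zerobased_max) {
    std::puts("2. try zerobased: base == 0, decode uses a shift");
  }
  std::puts("3. otherwise: base != 0, reserve an extra noaccess prefix for implicit null checks");
  return 0;
}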
@ -25,143 +25,11 @@
#ifndef SHARE_MEMORY_VIRTUALSPACE_HPP
#define SHARE_MEMORY_VIRTUALSPACE_HPP

#include "memory/memRegion.hpp"
#include "nmt/memTag.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

class outputStream;

// ReservedSpace is a data structure for reserving a contiguous address range.

class ReservedSpace {
friend class VMStructs;
protected:
char* _base;
size_t _size;
size_t _noaccess_prefix;
size_t _alignment;
size_t _page_size;
bool _special;
int _fd_for_heap;
private:
bool _executable;

// ReservedSpace
ReservedSpace(char* base, size_t size, size_t alignment,
size_t page_size, bool special, bool executable);
protected:
// Helpers to clear and set members during initialization. Two members
// require special treatment:
// * _fd_for_heap - The fd is set once and should not be cleared
// even if the reservation has to be retried.
// * _noaccess_prefix - Used for compressed heaps and updated after
// the reservation is initialized. Always set to
// 0 during initialization.
void clear_members();
void initialize_members(char* base, size_t size, size_t alignment,
size_t page_size, bool special, bool executable);

void initialize(size_t size, size_t alignment, size_t page_size,
char* requested_address, bool executable, MemTag mem_tag = mtNone);

void reserve(size_t size, size_t alignment, size_t page_size,
char* requested_address, bool executable, MemTag mem_tag);
public:
// Constructor
ReservedSpace();
// Initialize the reserved space with the given size. Depending on the size
// a suitable page size and alignment will be used.
ReservedSpace(size_t size, MemTag mem_tag);
// Initialize the reserved space with the given size. The preferred_page_size
// is used as the minimum page size/alignment. This may waste some space if
// the given size is not aligned to that value, as the reservation will be
// aligned up to the final alignment in this case.
ReservedSpace(size_t size, size_t preferred_page_size);
ReservedSpace(size_t size, size_t alignment, size_t page_size,
char* requested_address = nullptr);

// Accessors
char* base() const { return _base; }
size_t size() const { return _size; }
char* end() const { return _base + _size; }
size_t alignment() const { return _alignment; }
size_t page_size() const { return _page_size; }
bool special() const { return _special; }
bool executable() const { return _executable; }
size_t noaccess_prefix() const { return _noaccess_prefix; }
bool is_reserved() const { return _base != nullptr; }
void release();

// Splitting
// This splits the space into two spaces, the first part of which will be returned.
ReservedSpace first_part(size_t partition_size, size_t alignment);
ReservedSpace last_part (size_t partition_size, size_t alignment);
ReservedSpace partition (size_t offset, size_t partition_size, size_t alignment);

// These simply call the above using the default alignment.
inline ReservedSpace first_part(size_t partition_size);
inline ReservedSpace last_part (size_t partition_size);
inline ReservedSpace partition (size_t offset, size_t partition_size);

bool contains(const void* p) const {
return (base() <= ((char*)p)) && (((char*)p) < (base() + size()));
}

// Put a ReservedSpace over an existing range
static ReservedSpace space_for_range(char* base, size_t size, size_t alignment,
size_t page_size, bool special, bool executable);
};

ReservedSpace ReservedSpace::first_part(size_t partition_size)
{
return first_part(partition_size, alignment());
}

ReservedSpace ReservedSpace::last_part(size_t partition_size)
{
return last_part(partition_size, alignment());
}

ReservedSpace ReservedSpace::partition(size_t offset, size_t partition_size)
{
return partition(offset, partition_size, alignment());
}

// Class encapsulating behavior specific of memory space reserved for Java heap.
class ReservedHeapSpace : public ReservedSpace {
private:

// Compressed oop support is not relevant in 32bit builds.
#ifdef _LP64

void try_reserve_heap(size_t size, size_t alignment, size_t page_size,
char *requested_address);
void try_reserve_range(char *highest_start, char *lowest_start,
size_t attach_point_alignment, char *aligned_HBMA,
char *upper_bound, size_t size, size_t alignment, size_t page_size);
void initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size);
// Create protection page at the beginning of the space.
void establish_noaccess_prefix();

#endif // _LP64

public:
// Constructor. Tries to find a heap that is good for compressed oops.
// heap_allocation_directory is the path to the backing memory for Java heap. When set, Java heap will be allocated
// on the device which is managed by the file system where the directory resides.
ReservedHeapSpace(size_t size, size_t forced_base_alignment, size_t page_size, const char* heap_allocation_directory = nullptr);
// Returns the base to be used for compression, i.e. so that null can be
// encoded safely and implicit null checks can work.
char *compressed_oop_base() const { return _base - _noaccess_prefix; }
MemRegion region() const;
};

// Class encapsulating behavior specific memory space for Code
class ReservedCodeSpace : public ReservedSpace {
public:
// Constructor
ReservedCodeSpace(size_t r_size, size_t rs_align, size_t page_size);
};
class ReservedSpace;

// VirtualSpace is data structure for committing a previously reserved address range in smaller chunks.

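The hunks that follow update call sites from the removed ReservedSpace constructors to the reservation API introduced by this change. A minimal sketch of the resulting pattern, assuming only the MemoryReserver::reserve/release entry points and ReservedSpace accessors that appear in these hunks; the example function itself is illustrative, not code from the commit.

#include "memory/memoryReserver.hpp"
#include "memory/reservedSpace.hpp"

static void example(size_t size, size_t alignment, size_t page_size) {
  // Old: ReservedSpace rs(size, alignment, page_size);
  // New: reservation goes through the static reserver, release is explicit.
  ReservedSpace rs = MemoryReserver::reserve(size, alignment, page_size);
  if (rs.is_reserved()) {
    // ... use rs.base() / rs.size() ...
    MemoryReserver::release(rs);
  }
}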
@ -26,9 +26,9 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/memRegion.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "oops/compressedOops.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "runtime/arguments.hpp"
@ -66,7 +66,7 @@ void CompressedOops::initialize(const ReservedHeapSpace& heap_space) {
set_base((address)heap_space.compressed_oop_base());
}

_heap_address_range = heap_space.region();
_heap_address_range = MemRegion((HeapWord*)heap_space.base(), (HeapWord*)heap_space.end());

LogTarget(Debug, gc, heap, coops) lt;
if (lt.is_enabled()) {
@ -51,6 +51,7 @@
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspace/testHelpers.hpp"
#include "memory/metaspaceUtils.hpp"
@ -299,7 +300,7 @@ WB_END

WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
size_t granularity = os::vm_allocation_granularity();
ReservedHeapSpace rhs(100 * granularity, granularity, os::vm_page_size());
ReservedHeapSpace rhs = HeapReserver::reserve(100 * granularity, granularity, os::vm_page_size(), nullptr);
VirtualSpace vs;
vs.initialize(rhs, 50 * granularity);

@ -326,7 +327,7 @@ WB_END
static jint wb_stress_virtual_space_resize(size_t reserved_space_size,
size_t magnitude, size_t iterations) {
size_t granularity = os::vm_allocation_granularity();
ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, os::vm_page_size());
ReservedHeapSpace rhs = HeapReserver::reserve(reserved_space_size * granularity, granularity, os::vm_page_size(), nullptr);
VirtualSpace vs;
if (!vs.initialize(rhs, 0)) {
tty->print_cr("Failed to initialize VirtualSpace. Can't proceed.");
@ -29,8 +29,9 @@
#include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"

#include "unittest.hpp"

// @requires UseG1GC
@ -50,7 +51,7 @@ TEST_OTHER_VM(G1FreeRegionList, length) {
// the BOT.
size_t bot_size = G1BlockOffsetTable::compute_size(heap.word_size());
HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
ReservedSpace bot_rs(G1BlockOffsetTable::compute_size(heap.word_size()), mtGC);
ReservedSpace bot_rs = MemoryReserver::reserve(G1BlockOffsetTable::compute_size(heap.word_size()), mtGC);
G1RegionToSpaceMapper* bot_storage =
G1RegionToSpaceMapper::create_mapper(bot_rs,
bot_rs.size(),
@ -26,7 +26,7 @@
#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/virtualspace.hpp"
#include "memory/memoryReserver.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"
@ -81,7 +81,9 @@ TEST_VM(G1RegionToSpaceMapper, smallStressAdjacent) {
size_t size = G1BlockOffsetTable::compute_size(num_regions * region_size / HeapWordSize);
size_t page_size = os::vm_page_size();

ReservedSpace rs(size, os::vm_page_size());
ReservedSpace rs = MemoryReserver::reserve(size,
os::vm_allocation_granularity(),
os::vm_page_size());

G1RegionToSpaceMapper* small_mapper =
G1RegionToSpaceMapper::create_mapper(rs,
@ -105,8 +107,9 @@ TEST_VM(G1RegionToSpaceMapper, largeStressAdjacent) {
size_t size = G1BlockOffsetTable::compute_size(num_regions * region_size / HeapWordSize);
size_t page_size = os::vm_page_size();

ReservedSpace rs(size, page_size);

ReservedSpace rs = MemoryReserver::reserve(size,
os::vm_allocation_granularity(),
os::vm_page_size());
G1RegionToSpaceMapper* large_mapper =
G1RegionToSpaceMapper::create_mapper(rs,
size,
@ -22,7 +22,7 @@
*/

#include "precompiled.hpp"
#include "memory/virtualspace.hpp"
#include "memory/memoryReserver.hpp"
#include "oops/oop.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
@ -35,11 +35,7 @@ namespace {
public:
MemoryReleaser(ReservedSpace* rs) : _rs(rs) { }
~MemoryReleaser() {
if (_rs->special()) {
EXPECT_TRUE(os::release_memory_special(_rs->base(), _rs->size()));
} else {
EXPECT_TRUE(os::release_memory(_rs->base(), _rs->size()));
}
EXPECT_TRUE(MemoryReserver::release(*_rs));
}
};

@ -64,7 +60,7 @@ namespace {
static void test_reserved_size(size_t size) {
ASSERT_PRED2(is_size_aligned, size, os::vm_allocation_granularity());

ReservedSpace rs(size, mtTest);
ReservedSpace rs = MemoryReserver::reserve(size, mtTest);
MemoryReleaser releaser(&rs);

EXPECT_TRUE(rs.base() != nullptr) << "rs.special: " << rs.special();
@ -78,7 +74,7 @@ namespace {
static void test_reserved_size_alignment(size_t size, size_t alignment) {
ASSERT_PRED2(is_size_aligned, size, alignment) << "Incorrect input parameters";
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
ReservedSpace rs(size, alignment, page_size, (char *) nullptr);
ReservedSpace rs = MemoryReserver::reserve(size, alignment, page_size);

ASSERT_TRUE(rs.base() != nullptr) << "rs.special = " << rs.special();
ASSERT_EQ(size, rs.size()) << "rs.special = " << rs.special();
@ -106,7 +102,7 @@ namespace {
bool large = maybe_large && UseLargePages && size >= os::large_page_size();
size_t page_size = large ? os::large_page_size() : os::vm_page_size();

ReservedSpace rs(size, alignment, page_size);
ReservedSpace rs = MemoryReserver::reserve(size, alignment, page_size);
MemoryReleaser releaser(&rs);

EXPECT_TRUE(rs.base() != nullptr) << "rs.special: " << rs.special();
@ -206,7 +202,9 @@ namespace {
public:
ReservedSpaceReleaser(ReservedSpace* rs) : _rs(rs) { }
~ReservedSpaceReleaser() {
_rs->release();
if (_rs->is_reserved()) {
MemoryReserver::release(*_rs);
}
}
};

@ -215,12 +213,12 @@ namespace {
default:
case Default:
case Reserve:
return ReservedSpace(reserve_size_aligned, mtTest);
return MemoryReserver::reserve(reserve_size_aligned, mtTest);
case Disable:
case Commit:
return ReservedSpace(reserve_size_aligned,
os::vm_allocation_granularity(),
os::vm_page_size());
return MemoryReserver::reserve(reserve_size_aligned,
os::vm_allocation_granularity(),
os::vm_page_size());
}
}

@ -299,7 +297,7 @@ TEST_VM(VirtualSpace, actual_committed_space_one_large_page) {

size_t large_page_size = os::large_page_size();

ReservedSpace reserved(large_page_size, large_page_size, large_page_size);
ReservedSpace reserved = MemoryReserver::reserve(large_page_size, large_page_size, large_page_size);
ReservedSpaceReleaser releaser(&reserved);
ASSERT_TRUE(reserved.is_reserved());

@ -366,10 +364,9 @@ class TestReservedSpace : AllStatic {
static void test_reserved_space1(size_t size, size_t alignment) {
ASSERT_TRUE(is_aligned(size, alignment)) << "Incorrect input parameters";
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
ReservedSpace rs(size, // size
alignment, // alignment
page_size, // page size
(char *)nullptr); // requested_address
ReservedSpace rs = MemoryReserver::reserve(size,
alignment,
page_size);

EXPECT_TRUE(rs.base() != nullptr);
EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
@ -387,7 +384,7 @@ class TestReservedSpace : AllStatic {
static void test_reserved_space2(size_t size) {
ASSERT_TRUE(is_aligned(size, os::vm_allocation_granularity())) << "Must be at least AG aligned";

ReservedSpace rs(size, mtTest);
ReservedSpace rs = MemoryReserver::reserve(size, mtTest);

EXPECT_TRUE(rs.base() != nullptr);
EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
@ -412,7 +409,9 @@ class TestReservedSpace : AllStatic {
bool large = maybe_large && UseLargePages && size >= os::large_page_size();
size_t page_size = large ? os::large_page_size() : os::vm_page_size();

ReservedSpace rs(size, alignment, page_size);
ReservedSpace rs = MemoryReserver::reserve(size,
alignment,
page_size);

EXPECT_TRUE(rs.base() != nullptr);
EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
@ -516,12 +515,12 @@ class TestVirtualSpace : AllStatic {
default:
case Default:
case Reserve:
return ReservedSpace(reserve_size_aligned, mtTest);
return MemoryReserver::reserve(reserve_size_aligned, mtTest);
case Disable:
case Commit:
return ReservedSpace(reserve_size_aligned,
os::vm_allocation_granularity(),
os::vm_page_size());
return MemoryReserver::reserve(reserve_size_aligned,
os::vm_allocation_granularity(),
os::vm_page_size());
}
}

@ -566,7 +565,9 @@ class TestVirtualSpace : AllStatic {
EXPECT_LT(vs.actual_committed_size(), commit_size + commit_granularity);
}

reserved.release();
if (reserved.is_reserved()) {
MemoryReserver::release(reserved);
}
}

static void test_virtual_space_actual_committed_space_one_large_page() {
@ -576,7 +577,9 @@ class TestVirtualSpace : AllStatic {

size_t large_page_size = os::large_page_size();

ReservedSpace reserved(large_page_size, large_page_size, large_page_size);
ReservedSpace reserved = MemoryReserver::reserve(large_page_size,
large_page_size,
large_page_size);

EXPECT_TRUE(reserved.is_reserved());

@ -588,7 +591,9 @@ class TestVirtualSpace : AllStatic {

EXPECT_EQ(vs.actual_committed_size(), large_page_size);

reserved.release();
if (reserved.is_reserved()) {
MemoryReserver::release(reserved);
}
}

static void test_virtual_space_actual_committed_space() {
@ -31,8 +31,7 @@
// jtreg-controlled gtests (see test/hotspot/jtreg/gtest/NMTGtests.java)

#include "precompiled.hpp"

#include "memory/virtualspace.hpp"
#include "memory/memoryReserver.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "utilities/globalDefinitions.hpp"
@ -93,7 +92,7 @@ public:
static void test_add_committed_region_adjacent() {

size_t size = 0x01000000;
ReservedSpace rs(size, mtTest);
ReservedSpace rs = MemoryReserver::reserve(size, mtTest);
address addr = (address)rs.base();

address frame1 = (address)0x1234;
@ -167,7 +166,7 @@ public:
static void test_add_committed_region_adjacent_overlapping() {

size_t size = 0x01000000;
ReservedSpace rs(size, mtTest);
ReservedSpace rs = MemoryReserver::reserve(size, mtTest);
address addr = (address)rs.base();

address frame1 = (address)0x1234;
@ -254,7 +253,7 @@ public:
static void test_add_committed_region_overlapping() {

size_t size = 0x01000000;
ReservedSpace rs(size, mtTest);
ReservedSpace rs = MemoryReserver::reserve(size, mtTest);
address addr = (address)rs.base();

address frame1 = (address)0x1234;
@ -425,7 +424,7 @@ public:
static void test_remove_uncommitted_region() {

size_t size = 0x01000000;
ReservedSpace rs(size, mtTest);
ReservedSpace rs = MemoryReserver::reserve(size, mtTest);
address addr = (address)rs.base();

address frame1 = (address)0x1234;