8251358: Clean up Access configuration after Shenandoah barrier change
Reviewed-by: eosterlund, rkennke, shade
commit e63b90cc17 (parent 9c17a35e50)
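In brief: after the Shenandoah barrier change, no GC in the tree needs the Access API's runtime-resolved address barrier (`resolve`) or build-time primitive barriers, so this change deletes that machinery and collapses the duplicated `*_raw` accessors into the plain ones. A minimal sketch of the call-site pattern applied throughout the diff (illustrative, not verbatim HotSpot source; `obj` and `offset` are placeholders):

    // Before: two parallel accessor families; GC code called the _raw
    // variants to bypass the (now unnecessary) Access<>::resolve() barrier.
    markWord m0 = obj->mark_raw();
    obj->set_mark_raw(m0);
    HeapWord* f0 = obj->obj_field_addr_raw<HeapWord>(offset);

    // After: one family; mark()/set_mark() access the header directly and
    // obj_field_addr() is plain pointer arithmetic on the oop.
    markWord m1 = obj->mark();
    obj->set_mark(m1);
    HeapWord* f1 = obj->obj_field_addr<HeapWord>(offset);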
@@ -113,7 +113,7 @@ void java_lang_ref_Reference::set_referent_raw(oop ref, oop value) {
 }
 
 HeapWord* java_lang_ref_Reference::referent_addr_raw(oop ref) {
-  return ref->obj_field_addr_raw<HeapWord>(_referent_offset);
+  return ref->obj_field_addr<HeapWord>(_referent_offset);
 }
 
 oop java_lang_ref_Reference::next(oop ref) {
@@ -129,7 +129,7 @@ void java_lang_ref_Reference::set_next_raw(oop ref, oop value) {
 }
 
 HeapWord* java_lang_ref_Reference::next_addr_raw(oop ref) {
-  return ref->obj_field_addr_raw<HeapWord>(_next_offset);
+  return ref->obj_field_addr<HeapWord>(_next_offset);
 }
 
 oop java_lang_ref_Reference::discovered(oop ref) {
@@ -145,7 +145,7 @@ void java_lang_ref_Reference::set_discovered_raw(oop ref, oop value) {
 }
 
 HeapWord* java_lang_ref_Reference::discovered_addr_raw(oop ref) {
-  return ref->obj_field_addr_raw<HeapWord>(_discovered_offset);
+  return ref->obj_field_addr<HeapWord>(_discovered_offset);
 }
 
 bool java_lang_ref_Reference::is_final(oop ref) {
@@ -48,7 +48,7 @@ public:
     if (_bitmap->is_marked(obj)) {
       // Clear bitmap and fix mark word.
       _bitmap->clear(obj);
-      obj->init_mark_raw();
+      obj->init_mark();
     } else {
       assert(current->is_empty(), "Should have been cleared in phase 2.");
     }
@@ -71,7 +71,7 @@ size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
   HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
   assert(obj_addr != destination, "everything in this pass should be moving");
   Copy::aligned_conjoint_words(obj_addr, destination, size);
-  oop(destination)->init_mark_raw();
+  oop(destination)->init_mark();
   assert(oop(destination)->klass() != NULL, "should have a class");
 
   return size;
@@ -112,15 +112,15 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
     // with BiasedLocking, in this case forwardee() will return NULL
     // even if the mark-word is used. This is no problem since
    // forwardee() will return NULL in the compaction phase as well.
-    object->init_mark_raw();
+    object->init_mark();
   } else {
     // Make sure object has the correct mark-word set or that it will be
     // fixed when restoring the preserved marks.
-    assert(object->mark_raw() == markWord::prototype_for_klass(object->klass()) || // Correct mark
+    assert(object->mark() == markWord::prototype_for_klass(object->klass()) || // Correct mark
            object->mark_must_be_preserved() || // Will be restored by PreservedMarksSet
-           (UseBiasedLocking && object->has_bias_pattern_raw()), // Will be restored by BiasedLocking
+           (UseBiasedLocking && object->has_bias_pattern()), // Will be restored by BiasedLocking
            "should have correct prototype obj: " PTR_FORMAT " mark: " PTR_FORMAT " prototype: " PTR_FORMAT,
-           p2i(object), object->mark_raw().value(), markWord::prototype_for_klass(object->klass()).value());
+           p2i(object), object->mark().value(), markWord::prototype_for_klass(object->klass()).value());
   }
   assert(object->forwardee() == NULL, "should be forwarded to NULL");
 }
@@ -50,7 +50,7 @@ inline bool G1FullGCMarker::mark_object(oop obj) {
   }
 
   // Marked by us, preserve if needed.
-  markWord mark = obj->mark_raw();
+  markWord mark = obj->mark();
   if (obj->mark_must_be_preserved(mark) &&
       !G1ArchiveAllocator::is_open_archive_object(obj)) {
     preserved_stack()->push(obj, mark);
@@ -77,11 +77,11 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
   oop forwardee = obj->forwardee();
   if (forwardee == NULL) {
     // Not forwarded, return current reference.
-    assert(obj->mark_raw() == markWord::prototype_for_klass(obj->klass()) || // Correct mark
+    assert(obj->mark() == markWord::prototype_for_klass(obj->klass()) || // Correct mark
           obj->mark_must_be_preserved() || // Will be restored by PreservedMarksSet
-           (UseBiasedLocking && obj->has_bias_pattern_raw()), // Will be restored by BiasedLocking
+           (UseBiasedLocking && obj->has_bias_pattern()), // Will be restored by BiasedLocking
           "Must have correct prototype or be preserved, obj: " PTR_FORMAT ", mark: " PTR_FORMAT ", prototype: " PTR_FORMAT,
-           p2i(obj), obj->mark_raw().value(), markWord::prototype_for_klass(obj->klass()).value());
+           p2i(obj), obj->mark().value(), markWord::prototype_for_klass(obj->klass()).value());
     return;
   }
 
@@ -47,8 +47,8 @@ inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
   // stall. We'll try to prefetch the object (for write, given that
   // we might need to install the forwarding reference) and we'll
   // get back to it when pop it from the queue
-  Prefetch::write(obj->mark_addr_raw(), 0);
-  Prefetch::read(obj->mark_addr_raw(), (HeapWordSize*2));
+  Prefetch::write(obj->mark_addr(), 0);
+  Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
 
   // slightly paranoid test; I'm trying to catch potential
   // problems before we go into push_on_queue to know where the
@@ -231,7 +231,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
   const G1HeapRegionAttr state = _g1h->region_attr(obj);
   if (state.is_in_cset()) {
     oop forwardee;
-    markWord m = obj->mark_raw();
+    markWord m = obj->mark();
     if (m.is_marked()) {
       forwardee = (oop) m.decode_pointer();
     } else {
@@ -193,7 +193,7 @@ void G1ParScanThreadState::do_oop_evac(T* p) {
     return;
   }
 
-  markWord m = obj->mark_raw();
+  markWord m = obj->mark();
   if (m.is_marked()) {
     obj = (oop) m.decode_pointer();
   } else {
@@ -485,15 +485,15 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
       // In this case, we have to install the mark word first,
       // otherwise obj looks to be forwarded (the old mark word,
       // which contains the forward pointer, was copied)
-      obj->set_mark_raw(old_mark);
+      obj->set_mark(old_mark);
       markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
       old_mark.set_displaced_mark_helper(new_mark);
     } else {
-      obj->set_mark_raw(old_mark.set_age(age));
+      obj->set_mark(old_mark.set_age(age));
     }
     _age_table.add(age, word_sz);
   } else {
-    obj->set_mark_raw(old_mark);
+    obj->set_mark(old_mark);
   }
 
   // Most objects are not arrays, so do one array check rather than
@@ -133,7 +133,7 @@ inline void follow_array_specialized(objArrayOop obj, int index, ParCompactionMa
 
   const size_t stride = MIN2(len - beg_index, (size_t)ObjArrayMarkingStride);
   const size_t end_index = beg_index + stride;
-  T* const base = (T*)obj->base_raw();
+  T* const base = (T*)obj->base();
   T* const beg = base + beg_index;
   T* const end = base + end_index;
@@ -83,7 +83,7 @@ void PSPromotionLAB::flush() {
   // so they can always fill with an array.
   HeapWord* tlab_end = end() + filler_header_size;
   typeArrayOop filler_oop = (typeArrayOop) top();
-  filler_oop->set_mark_raw(markWord::prototype());
+  filler_oop->set_mark(markWord::prototype());
   filler_oop->set_klass(Universe::intArrayKlassObj());
   const size_t array_length =
     pointer_delta(tlab_end, top()) - typeArrayOopDesc::header_size(T_INT);
@@ -53,7 +53,7 @@ inline void PSPromotionManager::claim_or_forward_depth(T* p) {
   assert(should_scavenge(p, true), "revisiting object?");
   assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");
   oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
-  Prefetch::write(obj->mark_addr_raw(), 0);
+  Prefetch::write(obj->mark_addr(), 0);
   push_depth(ScannerTask(p));
 }
@@ -141,7 +141,7 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
   // NOTE! We must be very careful with any methods that access the mark
   // in o. There may be multiple threads racing on it, and it may be forwarded
   // at any time. Do not use oop methods for accessing the mark!
-  markWord test_mark = o->mark_raw();
+  markWord test_mark = o->mark();
 
   // The same test as "o->is_forwarded()"
   if (!test_mark.is_marked()) {
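The NOTE above survives unchanged: promotion still reads the header once and then races via CAS, just spelled `mark()` now. A compressed sketch of that claim protocol under the renamed API (simplified from `copy_to_survivor_space`; `new_obj` stands in for the freshly allocated copy):

    markWord test_mark = o->mark();          // one racy read of the header
    if (!test_mark.is_marked()) {            // same test as o->is_forwarded()
      // Try to claim o by CASing in a forwarding pointer to new_obj;
      // forward_to_atomic() returns NULL on success, otherwise the copy
      // installed by the winning thread (order defaults to conservative).
      oop winner = o->forward_to_atomic(new_obj, test_mark);
      if (winner != NULL) {
        new_obj = winner;                    // lost the race, discard our copy
      }
    } else {
      new_obj = (oop) test_mark.decode_pointer();  // already forwarded
    }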
@@ -693,7 +693,7 @@ void DefNewGeneration::handle_promotion_failure(oop old) {
 
   _promotion_failed = true;
   _promotion_failed_info.register_copy_failure(old->size());
-  _preserved_marks_set.get()->push_if_necessary(old, old->mark_raw());
+  _preserved_marks_set.get()->push_if_necessary(old, old->mark());
   // forward to self
   old->forward_to(old);
@@ -132,7 +132,7 @@ template <class T> inline void MarkSweep::follow_root(T* p) {
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
-    if (!obj->mark_raw().is_marked()) {
+    if (!obj->mark().is_marked()) {
       mark_object(obj);
       follow_object(obj);
     }
@@ -148,7 +148,7 @@ void PreservedMark::adjust_pointer() {
 }
 
 void PreservedMark::restore() {
-  _obj->set_mark_raw(_mark);
+  _obj->set_mark(_mark);
 }
 
 // We preserve the mark which should be replaced at the end and the location
@@ -205,7 +205,7 @@ void MarkSweep::restore_marks() {
   while (!_preserved_oop_stack.is_empty()) {
     oop obj = _preserved_oop_stack.pop();
     markWord mark = _preserved_mark_stack.pop();
-    obj->set_mark_raw(mark);
+    obj->set_mark(mark);
   }
 }
@@ -38,8 +38,8 @@
 inline void MarkSweep::mark_object(oop obj) {
   // some marks may contain information we need to preserve so we store them away
   // and overwrite the mark. We'll restore it at the end of markSweep.
-  markWord mark = obj->mark_raw();
-  obj->set_mark_raw(markWord::prototype().set_marked());
+  markWord mark = obj->mark();
+  obj->set_mark(markWord::prototype().set_marked());
 
   if (obj->mark_must_be_preserved(mark)) {
     preserve_mark(obj, mark);
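mark_object() stashes the old header before overwriting it with the marked prototype, and restore_marks() (shown earlier in this diff) writes the stashed values back after the collection. The round trip, compressed into one sketch under the renamed API:

    // During marking:
    markWord mark = obj->mark();                        // stash the old header
    obj->set_mark(markWord::prototype().set_marked());  // overwrite with "marked"
    if (obj->mark_must_be_preserved(mark)) {
      preserve_mark(obj, mark);                         // remember the (obj, mark) pair
    }

    // At the end of mark-sweep (MarkSweep::restore_marks):
    oop saved = _preserved_oop_stack.pop();
    saved->set_mark(_preserved_mark_stack.pop());       // put the stashed header back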
@@ -50,7 +50,7 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
-    if (!obj->mark_raw().is_marked()) {
+    if (!obj->mark().is_marked()) {
       mark_object(obj);
       _marking_stack.push(obj);
     }
@@ -79,11 +79,11 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(Universe::heap()->is_in(obj), "should be in heap");
 
-    oop new_obj = oop(obj->mark_raw().decode_pointer());
+    oop new_obj = oop(obj->mark().decode_pointer());
 
-    assert(new_obj != NULL || // is forwarding ptr?
-           obj->mark_raw() == markWord::prototype() || // not gc marked?
-           (UseBiasedLocking && obj->mark_raw().has_bias_pattern()),
+    assert(new_obj != NULL || // is forwarding ptr?
+           obj->mark() == markWord::prototype() || // not gc marked?
+           (UseBiasedLocking && obj->mark().has_bias_pattern()),
           // not gc marked?
           "should be forwarded");
 
@@ -43,20 +43,4 @@
   FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
   FOR_EACH_CONCRETE_BARRIER_SET_DO(f)
 
-// To enable runtime-resolution of GC barriers on primitives, please
-// define SUPPORT_BARRIER_ON_PRIMITIVES.
-#ifdef SUPPORT_BARRIER_ON_PRIMITIVES
-#define ACCESS_PRIMITIVE_SUPPORT INTERNAL_BT_BARRIER_ON_PRIMITIVES
-#else
-#define ACCESS_PRIMITIVE_SUPPORT DECORATORS_NONE
-#endif
-
-#ifdef SUPPORT_NOT_TO_SPACE_INVARIANT
-#define ACCESS_TO_SPACE_INVARIANT_SUPPORT DECORATORS_NONE
-#else
-#define ACCESS_TO_SPACE_INVARIANT_SUPPORT INTERNAL_BT_TO_SPACE_INVARIANT
-#endif
-
-#define BT_BUILDTIME_DECORATORS (ACCESS_PRIMITIVE_SUPPORT | ACCESS_TO_SPACE_INVARIANT_SUPPORT)
-
 #endif // SHARE_GC_SHARED_BARRIERSETCONFIG_HPP
@@ -383,10 +383,10 @@ void MemAllocator::mem_clear(HeapWord* mem) const {
 oop MemAllocator::finish(HeapWord* mem) const {
   assert(mem != NULL, "NULL object pointer");
   if (UseBiasedLocking) {
-    oopDesc::set_mark_raw(mem, _klass->prototype_header());
+    oopDesc::set_mark(mem, _klass->prototype_header());
   } else {
     // May be bootstrapping
-    oopDesc::set_mark_raw(mem, markWord::prototype());
+    oopDesc::set_mark(mem, markWord::prototype());
   }
   // Need a release store to ensure array/class length, mark word, and
   // object zeroing are visible before setting the klass non-NULL, for
@@ -47,7 +47,7 @@ inline void PreservedMarks::push_if_necessary(oop obj, markWord m) {
 }
 
 inline void PreservedMarks::init_forwarded_mark(oop obj) {
-  obj->init_mark_raw();
+  obj->init_mark();
 }
 
 inline PreservedMarks::PreservedMarks()
@@ -59,7 +59,7 @@ inline PreservedMarks::PreservedMarks()
     0 /* max_cache_size */) { }
 
 void PreservedMarks::OopAndMarkWord::set_mark() const {
-  _o->set_mark_raw(_m);
+  _o->set_mark(_m);
 }
 
 #endif // SHARE_GC_SHARED_PRESERVEDMARKS_INLINE_HPP
@@ -379,7 +379,7 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size,
   } else {
     // if the object isn't moving we can just set the mark to the default
     // mark and handle it specially later on.
-    q->init_mark_raw();
+    q->init_mark();
     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   }
 
@@ -617,14 +617,14 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
     // allocate uninitialized int array
     typeArrayOop t = (typeArrayOop) allocate(size);
     assert(t != NULL, "allocation should succeed");
-    t->set_mark_raw(markWord::prototype());
+    t->set_mark(markWord::prototype());
     t->set_klass(Universe::intArrayKlassObj());
     t->set_length((int)length);
   } else {
     assert(size == CollectedHeap::min_fill_size(),
            "size for smallest fake object doesn't match");
     instanceOop obj = (instanceOop) allocate(size);
-    obj->set_mark_raw(markWord::prototype());
+    obj->set_mark(markWord::prototype());
     obj->set_klass_gap(0);
     obj->set_klass(SystemDictionary::Object_klass());
   }
@@ -117,7 +117,7 @@ public:
     _allowed_deadspace_words -= dead_length;
     CollectedHeap::fill_with_object(dead_start, dead_length);
     oop obj = oop(dead_start);
-    obj->set_mark_raw(obj->mark_raw().set_marked());
+    obj->set_mark(obj->mark().set_marked());
 
     assert(dead_length == (size_t)obj->size(), "bad filler object size");
     log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
@@ -336,7 +336,7 @@ inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
     // copy object and reinit its mark
     assert(cur_obj != compaction_top, "everything in this pass should be moving");
     Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
-    oop(compaction_top)->init_mark_raw();
+    oop(compaction_top)->init_mark();
     assert(oop(compaction_top)->klass() != NULL, "should have a class");
 
     debug_only(prev_obj = cur_obj);
@@ -40,7 +40,7 @@ inline HeapWord* ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) {
   // On this path, we can encounter the "marked" object, but with NULL
   // fwdptr. That object is still not forwarded, and we need to return
   // the object itself.
-  markWord mark = obj->mark_raw();
+  markWord mark = obj->mark();
   if (mark.is_marked()) {
     HeapWord* fwdptr = (HeapWord*) mark.clear_lock_bits().to_pointer();
     if (fwdptr != NULL) {
@@ -55,7 +55,7 @@ inline oop ShenandoahForwarding::get_forwardee_mutator(oop obj) {
   shenandoah_assert_correct(NULL, obj);
   assert(Thread::current()->is_Java_thread(), "Must be a mutator thread");
 
-  markWord mark = obj->mark_raw();
+  markWord mark = obj->mark();
   if (mark.is_marked()) {
     HeapWord* fwdptr = (HeapWord*) mark.clear_lock_bits().to_pointer();
     assert(fwdptr != NULL, "Forwarding pointer is never null here");
@@ -71,17 +71,17 @@ inline oop ShenandoahForwarding::get_forwardee(oop obj) {
 }
 
 inline bool ShenandoahForwarding::is_forwarded(oop obj) {
-  return obj->mark_raw().is_marked();
+  return obj->mark().is_marked();
 }
 
 inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) {
-  markWord old_mark = obj->mark_raw();
+  markWord old_mark = obj->mark();
   if (old_mark.is_marked()) {
     return oop(old_mark.clear_lock_bits().to_pointer());
   }
 
   markWord new_mark = markWord::encode_pointer_as_mark(update);
-  markWord prev_mark = obj->cas_set_mark_raw(new_mark, old_mark);
+  markWord prev_mark = obj->cas_set_mark(new_mark, old_mark, memory_order_conservative);
   if (prev_mark == old_mark) {
     return update;
   } else {
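One behavioral detail worth noting: `cas_set_mark_raw` carried a defaulted `memory_order_conservative` argument, while the unified `cas_set_mark(new, old, order)` overload takes the order explicitly, so Shenandoah now names the conservative (full-fence) order at the call site. The CAS it performs, restated as a small sketch (same markWord helpers as above):

    markWord old_mark = obj->mark();
    if (!old_mark.is_marked()) {
      markWord new_mark = markWord::encode_pointer_as_mark(update);
      // Full two-way fence; cas_set_mark() returns the previous mark, so
      // equality with old_mark means our forwarding pointer won the race.
      markWord prev = obj->cas_set_mark(new_mark, old_mark, memory_order_conservative);
      if (prev == old_mark) {
        return update;            // we installed the forwarding
      }
    }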
@@ -323,7 +323,7 @@ public:
       // Object fits into current region, record new location:
       assert(_compact_point + obj_size <= _to_region->end(), "must fit");
       shenandoah_assert_not_forwarded(NULL, p);
-      _preserved_marks->push_if_necessary(p, p->mark_raw());
+      _preserved_marks->push_if_necessary(p, p->mark());
       p->forward_to(oop(_compact_point));
       _compact_point += obj_size;
     }
@@ -431,7 +431,7 @@ void ShenandoahMarkCompact::calculate_target_humongous_objects() {
 
     if (start >= to_begin && start != r->index()) {
       // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
-      _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark_raw());
+      _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
       old_obj->forward_to(oop(heap->get_region(start)->bottom()));
       to_end = start;
       continue;
@@ -806,7 +806,7 @@ public:
       HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
       Copy::aligned_conjoint_words(compact_from, compact_to, size);
       oop new_obj = oop(compact_to);
-      new_obj->init_mark_raw();
+      new_obj->init_mark();
     }
   }
 };
@@ -922,7 +922,7 @@ void ShenandoahMarkCompact::compact_humongous_objects() {
                                    ShenandoahHeapRegion::region_size_words()*num_regions);
 
       oop new_obj = oop(heap->get_region(new_start)->bottom());
-      new_obj->init_mark_raw();
+      new_obj->init_mark();
 
       {
         for (size_t c = old_start; c <= old_end; c++) {
@@ -192,7 +192,7 @@ oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
   // identity_hash for all shared objects, so they are less likely to be written
   // into during run time, increasing the potential of memory sharing.
   int hash_original = obj->identity_hash();
-  archived_oop->set_mark_raw(markWord::prototype().copy_set_hash(hash_original));
+  archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
   assert(archived_oop->mark().is_unlocked(), "sanity");
 
   DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
@@ -269,11 +269,6 @@ public:
     OopType new_oop_value = new_value;
     return AccessInternal::atomic_xchg<decorators | INTERNAL_VALUE_IS_OOP>(addr, new_oop_value);
   }
-
-  static oop resolve(oop obj) {
-    verify_decorators<DECORATORS_NONE>();
-    return AccessInternal::resolve<decorators>(obj);
-  }
 };
 
 // Helper for performing raw accesses (knows only of memory ordering
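Taken together with the two `resolve` removals below, this deletes the entire `Access<>::resolve()` path: every remaining GC is to-space invariant, so an oop's address can be used directly. The caller-side effect, sketched (the "before" shape follows the old unsafe.cpp code later in this diff; `p` and `byte_offset` as there):

    // Before: a non-to-space-invariant GC could move the object, so code
    // handing out interior addresses first resolved the oop.
    if (p != NULL) {
      p = Access<>::resolve(p);
    }
    void* addr_before = cast_from_oop<address>(p) + byte_offset;

    // After: object addresses are stable; the arithmetic alone suffices.
    void* addr_after = cast_from_oop<address>(p) + byte_offset;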
@@ -649,8 +649,7 @@ namespace AccessInternal {
 
   template<DecoratorSet decorators>
   static bool is_hardwired_primitive() {
-    return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
-           !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
+    return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
   }
 
   template <DecoratorSet decorators, typename T>
@@ -946,21 +945,6 @@ namespace AccessInternal {
     clone(oop src, oop dst, size_t size) {
       RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
     }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
-    resolve(oop obj) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      return Raw::resolve(obj);
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
-    resolve(oop obj) {
-      return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
-    }
   };
 
   // Step 2: Reduce types.
@@ -1253,12 +1237,6 @@ namespace AccessInternal {
     PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
   }
 
-  template <DecoratorSet decorators>
-  inline oop resolve(oop obj) {
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
-    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
-  }
-
   // Infer the type that should be returned from an Access::oop_load.
   template <typename P, DecoratorSet decorators>
   class OopLoadProxy: public StackObj {
@@ -362,7 +362,7 @@ inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
                                             reinterpret_cast<jlong*>((oopDesc*)dst),
                                             align_object_size(size) / HeapWordsPerLong);
   // Clear the header
-  dst->init_mark_raw();
+  dst->init_mark();
 }
 
 #endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
@@ -54,20 +54,13 @@ const DecoratorSet DECORATORS_NONE = UCONST64(0);
 const DecoratorSet INTERNAL_CONVERT_COMPRESSED_OOP = UCONST64(1) << 1;
 const DecoratorSet INTERNAL_VALUE_IS_OOP = UCONST64(1) << 2;
 
-// == Internal build-time Decorators ==
-// * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
-// * INTERNAL_BT_TO_SPACE_INVARIANT: This is set in the barrierSetConfig.hpp file iff
-//   no GC is bundled in the build that is to-space invariant.
-const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
-const DecoratorSet INTERNAL_BT_TO_SPACE_INVARIANT = UCONST64(1) << 4;
-
 // == Internal run-time Decorators ==
 // * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
 //   access backends iff UseCompressedOops is true.
 const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS = UCONST64(1) << 5;
 
 const DecoratorSet INTERNAL_DECORATOR_MASK = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
-                                             INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
 
 // == Memory Ordering Decorators ==
 // The memory ordering decorators can be described in the following way:
@@ -238,7 +231,7 @@ namespace AccessInternal {
     // If no barrier strength has been picked, normal will be used
     static const DecoratorSet barrier_strength_default = memory_ordering_default |
       ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : DECORATORS_NONE);
-    static const DecoratorSet value = barrier_strength_default | BT_BUILDTIME_DECORATORS;
+    static const DecoratorSet value = barrier_strength_default;
   };
 
   // This function implements the above DecoratorFixup rules, but without meta
@@ -254,8 +247,7 @@ namespace AccessInternal {
     // If no barrier strength has been picked, normal will be used
     DecoratorSet barrier_strength_default = memory_ordering_default |
       ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : DECORATORS_NONE);
-    DecoratorSet value = barrier_strength_default | BT_BUILDTIME_DECORATORS;
-    return value;
+    return barrier_strength_default;
   }
 }
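With the build-time bits gone, `DecoratorFixup` composes only what the caller asked for plus the defaults shown above; nothing is OR'ed in at build time anymore. A sketch, reusing the rules from this hunk (illustrative; the exact default bits depend on the surrounding fixup steps):

    // A caller requesting only MO_RELAXED gets AS_NORMAL added because no
    // barrier strength was picked, and no BT_* bits appear in the result:
    const DecoratorSet fixed = AccessInternal::DecoratorFixup<MO_RELAXED>::value;
    // fixed == memory_ordering_default(MO_RELAXED) | AS_NORMAL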
@@ -91,7 +91,6 @@ class arrayOopDesc : public oopDesc {
   // Returns the address of the first element. The elements in the array will not
   // relocate from this address until a subsequent thread transition.
   inline void* base(BasicType type) const;
-  inline void* base_raw(BasicType type) const; // GC barrier invariant
 
   template <typename T>
   static T* obj_offset_to_raw(arrayOop obj, size_t offset_in_bytes, T* raw) {
@@ -29,11 +29,6 @@
 #include "oops/arrayOop.hpp"
 
-void* arrayOopDesc::base(BasicType type) const {
-  oop resolved_obj = Access<>::resolve(as_oop());
-  return arrayOop(resolved_obj)->base_raw(type);
-}
-
-void* arrayOopDesc::base_raw(BasicType type) const {
+void* arrayOopDesc::base(BasicType type) const {
   return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + base_offset_in_bytes(type));
 }
@@ -57,7 +57,7 @@ inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
 
 template <typename T, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
-  T* p = (T*)obj->obj_field_addr_raw<T>(map->offset());
+  T* p = (T*)obj->obj_field_addr<T>(map->offset());
   T* const end = p + map->count();
 
   for (; p < end; ++p) {
@@ -67,7 +67,7 @@ ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop o
 
 template <typename T, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
-  T* const start = (T*)obj->obj_field_addr_raw<T>(map->offset());
+  T* const start = (T*)obj->obj_field_addr<T>(map->offset());
   T* p = start + map->count();
 
   while (start < p) {
@@ -78,7 +78,7 @@ ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* ma
 
 template <typename T, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
-  T* p = (T*)obj->obj_field_addr_raw<T>(map->offset());
+  T* p = (T*)obj->obj_field_addr<T>(map->offset());
   T* end = p + map->count();
 
   T* const l = (T*)mr.start();
@@ -294,17 +294,17 @@ void ObjArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
       size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<narrowOop>(src_pos);
       size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<narrowOop>(dst_pos);
       assert(arrayOopDesc::obj_offset_to_raw<narrowOop>(s, src_offset, NULL) ==
-             objArrayOop(s)->obj_at_addr_raw<narrowOop>(src_pos), "sanity");
+             objArrayOop(s)->obj_at_addr<narrowOop>(src_pos), "sanity");
       assert(arrayOopDesc::obj_offset_to_raw<narrowOop>(d, dst_offset, NULL) ==
-             objArrayOop(d)->obj_at_addr_raw<narrowOop>(dst_pos), "sanity");
+             objArrayOop(d)->obj_at_addr<narrowOop>(dst_pos), "sanity");
       do_copy(s, src_offset, d, dst_offset, length, CHECK);
     } else {
       size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(src_pos);
       size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(dst_pos);
       assert(arrayOopDesc::obj_offset_to_raw<oop>(s, src_offset, NULL) ==
-             objArrayOop(s)->obj_at_addr_raw<oop>(src_pos), "sanity");
+             objArrayOop(s)->obj_at_addr<oop>(src_pos), "sanity");
       assert(arrayOopDesc::obj_offset_to_raw<oop>(d, dst_offset, NULL) ==
-             objArrayOop(d)->obj_at_addr_raw<oop>(dst_pos), "sanity");
+             objArrayOop(d)->obj_at_addr<oop>(dst_pos), "sanity");
       do_copy(s, src_offset, d, dst_offset, length, CHECK);
     }
   }
@@ -37,7 +37,7 @@
 
 template <typename T, class OopClosureType>
 void ObjArrayKlass::oop_oop_iterate_elements(objArrayOop a, OopClosureType* closure) {
-  T* p = (T*)a->base_raw();
+  T* p = (T*)a->base();
   T* const end = p + a->length();
 
   for (;p < end; p++) {
@@ -52,7 +52,7 @@ void ObjArrayKlass::oop_oop_iterate_elements_bounded(
   T* const l = (T*)low;
   T* const h = (T*)high;
 
-  T* p = (T*)a->base_raw();
+  T* p = (T*)a->base();
   T* end = p + a->length();
 
   if (p < l) {
@@ -101,8 +101,8 @@ void ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, Me
 // for objArrayOops.
 template <typename T, class OopClosureType>
 void ObjArrayKlass::oop_oop_iterate_range(objArrayOop a, OopClosureType* closure, int start, int end) {
-  T* low = (T*)a->base_raw() + start;
-  T* high = (T*)a->base_raw() + end;
+  T* low = (T*)a->base() + start;
+  T* high = (T*)a->base() + end;
 
   oop_oop_iterate_elements_bounded<T>(a, closure, low, high);
 }
@@ -40,7 +40,6 @@ class objArrayOopDesc : public arrayOopDesc {
   friend class CSetMarkWordClosure;
 
   template <class T> T* obj_at_addr(int index) const;
-  template <class T> T* obj_at_addr_raw(int index) const;
 
   template <class T>
   static ptrdiff_t obj_at_offset(int index) {
@@ -81,7 +80,6 @@ private:
 
   // base is the address following the header.
   HeapWord* base() const;
-  HeapWord* base_raw() const;
 
   // Accessing
   oop obj_at(int index) const;
@@ -32,18 +32,12 @@
 #include "runtime/globals.hpp"
 
 inline HeapWord* objArrayOopDesc::base() const { return (HeapWord*) arrayOopDesc::base(T_OBJECT); }
-inline HeapWord* objArrayOopDesc::base_raw() const { return (HeapWord*) arrayOopDesc::base_raw(T_OBJECT); }
 
 template <class T> T* objArrayOopDesc::obj_at_addr(int index) const {
   assert(is_within_bounds(index), "index %d out of bounds %d", index, length());
   return &((T*)base())[index];
 }
 
-template <class T> T* objArrayOopDesc::obj_at_addr_raw(int index) const {
-  assert(is_within_bounds(index), "index %d out of bounds %d", index, length());
-  return &((T*)base_raw())[index];
-}
-
 inline oop objArrayOopDesc::obj_at(int index) const {
   assert(is_within_bounds(index), "index %d out of bounds %d", index, length());
   ptrdiff_t offset = UseCompressedOops ? obj_at_offset<narrowOop>(index) : obj_at_offset<oop>(index);
@@ -111,7 +111,7 @@ bool oopDesc::is_oop(oop obj, bool ignore_mark_word) {
   if (ignore_mark_word) {
     return true;
   }
-  if (obj->mark_raw().value() != 0) {
+  if (obj->mark().value() != 0) {
     return true;
   }
   return !SafepointSynchronize::is_at_safepoint();
@@ -59,21 +59,18 @@ class oopDesc {
 
  public:
   inline markWord mark() const;
-  inline markWord mark_raw() const;
-  inline markWord* mark_addr_raw() const;
+  inline markWord* mark_addr() const;
 
   inline void set_mark(markWord m);
-  inline void set_mark_raw(markWord m);
-  static inline void set_mark_raw(HeapWord* mem, markWord m);
+  static inline void set_mark(HeapWord* mem, markWord m);
 
   inline void release_set_mark(markWord m);
   inline markWord cas_set_mark(markWord new_mark, markWord old_mark);
-  inline markWord cas_set_mark_raw(markWord new_mark, markWord old_mark, atomic_memory_order order = memory_order_conservative);
+  inline markWord cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order);
 
   // Used only to re-initialize the mark word (e.g., of promoted
   // objects during a GC) -- requires a valid klass pointer
   inline void init_mark();
-  inline void init_mark_raw();
 
   inline Klass* klass() const;
   inline Klass* klass_or_null() const;
@@ -117,11 +114,10 @@ class oopDesc {
 
  public:
   // field addresses in oop
-  inline void* field_addr(int offset) const;
-  inline void* field_addr_raw(int offset) const;
+  inline void* field_addr(int offset) const;
 
   // Need this as public for garbage collection.
-  template <class T> inline T* obj_field_addr_raw(int offset) const;
+  template <class T> inline T* obj_field_addr(int offset) const;
 
   template <typename T> inline size_t field_offset(T* p) const;
 
@@ -237,7 +233,6 @@ class oopDesc {
   inline bool is_locked() const;
   inline bool is_unlocked() const;
   inline bool has_bias_pattern() const;
-  inline bool has_bias_pattern_raw() const;
 
   // asserts and guarantees
   static bool is_oop(oop obj, bool ignore_mark_word = false);
@@ -291,9 +286,9 @@ class oopDesc {
   intptr_t slow_identity_hash();
 
   // marks are forwarded to stack when object is locked
-  inline bool has_displaced_mark_raw() const;
-  inline markWord displaced_mark_raw() const;
-  inline void set_displaced_mark_raw(markWord m);
+  inline bool has_displaced_mark() const;
+  inline markWord displaced_mark() const;
+  inline void set_displaced_mark(markWord m);
 
   // Checks if the mark word needs to be preserved
   inline bool mark_must_be_preserved() const;
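After this hunk, `oopDesc` exposes a single mark-word family; beyond the renames, the only signature change is that the CAS overload taking a memory order no longer defaults it. A short usage sketch of the surviving API (hypothetical caller; `fwd` is a placeholder oop):

    markWord m = obj->mark();                      // relaxed load of the header
    if (!m.is_marked()) {                          // not yet forwarded
      obj->set_mark(m.incr_age());                 // plain store
    }
    obj->init_mark();                              // reset to the klass prototype
    // CAS now always takes an explicit order (no defaulted argument):
    markWord prev = obj->cas_set_mark(markWord::encode_pointer_as_mark(fwd),
                                      obj->mark(), memory_order_conservative);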
@@ -47,11 +47,7 @@ markWord oopDesc::mark() const {
   return markWord(v);
 }
 
-markWord oopDesc::mark_raw() const {
-  return Atomic::load(&_mark);
-}
-
-markWord* oopDesc::mark_addr_raw() const {
+markWord* oopDesc::mark_addr() const {
   return (markWord*) &_mark;
 }
 
@@ -59,11 +55,7 @@ void oopDesc::set_mark(markWord m) {
   HeapAccess<MO_RELAXED>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
 }
 
-void oopDesc::set_mark_raw(markWord m) {
-  Atomic::store(&_mark, m);
-}
-
-void oopDesc::set_mark_raw(HeapWord* mem, markWord m) {
+void oopDesc::set_mark(HeapWord* mem, markWord m) {
   *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
 }
 
@@ -76,7 +68,7 @@ markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
   return markWord(v);
 }
 
-markWord oopDesc::cas_set_mark_raw(markWord new_mark, markWord old_mark, atomic_memory_order order) {
+markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
   return Atomic::cmpxchg(&_mark, old_mark, new_mark, order);
 }
 
@@ -84,10 +76,6 @@ void oopDesc::init_mark() {
   set_mark(markWord::prototype_for_klass(klass()));
 }
 
-void oopDesc::init_mark_raw() {
-  set_mark_raw(markWord::prototype_for_klass(klass()));
-}
-
 Klass* oopDesc::klass() const {
   if (UseCompressedClassPointers) {
     return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
@@ -216,11 +204,10 @@ bool oopDesc::is_array() const { return klass()->is_array_klass(); }
 bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
 bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
 
-void* oopDesc::field_addr_raw(int offset) const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
-void* oopDesc::field_addr(int offset) const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }
+void* oopDesc::field_addr(int offset) const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
 
 template <class T>
-T* oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }
+T* oopDesc::obj_field_addr(int offset) const { return (T*) field_addr(offset); }
 
 template <typename T>
 size_t oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }
@@ -269,20 +256,16 @@ bool oopDesc::has_bias_pattern() const {
   return mark().has_bias_pattern();
 }
 
-bool oopDesc::has_bias_pattern_raw() const {
-  return mark_raw().has_bias_pattern();
-}
-
 // Used only for markSweep, scavenging
 bool oopDesc::is_gc_marked() const {
-  return mark_raw().is_marked();
+  return mark().is_marked();
 }
 
 // Used by scavengers
 bool oopDesc::is_forwarded() const {
   // The extra heap check is needed since the obj might be locked, in which case the
   // mark would point to a stack location and have the sentinel bit cleared
-  return mark_raw().is_marked();
+  return mark().is_marked();
 }
 
 // Used by scavengers
@@ -290,7 +273,7 @@ void oopDesc::forward_to(oop p) {
   verify_forwardee(p);
   markWord m = markWord::encode_pointer_as_mark(p);
   assert(m.decode_pointer() == p, "encoding must be reversable");
-  set_mark_raw(m);
+  set_mark(m);
 }
 
 // Used by parallel scavengers
@@ -298,14 +281,14 @@ bool oopDesc::cas_forward_to(oop p, markWord compare, atomic_memory_order order)
   verify_forwardee(p);
   markWord m = markWord::encode_pointer_as_mark(p);
   assert(m.decode_pointer() == p, "encoding must be reversable");
-  return cas_set_mark_raw(m, compare, order) == compare;
+  return cas_set_mark(m, compare, order) == compare;
 }
 
 oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
   verify_forwardee(p);
   markWord m = markWord::encode_pointer_as_mark(p);
   assert(m.decode_pointer() == p, "encoding must be reversable");
-  markWord old_mark = cas_set_mark_raw(m, compare, order);
+  markWord old_mark = cas_set_mark(m, compare, order);
   if (old_mark == compare) {
     return NULL;
   } else {
@@ -317,7 +300,7 @@ oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order orde
 // The forwardee is used when copying during scavenge and mark-sweep.
 // It does need to clear the low two locking- and GC-related bits.
 oop oopDesc::forwardee() const {
-  return (oop) mark_raw().decode_pointer();
+  return (oop) mark().decode_pointer();
 }
 
 // Note that the forwardee is not the same thing as the displaced_mark.
@@ -330,19 +313,19 @@ oop oopDesc::forwardee_acquire() const {
 // The following method needs to be MT safe.
 uint oopDesc::age() const {
   assert(!is_forwarded(), "Attempt to read age from forwarded mark");
-  if (has_displaced_mark_raw()) {
-    return displaced_mark_raw().age();
+  if (has_displaced_mark()) {
+    return displaced_mark().age();
   } else {
-    return mark_raw().age();
+    return mark().age();
   }
 }
 
 void oopDesc::incr_age() {
   assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
-  if (has_displaced_mark_raw()) {
-    set_displaced_mark_raw(displaced_mark_raw().incr_age());
+  if (has_displaced_mark()) {
+    set_displaced_mark(displaced_mark().incr_age());
   } else {
-    set_mark_raw(mark_raw().incr_age());
+    set_mark(mark().incr_age());
   }
 }
@@ -394,16 +377,16 @@ intptr_t oopDesc::identity_hash() {
   }
 }
 
-bool oopDesc::has_displaced_mark_raw() const {
-  return mark_raw().has_displaced_mark_helper();
+bool oopDesc::has_displaced_mark() const {
+  return mark().has_displaced_mark_helper();
 }
 
-markWord oopDesc::displaced_mark_raw() const {
-  return mark_raw().displaced_mark_helper();
+markWord oopDesc::displaced_mark() const {
+  return mark().displaced_mark_helper();
 }
 
-void oopDesc::set_displaced_mark_raw(markWord m) {
-  mark_raw().set_displaced_mark_helper(m);
+void oopDesc::set_displaced_mark(markWord m) {
+  mark().set_displaced_mark_helper(m);
 }
 
 // Supports deferred calling of obj->klass().
@@ -420,7 +403,7 @@ public:
 };
 
 bool oopDesc::mark_must_be_preserved() const {
-  return mark_must_be_preserved(mark_raw());
+  return mark_must_be_preserved(mark());
 }
 
 bool oopDesc::mark_must_be_preserved(markWord m) const {
@@ -187,8 +187,7 @@ class JvmtiTagHashmap : public CHeapObj<mtInternal> {
 
   // hash a given key (oop) with the specified size
   static unsigned int hash(oop key, int size) {
-    const oop obj = Access<>::resolve(key);
-    const unsigned int hash = Universe::heap()->hash_oop(obj);
+    const unsigned int hash = Universe::heap()->hash_oop(key);
     return hash % size;
   }
@@ -115,8 +115,8 @@ static inline void assert_field_offset_sane(oop p, jlong field_offset) {
   assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
   if (byte_offset == (jint)byte_offset) {
     void* ptr_plus_disp = cast_from_oop<address>(p) + byte_offset;
-    assert(p->field_addr_raw((jint)byte_offset) == ptr_plus_disp,
-           "raw [ptr+disp] must be consistent with oop::field_addr_raw");
+    assert(p->field_addr((jint)byte_offset) == ptr_plus_disp,
+           "raw [ptr+disp] must be consistent with oop::field_addr");
   }
   jlong p_size = HeapWordSize * (jlong)(p->size());
   assert(byte_offset < p_size, "Unsafe access: offset " INT64_FORMAT " > object's size " INT64_FORMAT, (int64_t)byte_offset, (int64_t)p_size);
@@ -128,10 +128,6 @@ static inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
   assert_field_offset_sane(p, field_offset);
   jlong byte_offset = field_offset_to_byte_offset(field_offset);
 
-  if (p != NULL) {
-    p = Access<>::resolve(p);
-  }
-
   if (sizeof(char*) == sizeof(jint)) { // (this constant folds!)
     return cast_from_oop<address>(p) + (jint) byte_offset;
   } else {
@@ -39,14 +39,14 @@ class FakeOop {
   oopDesc _oop;
 
 public:
-  FakeOop() : _oop() { _oop.set_mark_raw(originalMark()); }
+  FakeOop() : _oop() { _oop.set_mark(originalMark()); }
 
   oop get_oop() { return &_oop; }
-  markWord mark() { return _oop.mark_raw(); }
-  void set_mark(markWord m) { _oop.set_mark_raw(m); }
+  markWord mark() { return _oop.mark(); }
+  void set_mark(markWord m) { _oop.set_mark(m); }
   void forward_to(oop obj) {
     markWord m = markWord::encode_pointer_as_mark(obj);
-    _oop.set_mark_raw(m);
+    _oop.set_mark(m);
   }
 
   static markWord originalMark() { return markWord(markWord::lock_mask_in_place); }