8033552: Fix missing volatile specifiers in CAS operations in GC code

Add missing volatile specifiers.

Reviewed-by: kbarrett, tschatzl
Author: Erik Österlund  2016-09-20 15:42:17 -04:00
parent 7ee6161c86
commit f6f5dfdb4a
20 changed files with 61 additions and 59 deletions
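
Why the volatile qualifiers matter: HotSpot's Atomic::cmpxchg_ptr takes a volatile-qualified destination, so a field that is updated with CAS, and every accessor such as top_addr() or global_finger_addr() that hands out its address, should carry the volatile qualifier instead of having it cast in at each call site. The following is a minimal standalone sketch modeled on the CMS global-finger idiom that this patch touches; HeapWord, cmpxchg_ptr and MarkingTask are simplified stand-ins (the CAS is emulated with the GCC/Clang builtin __sync_val_compare_and_swap), not the HotSpot declarations.

#include <cstddef>    // NULL
#include <cstdint>    // uintptr_t

typedef uintptr_t HeapWord;  // stand-in for HotSpot's HeapWord; illustrative only

// Stand-in for Atomic::cmpxchg_ptr: store exchange_value into *dest only if the
// current value equals compare_value; always return the value that was observed.
static HeapWord* cmpxchg_ptr(HeapWord* exchange_value,
                             HeapWord* volatile* dest,
                             HeapWord* compare_value) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

class MarkingTask {
  char               _pad_front[64];   // padding to keep the hot word on its own cache line
  HeapWord* volatile _global_finger;   // shared; advanced with CAS by multiple workers
  char               _pad_back[64];
 public:
  MarkingTask() : _global_finger(NULL) { }

  // Because the field is "HeapWord* volatile", taking its address yields
  // "HeapWord* volatile*", so no cast is needed to pass it to a CAS.
  HeapWord* volatile* global_finger_addr() { return &_global_finger; }

  // Monotonically advance the shared finger, modeled on the CMS
  // "bump the global finger" idiom.
  void bump_global_finger(HeapWord* f) {
    HeapWord* read = _global_finger;
    while (f > read) {
      HeapWord* cur = read;
      read = cmpxchg_ptr(f, &_global_finger, cur);
      if (read == cur) {
        break;   // our CAS installed f
      }
      // otherwise another worker moved the finger; retry against the new value
    }
  }
};

Declaring the field as HeapWord* volatile is what gives &_global_finger the type HeapWord* volatile*; that is why the signature change ripples from MutableSpace and the generations up through CollectedHeap, the JVMCI CompilerToVM mirror, and the vmStructs tables in the hunks below.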


@@ -258,16 +258,15 @@ class PushOrMarkClosure: public MetadataAwareOopClosure {
  // the closure ParMarkFromRootsClosure.
  class ParPushOrMarkClosure: public MetadataAwareOopClosure {
  private:
- CMSCollector* _collector;
- MemRegion _whole_span;
- MemRegion _span; // local chunk
- CMSBitMap* _bit_map;
- OopTaskQueue* _work_queue;
- CMSMarkStack* _overflow_stack;
- HeapWord* const _finger;
- HeapWord** const _global_finger_addr;
- ParMarkFromRootsClosure* const
- _parent;
+ CMSCollector* _collector;
+ MemRegion _whole_span;
+ MemRegion _span; // local chunk
+ CMSBitMap* _bit_map;
+ OopTaskQueue* _work_queue;
+ CMSMarkStack* _overflow_stack;
+ HeapWord* const _finger;
+ HeapWord* volatile* const _global_finger_addr;
+ ParMarkFromRootsClosure* const _parent;
  protected:
  DO_OOP_WORK_DEFN
  public:
@@ -277,7 +276,7 @@ class ParPushOrMarkClosure: public MetadataAwareOopClosure {
  OopTaskQueue* work_queue,
  CMSMarkStack* mark_stack,
  HeapWord* finger,
- HeapWord** global_finger_addr,
+ HeapWord* volatile* global_finger_addr,
  ParMarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);


@@ -3025,14 +3025,14 @@ class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
  // MT Concurrent Marking Task
  class CMSConcMarkingTask: public YieldingFlexibleGangTask {
- CMSCollector* _collector;
- uint _n_workers; // requested/desired # workers
- bool _result;
- CompactibleFreeListSpace* _cms_space;
- char _pad_front[64]; // padding to ...
- HeapWord* _global_finger; // ... avoid sharing cache line
- char _pad_back[64];
- HeapWord* _restart_addr;
+ CMSCollector* _collector;
+ uint _n_workers; // requested/desired # workers
+ bool _result;
+ CompactibleFreeListSpace* _cms_space;
+ char _pad_front[64]; // padding to ...
+ HeapWord* volatile _global_finger; // ... avoid sharing cache line
+ char _pad_back[64];
+ HeapWord* _restart_addr;
  // Exposed here for yielding support
  Mutex* const _bit_map_lock;
@@ -3068,7 +3068,7 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask {
  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
- HeapWord** global_finger_addr() { return &_global_finger; }
+ HeapWord* volatile* global_finger_addr() { return &_global_finger; }
  CMSConcMarkingTerminator* terminator() { return &_term; }
@@ -6554,7 +6554,7 @@ void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
  // Note: the local finger doesn't advance while we drain
  // the stack below, but the global finger sure can and will.
- HeapWord** gfa = _task->global_finger_addr();
+ HeapWord* volatile* gfa = _task->global_finger_addr();
  ParPushOrMarkClosure pushOrMarkClosure(_collector,
  _span, _bit_map,
  _work_queue,
@@ -6721,7 +6721,7 @@ ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
  OopTaskQueue* work_queue,
  CMSMarkStack* overflow_stack,
  HeapWord* finger,
- HeapWord** global_finger_addr,
+ HeapWord* volatile* global_finger_addr,
  ParMarkFromRootsClosure* parent) :
  MetadataAwareOopClosure(collector->ref_processor()),
  _collector(collector),


@@ -724,12 +724,12 @@ class CMSCollector: public CHeapObj<mtGC> {
  // Support for parallelizing young gen rescan in CMS remark phase
  ParNewGeneration* _young_gen;
- HeapWord** _top_addr; // ... Top of Eden
- HeapWord** _end_addr; // ... End of Eden
- Mutex* _eden_chunk_lock;
- HeapWord** _eden_chunk_array; // ... Eden partitioning array
- size_t _eden_chunk_index; // ... top (exclusive) of array
- size_t _eden_chunk_capacity; // ... max entries in array
+ HeapWord* volatile* _top_addr; // ... Top of Eden
+ HeapWord** _end_addr; // ... End of Eden
+ Mutex* _eden_chunk_lock;
+ HeapWord** _eden_chunk_array; // ... Eden partitioning array
+ size_t _eden_chunk_index; // ... top (exclusive) of array
+ size_t _eden_chunk_capacity; // ... max entries in array
  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;


@@ -56,7 +56,7 @@ class PerRegionTable: public CHeapObj<mtGC> {
  PerRegionTable * _collision_list_next;
  // Global free list of PRTs
- static PerRegionTable* _free_list;
+ static PerRegionTable* volatile _free_list;
  protected:
  // We need access in order to union things into the base table.
@@ -249,7 +249,7 @@ public:
  static void test_fl_mem_size();
  };
- PerRegionTable* PerRegionTable::_free_list = NULL;
+ PerRegionTable* volatile PerRegionTable::_free_list = NULL;
  size_t OtherRegionsTable::_max_fine_entries = 0;
  size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;


@@ -283,7 +283,7 @@ size_t RSHashTable::mem_size() const {
  // ----------------------------------------------------------------------
- SparsePRT* SparsePRT::_head_expanded_list = NULL;
+ SparsePRT* volatile SparsePRT::_head_expanded_list = NULL;
  void SparsePRT::add_to_expanded_list(SparsePRT* sprt) {
  // We could expand multiple times in a pause -- only put on list once.


@@ -250,7 +250,7 @@ class SparsePRT VALUE_OBJ_CLASS_SPEC {
  bool should_be_on_expanded_list();
- static SparsePRT* _head_expanded_list;
+ static SparsePRT* volatile _head_expanded_list;
  public:
  SparsePRT(HeapRegion* hr);


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@ class MutableSpace: public ImmutableSpace {
  MemRegion _last_setup_region;
  size_t _alignment;
  protected:
- HeapWord* _top;
+ HeapWord* volatile _top;
  MutableSpaceMangler* mangler() { return _mangler; }
@@ -69,7 +69,7 @@ class MutableSpace: public ImmutableSpace {
  HeapWord* top() const { return _top; }
  virtual void set_top(HeapWord* value) { _top = value; }
- HeapWord** top_addr() { return &_top; }
+ HeapWord* volatile* top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -175,7 +175,7 @@ class ParallelScavengeHeap : public CollectedHeap {
  bool supports_inline_contig_alloc() const { return !UseNUMA; }
- HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
+ HeapWord* volatile* top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord* volatile*)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
  void ensure_parsability(bool retire_tlabs);


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -162,7 +162,7 @@ class PSYoungGen : public CHeapObj<mtGC> {
  return result;
  }
- HeapWord** top_addr() const { return eden_space()->top_addr(); }
+ HeapWord* volatile* top_addr() const { return eden_space()->top_addr(); }
  HeapWord** end_addr() const { return eden_space()->end_addr(); }
  // Iteration.


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,8 @@
  #define SHARE_VM_GC_PARALLEL_VMSTRUCTS_PARALLELGC_HPP
  #define VM_STRUCTS_PARALLELGC(nonstatic_field, \
- static_field) \
+ volatile_nonstatic_field, \
+ static_field) \
  \
  /**********************/ \
  /* Parallel GC fields */ \
@@ -40,7 +41,7 @@
  nonstatic_field(ImmutableSpace, _bottom, HeapWord*) \
  nonstatic_field(ImmutableSpace, _end, HeapWord*) \
  \
- nonstatic_field(MutableSpace, _top, HeapWord*) \
+ volatile_nonstatic_field(MutableSpace, _top, HeapWord*) \
  \
  nonstatic_field(PSYoungGen, _reserved, MemRegion) \
  nonstatic_field(PSYoungGen, _virtual_space, PSVirtualSpace*) \


@@ -512,7 +512,7 @@ size_t DefNewGeneration::contiguous_available() const {
  }
- HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
+ HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
  HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
  void DefNewGeneration::object_iterate(ObjectClosure* blk) {


@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -225,7 +225,7 @@ protected:
  size_t max_survivor_size() const { return _max_survivor_size; }
  bool supports_inline_contig_alloc() const { return true; }
- HeapWord** top_addr() const;
+ HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;
  // Thread-local allocation buffers


@@ -350,7 +350,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
- virtual HeapWord** top_addr() const {
+ virtual HeapWord* volatile* top_addr() const {
  guarantee(false, "inline contiguous allocation not supported");
  return NULL;
  }


@@ -721,7 +721,7 @@ bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
  }
- HeapWord** GenCollectedHeap::top_addr() const {
+ HeapWord* volatile* GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
  }


@@ -184,7 +184,7 @@ public:
  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
- HeapWord** top_addr() const;
+ HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;
  // Perform a full collection of the heap; intended for use in implementing


@@ -263,7 +263,7 @@ class Generation: public CHeapObj<mtGC> {
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
- virtual HeapWord** top_addr() const { return NULL; }
+ virtual HeapWord* volatile* top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
  // Thread-local allocation buffers


@@ -112,7 +112,7 @@ uintptr_t CompilerToVM::Data::Universe_verify_oop_bits;
  bool CompilerToVM::Data::_supports_inline_contig_alloc;
  HeapWord** CompilerToVM::Data::_heap_end_addr;
- HeapWord** CompilerToVM::Data::_heap_top_addr;
+ HeapWord* volatile* CompilerToVM::Data::_heap_top_addr;
  int CompilerToVM::Data::_max_oop_map_stack_offset;
  jbyte* CompilerToVM::Data::cardtable_start_address;
@@ -153,7 +153,7 @@ void CompilerToVM::Data::initialize() {
  _supports_inline_contig_alloc = Universe::heap()->supports_inline_contig_alloc();
  _heap_end_addr = _supports_inline_contig_alloc ? Universe::heap()->end_addr() : (HeapWord**) -1;
- _heap_top_addr = _supports_inline_contig_alloc ? Universe::heap()->top_addr() : (HeapWord**) -1;
+ _heap_top_addr = _supports_inline_contig_alloc ? Universe::heap()->top_addr() : (HeapWord* volatile*) -1;
  _max_oop_map_stack_offset = (OopMapValue::register_mask - VMRegImpl::stack2reg(0)->value()) * VMRegImpl::stack_slot_size;
  int max_oop_map_stack_index = _max_oop_map_stack_offset / VMRegImpl::stack_slot_size;
@@ -1604,4 +1604,3 @@ JNINativeMethod CompilerToVM::methods[] = {
  int CompilerToVM::methods_count() {
  return sizeof(methods) / sizeof(JNINativeMethod);
  }
-


@@ -58,7 +58,7 @@ class CompilerToVM {
  static bool _supports_inline_contig_alloc;
  static HeapWord** _heap_end_addr;
- static HeapWord** _heap_top_addr;
+ static HeapWord* volatile* _heap_top_addr;
  static int _max_oop_map_stack_offset;
  static jbyte* cardtable_start_address;


@@ -69,7 +69,7 @@
  \
  static_field(CompilerToVM::Data, _supports_inline_contig_alloc, bool) \
  static_field(CompilerToVM::Data, _heap_end_addr, HeapWord**) \
- static_field(CompilerToVM::Data, _heap_top_addr, HeapWord**) \
+ static_field(CompilerToVM::Data, _heap_top_addr, HeapWord* volatile*) \
  \
  static_field(CompilerToVM::Data, _max_oop_map_stack_offset, int) \
  \


@@ -2970,6 +2970,7 @@ VMStructEntry VMStructs::localHotSpotVMStructs[] = {
  #if INCLUDE_ALL_GCS
  VM_STRUCTS_PARALLELGC(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
+ GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
  GENERATE_STATIC_VM_STRUCT_ENTRY)
  VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
@@ -2982,7 +2983,7 @@ VMStructEntry VMStructs::localHotSpotVMStructs[] = {
  #if INCLUDE_TRACE
  VM_STRUCTS_TRACE(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
- GENERATE_STATIC_VM_STRUCT_ENTRY)
+ GENERATE_STATIC_VM_STRUCT_ENTRY)
  #endif
  VM_STRUCTS_EXT(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3168,11 +3169,12 @@ VMStructs::init() {
  #if INCLUDE_ALL_GCS
  VM_STRUCTS_PARALLELGC(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
- CHECK_STATIC_VM_STRUCT_ENTRY);
+ CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_STATIC_VM_STRUCT_ENTRY);
  VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
- CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
- CHECK_STATIC_VM_STRUCT_ENTRY);
+ CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_STATIC_VM_STRUCT_ENTRY);
  VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
  CHECK_STATIC_VM_STRUCT_ENTRY);
@@ -3181,7 +3183,7 @@ VMStructs::init() {
  #if INCLUDE_TRACE
  VM_STRUCTS_TRACE(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
- CHECK_STATIC_VM_STRUCT_ENTRY);
+ CHECK_STATIC_VM_STRUCT_ENTRY);
  #endif
  VM_STRUCTS_EXT(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3293,6 +3295,7 @@ VMStructs::init() {
  CHECK_NO_OP));
  #if INCLUDE_ALL_GCS
  debug_only(VM_STRUCTS_PARALLELGC(ENSURE_FIELD_TYPE_PRESENT,
+ ENSURE_FIELD_TYPE_PRESENT,
  ENSURE_FIELD_TYPE_PRESENT));
  debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT,
  ENSURE_FIELD_TYPE_PRESENT,