/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoader.hpp"
#include "jvm.h"
#include "jvmtifiles/jvmti.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "nmt/memTracker.hpp"
#include "os_posix.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/park.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/attachListener.hpp"
#include "utilities/align.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/permitForbiddenFunctions.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrNativeLibraryLoadEvent.hpp"
#endif
#ifdef AIX
#include "loadlib_aix.hpp"
#include "os_aix.hpp"
#endif
#ifdef LINUX
#include "os_linux.hpp"
#endif

#include <dirent.h>
#include <dlfcn.h>
#include <grp.h>
#include <locale.h>
#include <netdb.h>
#include <pwd.h>
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <spawn.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <utmpx.h>

#ifdef __APPLE__
  #include <crt_externs.h>
#endif

#define ROOT_UID 0

#ifndef MAP_ANONYMOUS
  #define MAP_ANONYMOUS MAP_ANON
#endif

/* Input/Output types for mincore(2) */
typedef LINUX_ONLY(unsigned) char mincore_vec_t;

static jlong initial_time_count = 0;

static int clock_tics_per_sec = 100;

// Platform minimum stack allowed
size_t os::_os_min_stack_allowed = PTHREAD_STACK_MIN;

// Check core dump limit and report possible place where core can be found
void os::check_core_dump_prerequisites(char* buffer, size_t bufferSize, bool check_only) {
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
    VMError::record_coredump_status(buffer, false);
  } else {
    struct rlimit rlim;
    bool success = true;
    bool warn = true;
    char core_path[PATH_MAX];
    if (get_core_path(core_path, PATH_MAX) <= 0) {
      jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
#ifdef LINUX
    } else if (core_path[0] == '"') { // redirect to user process
      jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
#endif
    } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
      jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
    } else {
      switch(rlim.rlim_cur) {
        case RLIM_INFINITY:
          jio_snprintf(buffer, bufferSize, "%s", core_path);
          warn = false;
          break;
        case 0:
          jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again");
          success = false;
          break;
        default:
          jio_snprintf(buffer, bufferSize, "%s (max size " UINT64_FORMAT "k). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, uint64_t(rlim.rlim_cur) / K);
          break;
      }
    }

    if (!check_only) {
      VMError::record_coredump_status(buffer, success);
    } else if (warn) {
      warning("CreateCoredumpOnCrash specified, but %s", buffer);
    }
  }
}
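
// For example, with a finite core limit of rlim_cur == 2G the buffer above
// would read (the path is illustrative):
//   /var/core/core.1234 (max size 2097152k). To ensure a full core dump,
//   try "ulimit -c unlimited" before starting Java again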

bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
#ifdef _AIX
  committed_start = start;
  committed_size = size;
  return true;
#else
  int mincore_return_value;
  constexpr size_t stripe = 1024;  // query this many pages each time
  mincore_vec_t vec[stripe + 1];

  // set a guard
  DEBUG_ONLY(vec[stripe] = 'X');

  size_t page_sz = os::vm_page_size();
  uintx pages = size / page_sz;

  assert(is_aligned(start, page_sz), "Start address must be page aligned");
  assert(is_aligned(size, page_sz), "Size must be page aligned");

  committed_start = nullptr;

  int loops = checked_cast<int>((pages + stripe - 1) / stripe);
  int committed_pages = 0;
  address loop_base = start;
  bool found_range = false;

  for (int index = 0; index < loops && !found_range; index++) {
    assert(pages > 0, "Nothing to do");
    uintx pages_to_query = (pages >= stripe) ? stripe : pages;
    pages -= pages_to_query;

    // Get stable read
    int fail_count = 0;
    while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN) {
      if (++fail_count == 1000) {
        return false;
      }
    }

    // During shutdown, some memory goes away without properly notifying NMT,
    // E.g. ConcurrentGCThread/WatcherThread can exit without deleting thread object.
    // Bailout and return as not committed for now.
    if (mincore_return_value == -1 && errno == ENOMEM) {
      return false;
    }

    // If mincore is not supported.
    if (mincore_return_value == -1 && errno == ENOSYS) {
      return false;
    }

    assert(vec[stripe] == 'X', "overflow guard");
    assert(mincore_return_value == 0, "Range must be valid");

    // Process this stripe
    for (uintx vecIdx = 0; vecIdx < pages_to_query; vecIdx++) {
      if ((vec[vecIdx] & 0x01) == 0) { // not committed
        // End of current contiguous region
        if (committed_start != nullptr) {
          found_range = true;
          break;
        }
      } else { // committed
        // Start of region
        if (committed_start == nullptr) {
          committed_start = loop_base + page_sz * vecIdx;
        }
        committed_pages++;
      }
    }

    loop_base += pages_to_query * page_sz;
  }

  if (committed_start != nullptr) {
    assert(committed_pages > 0, "Must have committed region");
    assert(committed_pages <= int(size / page_sz), "Can not commit more than it has");
    assert(committed_start >= start && committed_start < start + size, "Out of range");
    committed_size = page_sz * committed_pages;
    return true;
  } else {
    assert(committed_pages == 0, "Should not have committed region");
    return false;
  }
#endif
}
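
// Usage sketch (illustrative; the probed range below is hypothetical):
//
//   address probe = ...;                       // page-aligned base of interest
//   size_t  len   = 16 * os::vm_page_size();   // page-aligned length
//   address committed_start;
//   size_t  committed_size;
//   if (os::committed_in_range(probe, len, committed_start, committed_size)) {
//     // [committed_start, committed_start + committed_size) is the first
//     // contiguous committed sub-range found inside [probe, probe + len).
//   }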

int os::get_native_stack(address* stack, int frames, int toSkip) {
  int frame_idx = 0;
  int num_of_frames;  // number of frames captured
  frame fr = os::current_frame();
  while (fr.pc() && frame_idx < frames) {
    if (toSkip > 0) {
      toSkip--;
    } else {
      stack[frame_idx++] = fr.pc();
    }
    if (fr.fp() == nullptr || fr.cb() != nullptr ||
        fr.sender_pc() == nullptr || os::is_first_C_frame(&fr)) {
      break;
    }
    fr = os::get_sender_for_C_frame(&fr);
  }
  num_of_frames = frame_idx;
  for (; frame_idx < frames; frame_idx++) {
    stack[frame_idx] = nullptr;
  }

  return num_of_frames;
}
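
// Usage sketch (illustrative): capture up to 8 native caller PCs, skipping
// one frame (the immediate caller):
//
//   address pcs[8];
//   int captured = os::get_native_stack(pcs, 8, 1 /* toSkip */);
//   // pcs[0 .. captured-1] hold return PCs; the remaining slots are nullptr.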

int os::get_last_error() {
  return errno;
}

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

// Return true if the process is running with elevated privileges, i.e. the
// real and effective uid or gid differ (setuid/setgid executable).
bool os::have_special_privileges() {
  static bool privileges = (getuid() != geteuid()) || (getgid() != getegid());
  return privileges;
}

void os::wait_for_keypress_at_exit(void) {
  // don't do anything on posix platforms
  return;
}

int os::create_file_for_heap(const char* dir) {
  int fd;

#if defined(LINUX) && defined(O_TMPFILE)
  char* native_dir = os::strdup(dir);
  if (native_dir == nullptr) {
    vm_exit_during_initialization(err_msg("strdup failed during creation of backing file for heap (%s)", os::strerror(errno)));
    return -1;
  }
  os::native_path(native_dir);
  fd = os::open(dir, O_TMPFILE | O_RDWR, S_IRUSR | S_IWUSR);
  os::free(native_dir);

  if (fd == -1)
#endif
  {
    const char name_template[] = "/jvmheap.XXXXXX";

    size_t fullname_len = strlen(dir) + strlen(name_template);
    char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
    if (fullname == nullptr) {
      vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
      return -1;
    }
    int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
    assert((size_t)n == fullname_len, "Unexpected number of characters in string");

    os::native_path(fullname);

    // create a new file.
    fd = mkstemp(fullname);

    if (fd < 0) {
      warning("Could not create file for heap with template %s", fullname);
      os::free(fullname);
      return -1;
    } else {
      // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted.
      int ret = unlink(fullname);
      assert_with_errno(ret == 0, "unlink returned error");
    }

    os::free(fullname);
  }
  return fd;
}
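
// Design note: both branches above end up with an anonymous, unlinked file,
// so the backing storage is reclaimed automatically once 'fd' is closed.
// O_TMPFILE (where available) never exposes a name in the directory, while
// the mkstemp fallback briefly creates a visible name and then unlinks it.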

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek(fd, (off_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek(fd, (off_t)offset, SEEK_SET);
}

// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  DIR *dir = nullptr;
  struct dirent *ptr;

  dir = ::opendir(path);
  if (dir == nullptr) return true;

  // Scan the directory
  bool result = true;
  while (result && (ptr = ::readdir(dir)) != nullptr) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  ::closedir(dir);
  return result;
}

static char* reserve_mmapped_memory(size_t bytes, char* requested_addr, MemTag mem_tag) {
  char * addr;
  int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS;
  if (requested_addr != nullptr) {
    assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size");
    flags |= MAP_FIXED;
  }

  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
  // touch an uncommitted page. Otherwise, the read/write might
  // succeed if we have enough swap space to back the physical page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
    MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC, mem_tag);
    return addr;
  }
  return nullptr;
}

static int util_posix_fallocate(int fd, off_t offset, off_t len) {
  static_assert(sizeof(off_t) == 8, "Expected Large File Support in this file");
#ifdef __APPLE__
  fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
  // First we try to get a continuous chunk of disk space
  int ret = fcntl(fd, F_PREALLOCATE, &store);
  if (ret == -1) {
    // Maybe we are too fragmented, try to allocate non-continuous range
    store.fst_flags = F_ALLOCATEALL;
    ret = fcntl(fd, F_PREALLOCATE, &store);
  }
  if (ret != -1) {
    return ftruncate(fd, len);
  }
  return -1;
#else
  return posix_fallocate(fd, offset, len);
#endif
}

// Map the given address range to the provided file descriptor.
char* os::map_memory_to_file(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");

  // allocate space for the file
  int ret = util_posix_fallocate(fd, 0, (off_t)size);
  if (ret != 0) {
    vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory. error(%d)", ret));
    return nullptr;
  }

  int prot = PROT_READ | PROT_WRITE;
  int flags = MAP_SHARED;
  if (base != nullptr) {
    flags |= MAP_FIXED;
  }
  char* addr = (char*)mmap(base, size, prot, flags, fd, 0);

  if (addr == MAP_FAILED) {
    warning("Failed mmap to file. (%s)", os::strerror(errno));
    return nullptr;
  }
  if (base != nullptr && addr != base) {
    if (!os::release_memory(addr, size)) {
      warning("Could not release memory on unsuccessful file mapping");
    }
    return nullptr;
  }
  return addr;
}

char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");
  assert(base != nullptr, "Base cannot be null");

  return map_memory_to_file(base, size, fd);
}

static size_t calculate_aligned_extra_size(size_t size, size_t alignment) {
  assert(is_aligned(alignment, os::vm_allocation_granularity()),
         "Alignment must be a multiple of allocation granularity (page size)");
  assert(is_aligned(size, os::vm_allocation_granularity()),
         "Size must be a multiple of allocation granularity (page size)");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");
  return extra_size;
}

// After a bigger chunk was mapped, unmaps start and end parts to get the requested alignment.
static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base, size_t extra_size) {
  // Do manual alignment
  char* aligned_base = align_up(extra_base, alignment);

  // [  |                                       |  ]
  // ^ extra_base
  //    ^ extra_base + begin_offset == aligned_base
  //     extra_base + begin_offset + size       ^
  //                       extra_base + extra_size ^
  // |<>| == begin_offset
  //                              end_offset == |<>|
  size_t begin_offset = aligned_base - extra_base;
  size_t end_offset = (extra_base + extra_size) - (aligned_base + size);

  if (begin_offset > 0) {
    os::release_memory(extra_base, begin_offset);
  }

  if (end_offset > 0) {
    os::release_memory(extra_base + begin_offset + size, end_offset);
  }

  return aligned_base;
}
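
// Worked example (illustrative numbers): for size = 4M, alignment = 2M and a
// raw mapping at extra_base = 0x7f0000100000 with extra_size = 6M, align_up
// gives aligned_base = 0x7f0000200000. The 1M below aligned_base and the 1M
// above aligned_base + 4M are released, leaving exactly the aligned 4M range.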

// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
// so on posix, unmap the section at the start and at the end of the chunk that we mapped
// rather than unmapping and remapping the whole chunk to get requested alignment.
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
  size_t extra_size = calculate_aligned_extra_size(size, alignment);
  char* extra_base = os::reserve_memory(extra_size, exec);
  if (extra_base == nullptr) {
    return nullptr;
  }
  return chop_extra_memory(size, alignment, extra_base, extra_size);
}

char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_desc, MemTag mem_tag) {
  size_t extra_size = calculate_aligned_extra_size(size, alignment);
  // For file mapping, we do not call os::map_memory_to_file(size,fd) since:
  // - we later chop away parts of the mapping using os::release_memory and that could fail if the
  //   original mmap call had been tied to an fd.
  // - The memory API os::reserve_memory uses is an implementation detail. It may (and usually is)
  //   mmap but it also may System V shared memory which cannot be uncommitted as a whole, so
  //   chopping off and unmapping excess bits back and front (see below) would not work.
  char* extra_base = reserve_mmapped_memory(extra_size, nullptr, mem_tag);
  if (extra_base == nullptr) {
    return nullptr;
  }
  char* aligned_base = chop_extra_memory(size, alignment, extra_base, extra_size);
  // After we have an aligned address, we can replace anonymous mapping with file mapping
  if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == nullptr) {
    vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
  }
  MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
  return aligned_base;
}

int os::get_fileno(FILE* fp) {
  return NOT_AIX(::)fileno(fp);
}

struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
  return gmtime_r(clock, res);
}

void os::Posix::print_load_average(outputStream* st) {
  st->print("load average: ");
  double loadavg[3];
  int res = os::loadavg(loadavg, 3);
  if (res != -1) {
    st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  } else {
    st->print("Unavailable");
  }
  st->cr();
}

// boot/uptime information;
// unfortunately it does not work on macOS and Linux because the utx chain has no entry
// for reboot at least on my test machines
void os::Posix::print_uptime_info(outputStream* st) {
  int bootsec = -1;
  time_t currsec = time(nullptr);
  struct utmpx* ent;
  setutxent();
  while ((ent = getutxent())) {
    if (!strcmp("system boot", ent->ut_line)) {
      bootsec = (int)ent->ut_tv.tv_sec;
      break;
    }
  }

  if (bootsec != -1) {
    os::print_dhm(st, "OS uptime:", currsec - bootsec);
  }
}

static void print_rlimit(outputStream* st, const char* msg,
                         int resource, bool output_k = false) {
  struct rlimit rlim;

  st->print("%s", msg);
  int res = getrlimit(resource, &rlim);
  if (res == -1) {
    st->print("could not obtain value");
  } else {
    // soft limit
    if (rlim.rlim_cur == RLIM_INFINITY) { st->print("infinity"); }
    else {
      if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / K); }
      else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur)); }
    }
    // hard limit
    st->print("/");
    if (rlim.rlim_max == RLIM_INFINITY) { st->print("infinity"); }
    else {
      if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_max) / K); }
      else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_max)); }
    }
  }
}

void os::Posix::print_rlimit_info(outputStream* st) {
  st->print("rlimit (soft/hard):");
  print_rlimit(st, " STACK", RLIMIT_STACK, true);
  print_rlimit(st, ", CORE", RLIMIT_CORE, true);

#if defined(AIX)
  st->print(", NPROC ");
  st->print("%ld", sysconf(_SC_CHILD_MAX));

  print_rlimit(st, ", THREADS", RLIMIT_THREADS);
#else
  print_rlimit(st, ", NPROC", RLIMIT_NPROC);
#endif

  print_rlimit(st, ", NOFILE", RLIMIT_NOFILE);
  print_rlimit(st, ", AS", RLIMIT_AS, true);
  print_rlimit(st, ", CPU", RLIMIT_CPU);
  print_rlimit(st, ", DATA", RLIMIT_DATA, true);

  // maximum size of files that the process may create
  print_rlimit(st, ", FSIZE", RLIMIT_FSIZE, true);

#if defined(LINUX) || defined(__APPLE__)
  // maximum number of bytes of memory that may be locked into RAM
  // (rounded down to the nearest multiple of system pagesize)
  print_rlimit(st, ", MEMLOCK", RLIMIT_MEMLOCK, true);
#endif

  // MacOS; The maximum size (in bytes) to which a process's resident set size may grow.
#if defined(__APPLE__)
  print_rlimit(st, ", RSS", RLIMIT_RSS, true);
#endif

  st->cr();
}

void os::Posix::print_uname_info(outputStream* st) {
  // kernel
  st->print("uname: ");
  struct utsname name;
  uname(&name);
  st->print("%s ", name.sysname);
#ifdef ASSERT
  st->print("%s ", name.nodename);
#endif
  st->print("%s ", name.release);
  st->print("%s ", name.version);
  st->print("%s", name.machine);
  st->cr();
}
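
// Print the umask rwx-style. Note: a letter below means the corresponding bit
// is *set in the umask*, i.e. that permission is masked off newly created files.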
void os::Posix::print_umask(outputStream* st, mode_t umsk) {
  st->print((umsk & S_IRUSR) ? "r" : "-");
  st->print((umsk & S_IWUSR) ? "w" : "-");
  st->print((umsk & S_IXUSR) ? "x" : "-");
  st->print((umsk & S_IRGRP) ? "r" : "-");
  st->print((umsk & S_IWGRP) ? "w" : "-");
  st->print((umsk & S_IXGRP) ? "x" : "-");
  st->print((umsk & S_IROTH) ? "r" : "-");
  st->print((umsk & S_IWOTH) ? "w" : "-");
  st->print((umsk & S_IXOTH) ? "x" : "-");
}

void os::print_user_info(outputStream* st) {
  unsigned id = (unsigned) ::getuid();
  st->print("uid  : %u ", id);
  id = (unsigned) ::geteuid();
  st->print("euid : %u ", id);
  id = (unsigned) ::getgid();
  st->print("gid  : %u ", id);
  id = (unsigned) ::getegid();
  st->print_cr("egid : %u", id);
  st->cr();

  mode_t umsk = ::umask(0);
  ::umask(umsk);
  st->print("umask: %04o (", (unsigned) umsk);
  os::Posix::print_umask(st, umsk);
  st->print_cr(")");
  st->cr();
}

// Print all active locale categories, one line each
void os::print_active_locale(outputStream* st) {
  st->print_cr("Active Locale:");
  // Posix is quiet about how exactly LC_ALL is implemented.
  // Just print it out too, in case LC_ALL is held separately
  // from the individual categories.
  #define LOCALE_CAT_DO(f) \
    f(LC_ALL) \
    f(LC_COLLATE) \
    f(LC_CTYPE) \
    f(LC_MESSAGES) \
    f(LC_MONETARY) \
    f(LC_NUMERIC) \
    f(LC_TIME)
  #define XX(cat) { cat, #cat },
  const struct { int c; const char* name; } categories[] = {
      LOCALE_CAT_DO(XX)
      { -1, nullptr }
  };
  #undef XX
  #undef LOCALE_CAT_DO
  for (int i = 0; categories[i].c != -1; i++) {
    const char* locale = setlocale(categories[i].c, nullptr);
    st->print_cr("%s=%s", categories[i].name,
                 ((locale != nullptr) ? locale : "<unknown>"));
  }
}

bool os::get_host_name(char* buf, size_t buflen) {
  struct utsname name;
  int retcode = uname(&name);
  if (retcode != -1) {
    jio_snprintf(buf, buflen, "%s", name.nodename);
    return true;
  }
  const char* errmsg = os::strerror(errno);
  log_warning(os)("Failed to get host name, error message: %s", errmsg);
  return false;
}

#ifndef _LP64
// Helper, on 32bit, for os::has_allocatable_memory_limit
static bool is_allocatable(size_t s) {
  if (s < 2 * G) {
    return true;
  }
  // Use raw anonymous mmap here; no need to go through any
  // of our reservation layers. We will unmap right away.
  void* p = ::mmap(nullptr, s, PROT_NONE,
                   MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    return false;
  } else {
    ::munmap(p, s);
    return true;
  }
}
#endif // !_LP64

bool os::has_allocatable_memory_limit(size_t* limit) {
  struct rlimit rlim;
  int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
  // if there was an error when calling getrlimit, assume that there is no limitation
  // on virtual memory.
  bool result;
  if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
    result = false;
  } else {
    *limit = (size_t)rlim.rlim_cur;
    result = true;
  }
#ifdef _LP64
  return result;
#else
  // arbitrary virtual space limit for 32 bit Unices found by testing. If
  // getrlimit above returned a limit, bound it with this limit. Otherwise
  // directly use it.
  const size_t max_virtual_limit = 3800 * M;
  if (result) {
    *limit = MIN2(*limit, max_virtual_limit);
  } else {
    *limit = max_virtual_limit;
  }

  // bound by actually allocatable memory. The algorithm uses two bounds, an
  // upper and a lower limit. The upper limit is the current highest amount of
  // memory that could not be allocated, the lower limit is the current highest
  // amount of memory that could be allocated.
  // The algorithm iteratively refines the result by halving the difference
  // between these limits, updating either the upper limit (if that value could
  // not be allocated) or the lower limit (if that value could be allocated)
  // until the difference between these limits is "small".

  // the minimum amount of memory we care about allocating.
  const size_t min_allocation_size = M;

  size_t upper_limit = *limit;

  // first check a few trivial cases
  if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
    *limit = upper_limit;
  } else if (!is_allocatable(min_allocation_size)) {
    // we found that not even min_allocation_size is allocatable. Return it
    // anyway. There is no point to search for a better value any more.
    *limit = min_allocation_size;
  } else {
    // perform the binary search.
    size_t lower_limit = min_allocation_size;
    while ((upper_limit - lower_limit) > min_allocation_size) {
      size_t temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
      temp_limit = align_down(temp_limit, min_allocation_size);
      if (is_allocatable(temp_limit)) {
        lower_limit = temp_limit;
      } else {
        upper_limit = temp_limit;
      }
    }
    *limit = lower_limit;
  }
  return true;
#endif
}
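
// Worked example of the binary search (illustrative): with no RLIMIT_AS set,
// upper_limit starts at 3800M. If that fails to mmap but 1M succeeds, probes
// proceed e.g. 1900M (ok) -> 2850M (fail) -> 2375M (ok) -> ... halving the
// bracket until upper_limit - lower_limit <= 1M; *limit then becomes the
// largest probe that allocated successfully.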

void* os::get_default_process_handle() {
#ifdef __APPLE__
  // MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY
  // to avoid finding unexpected symbols on second (or later)
  // loads of a library.
  return (void*)::dlopen(nullptr, RTLD_FIRST);
#else
  return (void*)::dlopen(nullptr, RTLD_LAZY);
#endif
}

void* os::dll_lookup(void* handle, const char* name) {
  ::dlerror(); // Clear any previous error
  void* ret = ::dlsym(handle, name);
  if (ret == nullptr) {
    const char* tmp = ::dlerror();
    // It is possible that we found a null symbol, hence no error.
    if (tmp != nullptr) {
      log_debug(os)("Symbol %s not found in dll: %s", name, tmp);
    }
  }
  return ret;
}

void os::dll_unload(void* lib) {
  // os::Linux::dll_path returns a pointer to a string that is owned by the dynamic loader. Upon
  // calling dlclose the dynamic loader may free the memory containing the string, thus we need to
  // copy the string to be able to reference it after dlclose.
  const char* l_path = nullptr;

#ifdef LINUX
  char* l_pathdup = nullptr;
  l_path = os::Linux::dll_path(lib);
  if (l_path != nullptr) {
    l_path = l_pathdup = os::strdup(l_path);
  }
#endif // LINUX

  JFR_ONLY(NativeLibraryUnloadEvent unload_event(l_path);)

  if (l_path == nullptr) {
    l_path = "<not available>";
  }

  char ebuf[1024];
  bool res = os::pd_dll_unload(lib, ebuf, sizeof(ebuf));

  if (res) {
    Events::log_dll_message(nullptr, "Unloaded shared library \"%s\" [" INTPTR_FORMAT "]",
                            l_path, p2i(lib));
    log_info(os)("Unloaded shared library \"%s\" [" INTPTR_FORMAT "]", l_path, p2i(lib));
    JFR_ONLY(unload_event.set_result(true);)
  } else {
    Events::log_dll_message(nullptr, "Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s",
                            l_path, p2i(lib), ebuf);
    log_info(os)("Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s",
                 l_path, p2i(lib), ebuf);
    JFR_ONLY(unload_event.set_error_msg(ebuf);)
  }

  LINUX_ONLY(os::free(l_pathdup));
}

void* os::lookup_function(const char* name) {
  // This returns the global symbol in the main executable and its dependencies,
  // as well as shared objects dynamically loaded with RTLD_GLOBAL flag.
  return dlsym(RTLD_DEFAULT, name);
}

jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong)::lseek(fd, offset, whence);
}

int os::ftruncate(int fd, jlong length) {
  return ::ftruncate(fd, length);
}

const char* os::get_current_directory(char* buf, size_t buflen) {
  return getcwd(buf, buflen);
}

FILE* os::fdopen(int fd, const char* mode) {
  return ::fdopen(fd, mode);
}
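
// Note: the RESTARTABLE macro used below re-issues the wrapped call for as
// long as it fails with errno == EINTR, so a signal delivered before any
// byte was transferred does not surface as a spurious error.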
ssize_t os::pd_write(int fd, const void* buf, size_t nBytes) {
  ssize_t res;
  RESTARTABLE(::write(fd, buf, nBytes), res);
  return res;
}

ssize_t os::read_at(int fd, void* buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}

void os::flockfile(FILE* fp) {
  ::flockfile(fp);
}

void os::funlockfile(FILE* fp) {
  ::funlockfile(fp);
}

DIR* os::opendir(const char* dirname) {
  assert(dirname != nullptr, "just checking");
  return ::opendir(dirname);
}

struct dirent* os::readdir(DIR* dirp) {
  assert(dirp != nullptr, "just checking");
  return ::readdir(dirp);
}

int os::closedir(DIR* dirp) {
  assert(dirp != nullptr, "just checking");
  return ::closedir(dirp);
}

int os::socket_close(int fd) {
  return ::close(fd);
}

ssize_t os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_SSIZE_T(::recv(fd, buf, nBytes, flags));
}

ssize_t os::send(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_SSIZE_T(::send(fd, buf, nBytes, flags));
}

ssize_t os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return os::send(fd, buf, nBytes, flags);
}

ssize_t os::connect(int fd, struct sockaddr* him, socklen_t len) {
  RESTARTABLE_RETURN_SSIZE_T(::connect(fd, him, len));
}

void os::exit(int num) {
  permit_forbidden_function::exit(num);
}

void os::_exit(int num) {
  permit_forbidden_function::_exit(num);
}

void os::naked_yield() {
  sched_yield();
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}

void os::naked_short_nanosleep(jlong ns) {
  struct timespec req;
  assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
  req.tv_sec = 0;
  req.tv_nsec = ns;
  ::nanosleep(&req, nullptr);
  return;
}

void os::naked_short_sleep(jlong ms) {
  assert(ms < MILLIUNITS, "Un-interruptable sleep, short time use only");
  os::naked_short_nanosleep(millis_to_nanos(ms));
  return;
}

char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
  size_t stack_size = 0;
  size_t guard_size = 0;
  int detachstate = 0;
  pthread_attr_getstacksize(attr, &stack_size);
  pthread_attr_getguardsize(attr, &guard_size);
  // Work around glibc stack guard issue, see os::create_thread() in os_linux.cpp.
  LINUX_ONLY(if (os::Linux::adjustStackSizeForGuardPages()) stack_size -= guard_size;)
  pthread_attr_getdetachstate(attr, &detachstate);
  jio_snprintf(buf, buflen, "stacksize: %zuk, guardsize: %zuk, %s",
               stack_size / K, guard_size / K,
               (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
  return buf;
}

char* os::realpath(const char* filename, char* outbuf, size_t outbuflen) {

  if (filename == nullptr || outbuf == nullptr || outbuflen < 1) {
    assert(false, "os::realpath: invalid arguments.");
    errno = EINVAL;
    return nullptr;
  }

  char* result = nullptr;

  // This assumes platform realpath() is implemented according to POSIX.1-2008.
  // POSIX.1-2008 allows to specify null for the output buffer, in which case
  // output buffer is dynamically allocated and must be ::free()'d by the caller.
  char* p = permit_forbidden_function::realpath(filename, nullptr);
  if (p != nullptr) {
    if (strlen(p) < outbuflen) {
      strcpy(outbuf, p);
      result = outbuf;
    } else {
      errno = ENAMETOOLONG;
    }
    permit_forbidden_function::free(p); // *not* os::free
  } else {
    // Fallback for platforms struggling with modern Posix standards (AIX 5.3, 6.1). If realpath
    // returns EINVAL, this may indicate that realpath is not POSIX.1-2008 compatible and
    // that it complains about the null we handed down as user buffer.
    // In this case, use the user provided buffer but at least check whether realpath caused
    // a memory overwrite.
    if (errno == EINVAL) {
      outbuf[outbuflen - 1] = '\0';
      p = permit_forbidden_function::realpath(filename, outbuf);
      if (p != nullptr) {
        guarantee(outbuf[outbuflen - 1] == '\0', "realpath buffer overwrite detected.");
        result = p;
      }
    }
  }
  return result;
}
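
// Usage sketch (illustrative path):
//
//   char resolved[PATH_MAX];
//   if (os::realpath("/tmp/../tmp/heap", resolved, sizeof(resolved)) != nullptr) {
//     // 'resolved' holds the canonical path, e.g. "/tmp/heap"
//   } else {
//     // errno is EINVAL, ENAMETOOLONG, or whatever realpath(3) reported
//   }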

int os::stat(const char* path, struct stat* sbuf) {
  return ::stat(path, sbuf);
}

char* os::native_path(char* path) {
  return path;
}

bool os::same_files(const char* file1, const char* file2) {
  if (file1 == nullptr && file2 == nullptr) {
    return true;
  }

  if (file1 == nullptr || file2 == nullptr) {
    return false;
  }

  if (strcmp(file1, file2) == 0) {
    return true;
  }

  bool is_same = false;
  struct stat st1;
  struct stat st2;

  if (os::stat(file1, &st1) < 0) {
    return false;
  }

  if (os::stat(file2, &st2) < 0) {
    return false;
  }

  if (st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino) {
    // same files
    is_same = true;
  }
  return is_same;
}

// Called when creating the thread. The minimum stack sizes have already been calculated
size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
  size_t stack_size;
  if (req_stack_size == 0) {
    stack_size = default_stack_size(thr_type);
  } else {
    stack_size = req_stack_size;
  }

  switch (thr_type) {
  case os::java_thread:
    // Java threads use ThreadStackSize which default value can be
    // changed with the flag -Xss
    if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) {
      // no requested size and we have a more specific default value
      stack_size = JavaThread::stack_size_at_create();
    }
    stack_size = MAX2(stack_size,
                      _java_thread_min_stack_allowed);
    break;
  case os::compiler_thread:
    if (req_stack_size == 0 && CompilerThreadStackSize > 0) {
      // no requested size and we have a more specific default value
      stack_size = (size_t)(CompilerThreadStackSize * K);
    }
    stack_size = MAX2(stack_size,
                      _compiler_thread_min_stack_allowed);
    break;
  case os::vm_thread:
  case os::gc_thread:
  case os::watcher_thread:
  default:  // presume the unknown thr_type is a VM internal
    if (req_stack_size == 0 && VMThreadStackSize > 0) {
      // no requested size and we have a more specific default value
      stack_size = (size_t)(VMThreadStackSize * K);
    }
    stack_size = MAX2(stack_size,
                      _vm_internal_thread_min_stack_allowed);
    break;
  }

  // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size.
  // Be careful not to round up to 0. Align down in that case.
  if (stack_size <= SIZE_MAX - vm_page_size()) {
    stack_size = align_up(stack_size, vm_page_size());
  } else {
    stack_size = align_down(stack_size, vm_page_size());
  }

  return stack_size;
}
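
// Example (illustrative sizes): a java_thread created with no explicit request
// on a system where JavaThread::stack_size_at_create() is 512k takes that
// value, is clamped below by _java_thread_min_stack_allowed, and is then
// rounded to a page boundary by the align_up/align_down step above.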

#ifndef ZERO
#ifndef ARM
static bool get_frame_at_stack_banging_point(JavaThread* thread, address pc, const void* ucVoid, frame* fr) {
  if (Interpreter::contains(pc)) {
    // interpreter performs stack banging after the fixed frame header has
    // been generated while the compilers perform it before. To maintain
    // semantic consistency between interpreted and compiled frames, the
    // method returns the Java sender of the current frame.
    *fr = os::fetch_frame_from_context(ucVoid);
    if (!fr->is_first_java_frame()) {
      // get_frame_at_stack_banging_point() is only called when we
      // have well defined stacks so java_sender() calls do not need
      // to assert safe_for_sender() first.
      *fr = fr->java_sender();
    }
  } else {
    // more complex code with compiled code
    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
    CodeBlob* cb = CodeCache::find_blob(pc);
    if (cb == nullptr || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
      // Not sure where the pc points to, fallback to default
      // stack overflow handling
      return false;
    } else {
      // in compiled code, the stack banging is performed just after the return pc
      // has been pushed on the stack
      *fr = os::fetch_compiled_frame_from_context(ucVoid);
      if (!fr->is_java_frame()) {
        assert(!fr->is_first_frame(), "Safety check");
        // See java_sender() comment above.
        *fr = fr->java_sender();
      }
    }
  }
  assert(fr->is_java_frame(), "Safety check");
  return true;
}
#endif // ARM

// This returns true if the signal handler should just continue, i.e. return after calling this
bool os::Posix::handle_stack_overflow(JavaThread* thread, address addr, address pc,
                                      const void* ucVoid, address* stub) {
  // stack overflow
  StackOverflow* overflow_state = thread->stack_overflow_state();
  if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
    if (thread->thread_state() == _thread_in_Java) {
#ifndef ARM
      // arm32 doesn't have this
      // vthreads don't support this
      if (!thread->is_vthread_mounted() && overflow_state->in_stack_reserved_zone(addr)) {
        frame fr;
        if (get_frame_at_stack_banging_point(thread, pc, ucVoid, &fr)) {
          assert(fr.is_java_frame(), "Must be a Java frame");
          frame activation =
            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
          if (activation.sp() != nullptr) {
            overflow_state->disable_stack_reserved_zone();
            if (activation.is_interpreted_frame()) {
              overflow_state->set_reserved_stack_activation((address)(activation.fp()
                // Some platforms use frame pointers for interpreter frames, others use initial sp.
#if !defined(PPC64) && !defined(S390)
                + frame::interpreter_frame_initial_sp_offset
#endif
                ));
            } else {
              overflow_state->set_reserved_stack_activation((address)activation.unextended_sp());
            }
            return true; // just continue
          }
        }
      }
#endif // ARM
      // Throw a stack overflow exception. Guard pages will be re-enabled
      // while unwinding the stack.
      overflow_state->disable_stack_yellow_reserved_zone();
      *stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
    } else {
      // Thread was in the vm or native code. Return and try to finish.
      overflow_state->disable_stack_yellow_reserved_zone();
      return true; // just continue
    }
  } else if (overflow_state->in_stack_red_zone(addr)) {
    // Fatal red zone violation. Disable the guard pages and keep
    // on handling the signal.
    overflow_state->disable_stack_red_zone();
    tty->print_raw_cr("An irrecoverable stack overflow has occurred.");

    // This is a likely cause, but hard to verify. Let's just print
    // it as a hint.
    tty->print_raw_cr("Please check if any of your loaded .so files has "
                      "enabled executable stack (see man page execstack(8))");
  } else {
#ifdef LINUX
    // This only works with os::Linux::manually_expand_stack()

    // Accessing stack address below sp may cause SEGV if current
    // thread has MAP_GROWSDOWN stack. This should only happen when
    // current thread was created by user code with MAP_GROWSDOWN flag
    // and then attached to VM. See notes in os_linux.cpp.
    if (thread->osthread()->expanding_stack() == 0) {
      thread->osthread()->set_expanding_stack();
      if (os::Linux::manually_expand_stack(thread, addr)) {
        thread->osthread()->clear_expanding_stack();
        return true; // just continue
      }
      thread->osthread()->clear_expanding_stack();
    } else {
      fatal("recursive segv. expanding stack.");
    }
#else
    tty->print_raw_cr("SIGSEGV happened inside stack but outside yellow and red zone.");
#endif // LINUX
  }
  return false;
}
#endif // ZERO

bool os::Posix::is_root(uid_t uid) {
  return ROOT_UID == uid;
}

bool os::Posix::matches_effective_uid_or_root(uid_t uid) {
  return is_root(uid) || geteuid() == uid;
}

bool os::Posix::matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid) {
  return is_root(uid) || (geteuid() == uid && getegid() == gid);
}

// Shared clock/time and other supporting routines for pthread_mutex/cond
// initialization. This is enabled on Solaris but only some of the clock/time
// functionality is actually used there.

// Shared condattr object for use with relative timed-waits. Will be associated
// with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes,
// but otherwise whatever default is used by the platform - generally the
// time-of-day clock.
static pthread_condattr_t _condAttr[1];

// Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not
// all systems (e.g. FreeBSD) map the default to "normal".
static pthread_mutexattr_t _mutexAttr[1];

// common basic initialization that is always supported
static void pthread_init_common(void) {
  int status;
  if ((status = pthread_condattr_init(_condAttr)) != 0) {
    fatal("pthread_condattr_init: %s", os::strerror(status));
  }
  if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) {
    fatal("pthread_mutexattr_init: %s", os::strerror(status));
  }
  if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) {
    fatal("pthread_mutexattr_settype: %s", os::strerror(status));
  }
  PlatformMutex::init();
}

static int (*_pthread_condattr_setclock)(pthread_condattr_t*, clockid_t) = nullptr;

static bool _use_clock_monotonic_condattr = false;

// Determine what POSIX API's are present and do appropriate
// configuration.
void os::Posix::init(void) {
#if defined(_ALLBSD_SOURCE)
  clock_tics_per_sec = CLK_TCK;
#else
  clock_tics_per_sec = checked_cast<int>(sysconf(_SC_CLK_TCK));
#endif
  // NOTE: no logging available when this is called. Put logging
  // statements in init_2().

  // Check for pthread_condattr_setclock support.

  // libpthread is already loaded.
  int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) =
    (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT,
                                                   "pthread_condattr_setclock");
  if (condattr_setclock_func != nullptr) {
    _pthread_condattr_setclock = condattr_setclock_func;
  }

  // Now do general initialization.
  pthread_init_common();

  int status;
  if (_pthread_condattr_setclock != nullptr) {
    if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) {
      if (status == EINVAL) {
        _use_clock_monotonic_condattr = false;
        warning("Unable to use monotonic clock with relative timed-waits"
                " - changes to the time-of-day clock may have adverse effects");
      } else {
        fatal("pthread_condattr_setclock: %s", os::strerror(status));
      }
    } else {
      _use_clock_monotonic_condattr = true;
    }
  }

  initial_time_count = javaTimeNanos();
}

void os::Posix::init_2(void) {
  log_info(os)("Use of CLOCK_MONOTONIC is supported");
  log_info(os)("Use of pthread_condattr_setclock is%s supported",
               (_pthread_condattr_setclock != nullptr ? "" : " not"));
  log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s",
               _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock");
}

// Utility to convert the given timeout to an absolute timespec
// (based on the appropriate clock) to use with pthread_cond_timedwait()
// and sem_timedwait().
// The clock queried here must be the clock used to manage the
// timeout of the condition variable or semaphore.
//
// The passed in timeout value is either a relative time in nanoseconds
// or an absolute time in milliseconds. A relative timeout will be
// associated with CLOCK_MONOTONIC if available, unless the real-time clock
// is explicitly requested; otherwise, or if absolute,
// the default time-of-day clock will be used.
//
// Given time is a 64-bit value and the time_t used in the timespec is
// sometimes a signed-32-bit value, we have to watch for overflow if times
// way in the future are given. Further, on Solaris versions
// prior to 10 there is a restriction (see cond_timedwait) that the specified
// number of seconds, in abstime, is less than current_time + 100000000.
// As it will be over 20 years before "now + 100000000" will overflow we can
// ignore overflow and just impose a hard-limit on seconds using the value
// of "now + 100000000". This places a limit on the timeout of about 3.17
// years from "now".
//
#define MAX_SECS 100000000
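// (Worked arithmetic behind the figure above: 100000000 seconds divided by
// ~31557600 seconds per year is roughly 3.17 years.)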

// Calculate a new absolute time that is "timeout" nanoseconds from "now".
// "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending
// on which clock API is being used).
static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec,
                          jlong now_part_sec, jlong unit) {
  time_t max_secs = now_sec + MAX_SECS;

  jlong seconds = timeout / NANOUNITS;
  timeout %= NANOUNITS; // remaining nanos

  if (seconds >= MAX_SECS) {
    // More seconds than we can add, so pin to max_secs.
    abstime->tv_sec = max_secs;
    abstime->tv_nsec = 0;
  } else {
    abstime->tv_sec = now_sec + seconds;
    long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout;
    if (nanos >= NANOUNITS) { // overflow
      abstime->tv_sec += 1;
      nanos -= NANOUNITS;
    }
    abstime->tv_nsec = nanos;
  }
}
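
// For example (hypothetical values): with now_sec = 1000,
// now_part_sec = 900000000 nanos and timeout = 250000000 (250ms),
// nanos = 900000000 + 250000000 exceeds NANOUNITS, so the carry applies
// and the result is abstime = { tv_sec = 1001, tv_nsec = 150000000 }.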

// Unpack the given deadline in milliseconds since the epoch, into the given timespec.
// The current time in seconds is also passed in to enforce an upper bound as discussed above.
static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) {
  time_t max_secs = now_sec + MAX_SECS;

  jlong seconds = deadline / MILLIUNITS;
  jlong millis = deadline % MILLIUNITS;

  if (seconds >= max_secs) {
    // Absolute seconds exceeds allowed max, so pin to max_secs.
    abstime->tv_sec = max_secs;
    abstime->tv_nsec = 0;
  } else {
    abstime->tv_sec = seconds;
    abstime->tv_nsec = millis_to_nanos(millis);
  }
}
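
// For example (hypothetical values): a deadline of 1700000000123 ms since
// the epoch unpacks to abstime = { tv_sec = 1700000000, tv_nsec = 123000000 },
// provided tv_sec does not exceed now_sec + MAX_SECS.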

static jlong millis_to_nanos_bounded(jlong millis) {
  // We have to watch for overflow when converting millis to nanos,
  // but if millis is that large then we will end up limiting to
  // MAX_SECS anyway, so just do that here.
  if (millis / MILLIUNITS > MAX_SECS) {
    millis = jlong(MAX_SECS) * MILLIUNITS;
  }
  return millis_to_nanos(millis);
}

static void to_abstime(timespec* abstime, jlong timeout,
                       bool isAbsolute, bool isRealtime) {
  DEBUG_ONLY(time_t max_secs = MAX_SECS;)

  if (timeout < 0) {
    timeout = 0;
  }

  clockid_t clock = CLOCK_MONOTONIC;
  if (isAbsolute || (!_use_clock_monotonic_condattr || isRealtime)) {
    clock = CLOCK_REALTIME;
  }

  struct timespec now;
  int status = clock_gettime(clock, &now);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));

  if (!isAbsolute) {
    calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS);
  } else {
    unpack_abs_time(abstime, timeout, now.tv_sec);
  }
  DEBUG_ONLY(max_secs += now.tv_sec;)

  assert(abstime->tv_sec >= 0, "tv_sec < 0");
  assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(abstime->tv_nsec >= 0, "tv_nsec < 0");
  assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS");
}
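
// Illustrative call: to_abstime(&ts, 500 * NANOUNITS_PER_MILLIUNIT, false, false)
// yields an absolute deadline 500ms from now, against CLOCK_MONOTONIC when
// _use_clock_monotonic_condattr is set, otherwise against CLOCK_REALTIME.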

// Create an absolute time 'millis' milliseconds in the future, using the
// real-time (time-of-day) clock. Used by PosixSemaphore.
void os::Posix::to_RTC_abstime(timespec* abstime, int64_t millis) {
  to_abstime(abstime, millis_to_nanos_bounded(millis),
             false /* not absolute */,
             true  /* use real-time clock */);
}

// Common (partly) shared time functions

jlong os::javaTimeMillis() {
  struct timespec ts;
  int status = clock_gettime(CLOCK_REALTIME, &ts);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
  return jlong(ts.tv_sec) * MILLIUNITS +
         jlong(ts.tv_nsec) / NANOUNITS_PER_MILLIUNIT;
}

void os::javaTimeSystemUTC(jlong& seconds, jlong& nanos) {
  struct timespec ts;
  int status = clock_gettime(CLOCK_REALTIME, &ts);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
  seconds = jlong(ts.tv_sec);
  nanos = jlong(ts.tv_nsec);
}

// macOS and AIX have platform specific implementations for javaTimeNanos()
// using native clock/timer access APIs. These have historically worked well
// for those platforms, but it may be possible for them to switch to the
// generic clock_gettime mechanism in the future.
#if !defined(__APPLE__) && !defined(AIX)

jlong os::javaTimeNanos() {
  struct timespec tp;
  int status = clock_gettime(CLOCK_MONOTONIC, &tp);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
  jlong result = jlong(tp.tv_sec) * NANOSECS_PER_SEC + jlong(tp.tv_nsec);
  return result;
}

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
  info_ptr->max_value = ALL_64_BITS;
  info_ptr->may_skip_backward = false;  // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;   // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}
#endif // !__APPLE__ && !AIX

// Time since start-up in seconds to a fine granularity.
double os::elapsedTime() {
  return ((double)os::elapsed_counter()) / (double)os::elapsed_frequency(); // nanosecond resolution
}

jlong os::elapsed_counter() {
  return os::javaTimeNanos() - initial_time_count;
}

jlong os::elapsed_frequency() {
  return NANOSECS_PER_SEC; // nanosecond resolution
}

bool os::supports_vtime() { return true; }

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char* os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

// PlatformEvent
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread. Multiple unparkers can coexist.
//
// _event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Having three states allows for some detection of bad usage - see
// comments on unpark().
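//
// Typical sequence (illustrative): the owning thread calls park() and blocks
// while _event is 0; another thread calls unpark(), which sets _event to 1
// and signals the condvar; the owner wakes, resets _event to 0 and returns.
// A second unpark() before the next park() is absorbed (1 => 1).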

PlatformEvent::PlatformEvent() {
  int status = pthread_cond_init(_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
  status = pthread_mutex_init(_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
  _event   = 0;
  _nParked = 0;
}

void PlatformEvent::park() {       // AKA "down()"
  // Transitions for _event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _event to 0 before returning

  // Invariant: Only the thread associated with the PlatformEvent
  // may call park().
  assert(_nParked == 0, "invariant");

  int v;

  // atomically decrement _event
  for (;;) {
    v = _event;
    if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
  }
  guarantee(v >= 0, "invariant");

  if (v == 0) { // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;
    while (_event < 0) {
      // OS-level "spurious wakeups" are ignored
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
                    status, "cond_wait");
    }
    --_nParked;

    _event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
  guarantee(_event >= 0, "invariant");
}

int PlatformEvent::park(jlong millis) {
  return park_nanos(millis_to_nanos_bounded(millis));
}

int PlatformEvent::park_nanos(jlong nanos) {
  assert(nanos > 0, "nanos are positive");

  // Transitions for _event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _event to 0 before returning

  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  assert(_nParked == 0, "invariant");

  int v;
  // atomically decrement _event
  for (;;) {
    v = _event;
    if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
  }
  guarantee(v >= 0, "invariant");

  if (v == 0) { // Do this the hard way by blocking ...
    struct timespec abst;
    to_abstime(&abst, nanos, false, false);

    int ret = OS_TIMEOUT;
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;

    while (_event < 0) {
      status = pthread_cond_timedwait(_cond, _mutex, &abst);
      assert_status(status == 0 || status == ETIMEDOUT,
                    status, "cond_timedwait");
      // OS-level "spurious wakeups" are ignored
      if (status == ETIMEDOUT) break;
    }
    --_nParked;

    if (_event >= 0) {
      ret = OS_OK;
    }

    _event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
    return ret;
  }
  return OS_OK;
}

void PlatformEvent::unpark() {
  // Transitions for _event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without checking state conditions
  // properly. This spurious return doesn't manifest itself in any user code
  // but only in the correctly written condition checking loops of ObjectMonitor,
  // Mutex/Monitor, and JavaThread::sleep.

  if (Atomic::xchg(&_event, 1) >= 0) return;

  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int anyWaiters = _nParked;
  assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");

  // Note that we signal() *after* dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim
  // will simply re-test the condition and re-park itself.
  // This provides particular benefit if the underlying platform does not
  // provide wait morphing.

  if (anyWaiters != 0) {
    status = pthread_cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}

// JSR166 support

PlatformParker::PlatformParker() : _counter(0), _cur_index(-1) {
  int status = pthread_cond_init(&_cond[REL_INDEX], _condAttr);
  assert_status(status == 0, status, "cond_init rel");
  status = pthread_cond_init(&_cond[ABS_INDEX], nullptr);
  assert_status(status == 0, status, "cond_init abs");
  status = pthread_mutex_init(_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
}

PlatformParker::~PlatformParker() {
  int status = pthread_cond_destroy(&_cond[REL_INDEX]);
  assert_status(status == 0, status, "cond_destroy rel");
  status = pthread_cond_destroy(&_cond[ABS_INDEX]);
  assert_status(status == 0, status, "cond_destroy abs");
  status = pthread_mutex_destroy(_mutex);
  assert_status(status == 0, status, "mutex_destroy");
}

// Parker::park decrements count if > 0, else does a condvar wait. Unpark
// sets count to 1 and signals condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.

void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(&_counter, 0) > 0) return;

  JavaThread* jt = JavaThread::current();

  // Optional optimization -- avoid state transitions if there's
  // an interrupt pending.
  if (jt->is_interrupted(false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  struct timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    to_abstime(&absTime, time, isAbsolute, false);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Can't access interrupt state now that we are _thread_blocked. If we've
  // been interrupted since we checked above then _counter will be > 0.

  // Don't wait if cannot get lock since interference arises from
  // unparking.
  if (pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0)  { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "invariant");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

  OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

  assert(_cur_index == -1, "invariant");
  if (time == 0) {
    _cur_index = REL_INDEX; // arbitrary choice when not timed
    status = pthread_cond_wait(&_cond[_cur_index], _mutex);
    assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
                  status, "cond_wait");
  }
  else {
    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
    status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
  }
  _cur_index = -1;

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();
}

void Parker::unpark() {
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "invariant");
  const int s = _counter;
  _counter = 1;
  // must capture correct index before unlocking
  int index = _cur_index;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");

  // Note that we signal() *after* dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim
  // will simply re-test the condition and re-park itself.
  // This provides particular benefit if the underlying platform does not
  // provide wait morphing.

  if (s < 1 && index != -1) {
    // thread is definitely parked
    status = pthread_cond_signal(&_cond[index]);
    assert_status(status == 0, status, "invariant");
  }
}

// Platform Mutex/Monitor implementation

#if PLATFORM_MONITOR_IMPL_INDIRECT

PlatformMutex::Mutex::Mutex() : _next(nullptr) {
  int status = pthread_mutex_init(&_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
}

PlatformMutex::Mutex::~Mutex() {
  int status = pthread_mutex_destroy(&_mutex);
  assert_status(status == 0, status, "mutex_destroy");
}

pthread_mutex_t PlatformMutex::_freelist_lock;
PlatformMutex::Mutex* PlatformMutex::_mutex_freelist = nullptr;

void PlatformMutex::init() {
  int status = pthread_mutex_init(&_freelist_lock, _mutexAttr);
  assert_status(status == 0, status, "freelist lock init");
}

struct PlatformMutex::WithFreeListLocked : public StackObj {
  WithFreeListLocked() {
    int status = pthread_mutex_lock(&_freelist_lock);
    assert_status(status == 0, status, "freelist lock");
  }

  ~WithFreeListLocked() {
    int status = pthread_mutex_unlock(&_freelist_lock);
    assert_status(status == 0, status, "freelist unlock");
  }
};

PlatformMutex::PlatformMutex() {
  {
    WithFreeListLocked wfl;
    _impl = _mutex_freelist;
    if (_impl != nullptr) {
      _mutex_freelist = _impl->_next;
      _impl->_next = nullptr;
      return;
    }
  }
  _impl = new Mutex();
}

PlatformMutex::~PlatformMutex() {
  WithFreeListLocked wfl;
  assert(_impl->_next == nullptr, "invariant");
  _impl->_next = _mutex_freelist;
  _mutex_freelist = _impl;
}

PlatformMonitor::Cond::Cond() : _next(nullptr) {
  int status = pthread_cond_init(&_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
}

PlatformMonitor::Cond::~Cond() {
  int status = pthread_cond_destroy(&_cond);
  assert_status(status == 0, status, "cond_destroy");
}

PlatformMonitor::Cond* PlatformMonitor::_cond_freelist = nullptr;

PlatformMonitor::PlatformMonitor() {
  {
    WithFreeListLocked wfl;
    _impl = _cond_freelist;
    if (_impl != nullptr) {
      _cond_freelist = _impl->_next;
      _impl->_next = nullptr;
      return;
    }
  }
  _impl = new Cond();
}

PlatformMonitor::~PlatformMonitor() {
  WithFreeListLocked wfl;
  assert(_impl->_next == nullptr, "invariant");
  _impl->_next = _cond_freelist;
  _cond_freelist = _impl;
}

#else

PlatformMutex::PlatformMutex() {
  int status = pthread_mutex_init(&_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
}

PlatformMutex::~PlatformMutex() {
  int status = pthread_mutex_destroy(&_mutex);
  assert_status(status == 0, status, "mutex_destroy");
}

PlatformMonitor::PlatformMonitor() {
  int status = pthread_cond_init(&_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
}

PlatformMonitor::~PlatformMonitor() {
  int status = pthread_cond_destroy(&_cond);
  assert_status(status == 0, status, "cond_destroy");
}

#endif // PLATFORM_MONITOR_IMPL_INDIRECT

// Must already be locked
int PlatformMonitor::wait(uint64_t millis) {
  if (millis > 0) {
    struct timespec abst;
    // We have to watch for overflow when converting millis to nanos,
    // but if millis is that large then we will end up limiting to
    // MAX_SECS anyway, so just do that here. This also handles values
    // larger than int64_t max.
    if (millis / MILLIUNITS > MAX_SECS) {
      millis = uint64_t(MAX_SECS) * MILLIUNITS;
    }
    to_abstime(&abst, millis_to_nanos(int64_t(millis)), false, false);

    int ret = OS_TIMEOUT;
    int status = pthread_cond_timedwait(cond(), mutex(), &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (status == 0) {
      ret = OS_OK;
    }
    return ret;
  } else {
    int status = pthread_cond_wait(cond(), mutex());
    assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
                  status, "cond_wait");
    return OS_OK;
  }
}
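
// Usage note: wait(0) blocks until notified and returns OS_OK; a positive
// millis value bounds the wait and returns OS_TIMEOUT if it elapses first.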

// Darwin has no "environ" in a dynamic library.
#ifdef __APPLE__
  #define environ (*_NSGetEnviron())
#else
  extern char** environ;
#endif

char** os::get_environ() { return environ; }

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Notes: -Unlike system(), this function can be called from a signal handler;
//         it doesn't block SIGINT et al.
//        -This function is unsafe to use in non-error situations, mainly
//         because the child process will inherit all parent descriptors.
int os::fork_and_exec(const char* cmd) {
  const char* argv[4] = {"sh", "-c", cmd, nullptr};
  pid_t pid = -1;
  char** env = os::get_environ();
  // Note: cast is needed because posix_spawn() requires - for compatibility with ancient
  // C-code - a non-const argv/envp pointer array. But it is fine to hand in literal
  // strings and just cast the constness away. See also ProcessImpl_md.c.
  int rc = ::posix_spawn(&pid, "/bin/sh", nullptr, nullptr, (char**) argv, env);
  if (rc == 0) {
    int status;
    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
    while (::waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;
      case EINTR: break;
      default: return -1;
      }
    }
    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  } else {
    // Don't log, we are inside error handling
    return -1;
  }
}
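
// Illustrative return values (hypothetical commands): fork_and_exec("true")
// yields 0; fork_and_exec("exit 3") yields 3; a child killed by SIGKILL
// (signal 9) yields 0x80 + 9 == 137, matching common shell conventions.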

bool os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != nullptr) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// Also note we can abort while other threads continue to run, so we can
// easily trigger secondary faults in those threads. To reduce the likelihood
// of that we use _exit rather than exit, so that no atexit hooks get run.
// But note that os::shutdown() could also trigger secondary faults.
void os::abort(bool dump_core, const void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
    LINUX_ONLY(if (DumpPrivateMappingsInCore) ClassLoader::close_jrt_image();)
    ::abort(); // dump core
  }
  os::_exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
// Dump a core file, if possible, for debugging.
void os::die() {
  if (TestUnresponsiveErrorHandler && !CreateCoredumpOnCrash) {
    // For TimeoutInErrorHandlingTest.java, we just kill the VM
    // and don't take the time to generate a core file.
    ::raise(SIGKILL);
    // ::raise is not noreturn, even though with SIGKILL it definitely won't
    // return. Hence "fall through" to ::abort, which is declared noreturn.
  }
  ::abort();
}

const char* os::file_separator() { return "/"; }
const char* os::line_separator() { return "\n"; }
const char* os::path_separator() { return ":"; }

// Map file into memory; uses mmap().
// Notes:
// - if caller specifies addr, MAP_FIXED is used. That means existing
//   mappings will be replaced.
// - The file descriptor must be valid (to create anonymous mappings, use
//   os::reserve_memory()).
// Returns address to mapped memory, nullptr on error.
char* os::pd_map_memory(int fd, const char* unused,
                        size_t file_offset, char *addr, size_t bytes,
                        bool read_only, bool allow_exec) {
  assert(fd != -1, "Specify a valid file descriptor");

  int prot;
  int flags = MAP_PRIVATE;

  if (read_only) {
    prot = PROT_READ;
  } else {
    prot = PROT_READ | PROT_WRITE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  if (addr != nullptr) {
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*) mmap(addr, (size_t) bytes, prot, flags,
                                      fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return nullptr;
  }

  // If we did specify an address, and the mapping succeeded, it should
  // have returned that address since we specify MAP_FIXED
  assert(addr == nullptr || addr == mapped_address,
         "mmap+MAP_FIXED returned " PTR_FORMAT ", expected " PTR_FORMAT,
         p2i(mapped_address), p2i(addr));

  return mapped_address;
}

// Unmap a block of memory. Uses munmap.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  return munmap(addr, bytes) == 0;
}
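
// Illustrative use (fd, offset and length assumed valid): mapping a file
// read-only and later unmapping it:
//   char* base = os::pd_map_memory(fd, nullptr, 0 /* file_offset */, nullptr,
//                                  len, true /* read_only */, false /* allow_exec */);
//   if (base != nullptr) { /* ... */ os::pd_unmap_memory(base, len); }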

#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
static ucontext_t _saved_assert_context;
static bool _has_saved_context = false;
#endif // CAN_SHOW_REGISTERS_ON_ASSERT

void os::save_assert_context(const void* ucVoid) {
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
  assert(ucVoid != nullptr, "invariant");
  assert(!_has_saved_context, "invariant");
  memcpy(&_saved_assert_context, ucVoid, sizeof(ucontext_t));
  // on Linux ppc64, ucontext_t contains pointers into itself which have to be patched up
  // after copying the context (see comment in sys/ucontext.h):
#if defined(PPC64)
  *((void**)&_saved_assert_context.uc_mcontext.regs) = &(_saved_assert_context.uc_mcontext.gp_regs);
#elif defined(AMD64)
  // In the copied version, fpregs should point to the copied contents.
  // Sanity check: fpregs should point into the context.
  if ((address)((const ucontext_t*)ucVoid)->uc_mcontext.fpregs > (address)ucVoid) {
    size_t fpregs_offset = pointer_delta(((const ucontext_t*)ucVoid)->uc_mcontext.fpregs, ucVoid, 1);
    if (fpregs_offset < sizeof(ucontext_t)) {
      // Preserve the offset.
      *((void**)&_saved_assert_context.uc_mcontext.fpregs) = (void*)((address)(void*)&_saved_assert_context + fpregs_offset);
    }
  }
#endif
  _has_saved_context = true;
#endif // CAN_SHOW_REGISTERS_ON_ASSERT
}

const void* os::get_saved_assert_context(const void** sigInfo) {
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
  assert(sigInfo != nullptr, "invariant");
  *sigInfo = nullptr;
  return _has_saved_context ? &_saved_assert_context : nullptr;
#endif
  *sigInfo = nullptr;
  return nullptr;