Merge branch '11.4' into 11.8
commit 8d36cafe4f

@@ -373,6 +373,7 @@ IF(WIN32)
ELSE()
SET(DEFAULT_MYSQL_HOME ${CMAKE_INSTALL_PREFIX})
SET(SHAREDIR ${INSTALL_MYSQLSHAREDIRABS})
SET(HOSTNAME "uname -n" CACHE STRING "Command for determining hostname")
ENDIF()

SET(DEFAULT_BASEDIR "${DEFAULT_MYSQL_HOME}")

@@ -180,7 +180,9 @@ extern my_bool my_use_large_pages;

int my_init_large_pages(void);
uchar *my_large_malloc(size_t *size, myf my_flags);
#if defined _WIN32 || defined HAVE_MMAP
#ifdef _WIN32
/* On Windows, use my_virtual_mem_reserve() and my_virtual_mem_commit(). */
#else
char *my_large_virtual_alloc(size_t *size);
#endif
void my_large_free(void *ptr, size_t size);

@@ -24,7 +24,9 @@
extern "C" {
#endif

# ifdef _WIN32
char *my_virtual_mem_reserve(size_t *size);
# endif
char *my_virtual_mem_commit(char *ptr, size_t size);
void my_virtual_mem_decommit(char *ptr, size_t size);
void my_virtual_mem_release(char *ptr, size_t size);
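
The two header hunks above describe the split allocation API: on Windows, address space is first reserved with my_virtual_mem_reserve() and then committed piecewise with my_virtual_mem_commit(), while other platforms get memory directly from my_large_virtual_alloc(). A minimal caller-side sketch of the Windows lifecycle, assuming only the declarations shown above (the header name and the 64 MiB figure are illustrative); buf_pool_t::create() further down follows the same reserve-then-commit pattern:

#ifdef _WIN32
#include <stddef.h>
#include "my_virtual_mem.h"                     /* header name assumed */

static void example_reserve_then_commit(void)
{
  size_t size= 64 << 20;                        /* ask for 64 MiB */
  char *base= my_virtual_mem_reserve(&size);    /* size may be rounded up */
  if (!base)
    return;
  if (!my_virtual_mem_commit(base, size / 2))   /* back half stays reserved */
  {
    my_virtual_mem_release(base, size);
    return;
  }
  /* ... use base[0 .. size/2) ... */
  my_virtual_mem_decommit(base, size / 2);      /* give back physical memory */
  my_virtual_mem_release(base, size);           /* give back the address range */
}
#endif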

@@ -1285,6 +1285,7 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
],
"chosen_access_method": {
"type": "ref",
"index": "a",
"rows_read": 1,
"rows_out": 1,
"cost": 0.1821659,

@@ -1340,6 +1341,7 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
],
"chosen_access_method": {
"type": "ref",
"index": "a",
"rows_read": 1,
"rows_out": 1,
"cost": 0.1821659,

@@ -2533,6 +2535,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
],
"chosen_access_method": {
"type": "ref",
"index": "a_b",
"rows_read": 41,
"rows_out": 41,
"cost": 0.051379171,

@@ -3012,6 +3015,7 @@ explain select * from t1 left join t2 on t2.a=t1.a {
],
"chosen_access_method": {
"type": "eq_ref",
"index": "PRIMARY",
"rows_read": 1,
"rows_out": 1,
"cost": 0.007120904,

@@ -3980,6 +3984,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
],
"chosen_access_method": {
"type": "ref",
"index": "pk_a_b",
"rows_read": 1,
"rows_out": 1,
"cost": 0.000928812,

@@ -4676,6 +4681,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
],
"chosen_access_method": {
"type": "range",
"index": "a",
"rows_read": 3,
"rows_out": 3,
"cost": 0.001755494,

@@ -4702,6 +4708,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
],
"chosen_access_method": {
"type": "range",
"index": "a",
"rows_read": 3,
"rows_out": 3,
"cost": 0.001755494,

@@ -4744,6 +4751,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
],
"chosen_access_method": {
"type": "ref",
"index": "a",
"rows_read": 1,
"rows_out": 1,
"cost": 0.002376836,

@@ -4795,6 +4803,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
],
"chosen_access_method": {
"type": "ref",
"index": "a",
"rows_read": 1.166666667,
"rows_out": 1.166666667,
"cost": 0.002392836,

@@ -11275,6 +11284,7 @@ JS
"chosen_access_method":
{
"type": "ref",
"index": "b",
"rows_read": 1,
"rows_out": 1,
"cost": 0.01901531,

@@ -11521,6 +11531,7 @@ JS
"chosen_access_method":
{
"type": "ref",
"index": "a",
"rows_read": 1,
"rows_out": 1,
"cost": 0.01840091,

@@ -12906,6 +12917,7 @@ json_detailed(json_extract(trace, '$**.choose_best_splitting'))
"chosen_access_method":
{
"type": "ref",
"index": "idx_a",
"rows_read": 1.8367,
"rows_out": 1.8367,
"cost": 0.002051185,

@@ -13201,6 +13213,7 @@ explain select * from t1 where a<10 and b between 10 and 50 and c < 10 {
],
"chosen_access_method": {
"type": "range",
"index": "a",
"rows_read": 0.189,
"rows_out": 0.017766,
"cost": 0.006364199,

@@ -227,6 +227,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
],
"chosen_access_method": {
"type": "ref",
"index": "key1",
"rows_read": 1,
"rows_out": 1,
"cost": 0.00345856,

@@ -186,6 +186,7 @@ JS
"chosen_access_method":
{
"type": "ref",
"index": "a",
"rows_read": 6,
"rows_out": 0.6,
"cost": 0.005388489,

@@ -298,6 +298,7 @@ JS
"chosen_access_method":
{
"type": "range",
"index": "PRIMARY",
"rows_read": 5,
"rows_out": 2.490196078,
"cost": 0.00948507,

@@ -293,6 +293,7 @@ JS
"chosen_access_method":
{
"type": "range",
"index": "PRIMARY",
"rows_read": 5,
"rows_out": 2.490196078,
"cost": 0.010014472,

@@ -12,12 +12,19 @@ select @@innodb_buffer_pool_size;
10485760
create table t1 (id int primary key, val int not null)
ENGINE=InnoDB ROW_FORMAT=COMPRESSED;
create table t2 (id int primary key, val int not null)
ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=$kbs;
SET STATEMENT foreign_key_checks=0, unique_checks=0 FOR
INSERT INTO t1 SELECT seq*4,seq*4 FROM seq_1_to_262144;
SET STATEMENT foreign_key_checks=0, unique_checks=0 FOR
INSERT INTO t2 SELECT seq*4,seq*4 FROM seq_1_to_16384;
set global innodb_buffer_pool_size = 7340032;
select count(val) from t1;
count(val)
262144
select count(val) from t2;
count(val)
16384
set global innodb_adaptive_hash_index=OFF;
set global innodb_buffer_pool_size = 24117248;
set global innodb_buffer_pool_size = 26214400;

@@ -29,7 +36,10 @@ select @@innodb_buffer_pool_size;
select count(val) from t1;
count(val)
262144
drop table t1;
select count(val) from t2;
count(val)
16384
drop table t1,t2;
SET GLOBAL innodb_max_purge_lag_wait = 0;
SET @save_pct= @@GLOBAL.innodb_max_dirty_pages_pct;
SET @save_pct_lwm= @@GLOBAL.innodb_max_dirty_pages_pct_lwm;

@@ -21,6 +21,7 @@ set global innodb_buffer_pool_size = 9437184;
set global innodb_buffer_pool_size = 10485760;

select @@innodb_buffer_pool_size;
let $kbs=`SELECT CAST(@@innodb_page_size / 1024 AS INT)`;

# fill buffer pool
--disable_query_log

@@ -29,9 +30,13 @@ SET GLOBAL innodb_read_only_compressed=OFF;
--enable_query_log
create table t1 (id int primary key, val int not null)
ENGINE=InnoDB ROW_FORMAT=COMPRESSED;
evalp create table t2 (id int primary key, val int not null)
ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=$kbs;

SET STATEMENT foreign_key_checks=0, unique_checks=0 FOR
INSERT INTO t1 SELECT seq*4,seq*4 FROM seq_1_to_262144;
SET STATEMENT foreign_key_checks=0, unique_checks=0 FOR
INSERT INTO t2 SELECT seq*4,seq*4 FROM seq_1_to_16384;

--disable_query_log
SET GLOBAL innodb_read_only_compressed=@save_innodb_read_only_compressed;

@@ -42,6 +47,7 @@ SET GLOBAL innodb_read_only_compressed=@save_innodb_read_only_compressed;
set global innodb_buffer_pool_size = 7340032;

select count(val) from t1;
select count(val) from t2;

set global innodb_adaptive_hash_index=OFF;

@@ -52,8 +58,9 @@ set global innodb_buffer_pool_size = 26214400;
select @@innodb_buffer_pool_size;

select count(val) from t1;
select count(val) from t2;

drop table t1;
drop table t1,t2;

SET GLOBAL innodb_max_purge_lag_wait = 0;
SET @save_pct= @@GLOBAL.innodb_max_dirty_pages_pct;

@@ -66,8 +73,6 @@ SELECT variable_value = 0
FROM information_schema.global_status
WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
--source include/wait_condition.inc
# this may occasionally be aborted on a heavily loaded builder
--error 0,ER_WRONG_USAGE
SET GLOBAL innodb_buffer_pool_size = @old_innodb_buffer_pool_size;
SET GLOBAL innodb_adaptive_hash_index = @old_innodb_adaptive_hash_index;
SET GLOBAL innodb_max_dirty_pages_pct = @save_pct;

@@ -49,8 +49,8 @@ SET @@GLOBAL.rpl_semi_sync_slave_enabled= 1;

--connection master
--echo # Verify Semi-Sync is active
--let $status_var= Rpl_semi_sync_master_status
--let $status_var_value= ON
--let $status_var= Rpl_semi_sync_master_clients
--let $status_var_value= 1
--source include/wait_for_status_var.inc
SHOW STATUS LIKE 'Rpl_semi_sync_master_clients';

@@ -67,7 +67,7 @@ STOP SLAVE;
--echo # MDEV-36663: Verifying dump thread connection is killed..
# Prior to MDEV-36663 fixes, this would time out and
# Rpl_semi_sync_master_clients would remain 1.
--let $wait_condition= SELECT COUNT(*)=0 FROM information_schema.PROCESSLIST WHERE COMMAND = 'Binlog Dump'
--let $wait_condition= SELECT COUNT(*)=0 FROM information_schema.PROCESSLIST WHERE USER = 'replssl'
--source include/wait_condition.inc

--let $n_master_clients= query_get_value(SHOW STATUS LIKE 'Rpl_semi_sync_master_clients', Value, 1)

@@ -423,7 +423,7 @@ uchar *my_large_malloc(size_t *size, myf my_flags)
DBUG_RETURN(ptr);
}

#ifdef _WIN32
#ifndef _WIN32
/**
Special large pages allocator, with possibility to commit to allocating
more memory later.

@@ -434,37 +434,10 @@ char *my_large_virtual_alloc(size_t *size)
char *ptr;
DBUG_ENTER("my_large_virtual_alloc");

if (my_use_large_pages)
{
size_t s= *size;
s= MY_ALIGN(s, (size_t) my_large_page_size);
ptr= VirtualAlloc(NULL, s, MEM_COMMIT | MEM_RESERVE | MEM_LARGE_PAGES,
PAGE_READWRITE);
if (ptr)
{
*size= s;
DBUG_RETURN(ptr);
}
}

DBUG_RETURN(VirtualAlloc(NULL, *size, MEM_RESERVE, PAGE_READWRITE));
}
#elif defined HAVE_MMAP
/**
Special large pages allocator, with possibility to commit to allocating
more memory later.
Every implementation returns a zero filled buffer here.
*/
char *my_large_mmap(size_t *size, int prot)
{
char *ptr;
DBUG_ENTER("my_large_virtual_alloc");

if (my_use_large_pages)
{
size_t large_page_size;
int page_i= 0;
prot= PROT_READ | PROT_WRITE;

while ((large_page_size= my_next_large_page_size(*size, &page_i)) != 0)
{

@@ -488,7 +461,7 @@ char *my_large_mmap(size_t *size, int prot)
OS_MAP_ANON;

size_t aligned_size= MY_ALIGN(*size, (size_t) large_page_size);
ptr= mmap(NULL, aligned_size, prot, mapflag, -1, 0);
ptr= mmap(NULL, aligned_size, PROT_READ | PROT_WRITE, mapflag, -1, 0);
if (ptr == (void*) -1)
{
ptr= NULL;

@@ -511,10 +484,7 @@ char *my_large_mmap(size_t *size, int prot)
}
}

ptr= mmap(NULL, *size, prot,
# ifdef MAP_NORESERVE
MAP_NORESERVE |
# endif
ptr= mmap(NULL, *size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | OS_MAP_ANON, -1, 0);
if (ptr == MAP_FAILED)
{

@@ -524,16 +494,6 @@ char *my_large_mmap(size_t *size, int prot)

DBUG_RETURN(ptr);
}

/**
Special large pages allocator, with possibility to commit to allocating
more memory later.
Every implementation returns a zero filled buffer here.
*/
char *my_large_virtual_alloc(size_t *size)
{
return my_large_mmap(size, PROT_READ | PROT_WRITE);
}
#endif

/**
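
For non-Windows builds the entry point remains my_large_virtual_alloc(), which (per the comment above) hands back a zero-filled mapping and may round *size up to a supported large-page size when my_use_large_pages is set, falling back to a plain anonymous mapping otherwise. A caller-side sketch, assuming only the declarations from the header hunk near the top; my_large_free() must be given the possibly rounded-up size:

#include <stddef.h>

/* Declarations as in the header hunk above (non-Windows branch). */
char *my_large_virtual_alloc(size_t *size);
void my_large_free(void *ptr, size_t size);

static void example_large_alloc(void)
{
  size_t size= 8u << 20;                     /* request 8 MiB */
  char *buf= my_large_virtual_alloc(&size);  /* size may be rounded up */
  if (!buf)
    return;
  buf[0]= 1;              /* mapping comes back zero-filled, per the comment */
  my_large_free(buf, size);                  /* pass the updated size back */
}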

@@ -34,13 +34,9 @@

We try to respect use_large_pages setting, on Windows and Linux
*/
#ifndef _WIN32
char *my_large_mmap(size_t *size, int prot);
#endif

#ifdef _WIN32
char *my_virtual_mem_reserve(size_t *size)
{
#ifdef _WIN32
DWORD flags= my_use_large_pages
? MEM_LARGE_PAGES | MEM_RESERVE | MEM_COMMIT
: MEM_RESERVE;

@@ -53,10 +49,8 @@ char *my_virtual_mem_reserve(size_t *size)
my_error(EE_OUTOFMEMORY, MYF(ME_BELL + ME_ERROR_LOG), *size);
}
return ptr;
#else
return my_large_mmap(size, PROT_NONE);
#endif
}
#endif

#if defined _WIN32 && !defined DBUG_OFF
static my_bool is_memory_committed(char *ptr, size_t size)

@@ -88,7 +82,7 @@ char *my_virtual_mem_commit(char *ptr, size_t size)
}
#else
if (my_use_large_pages)
/* my_large_mmap() already created a read/write mapping. */;
/* my_large_virtual_alloc() already created a read/write mapping. */;
else
{
# ifdef _AIX

@@ -177,7 +177,6 @@ ELSE()
SET(CHECK_PID "kill -s SIGCONT $PID > /dev/null 2> /dev/null")
ENDIF()

SET(HOSTNAME "uname -n")
SET(MYSQLD_USER "mysql")
SET(MYSQLD_GROUP "mysql")
ENDIF(UNIX)

@@ -711,12 +711,25 @@ void print_best_access_for_table(THD *thd, POSITION *pos)
DBUG_ASSERT(thd->trace_started());

Json_writer_object obj(thd, "chosen_access_method");
obj.
add("type", pos->type == JT_ALL ? "scan" : join_type_str[pos->type]).
add("rows_read", pos->records_read).
add("rows_out", pos->records_out).
add("cost", pos->read_time).
add("uses_join_buffering", pos->use_join_buffer);

obj.add("type", pos->type == JT_ALL ? "scan" : join_type_str[pos->type]);

if (pos->type == JT_EQ_REF || pos->type == JT_REF || pos->type == JT_FT)
{
obj.add("index", pos->key->table->key_info[pos->key->key].name);
}

if (pos->type == JT_RANGE)
{
obj.add("index",
pos->table->table->key_info[pos->table->quick->index].name);
}

obj.add("rows_read", pos->records_read)
.add("rows_out", pos->records_out)
.add("cost", pos->read_time)
.add("uses_join_buffering", pos->use_join_buffer);

if (pos->range_rowid_filter_info)
{
uint key_no= pos->range_rowid_filter_info->get_key_no();

@@ -408,7 +408,7 @@ ATTRIBUTE_COLD void btr_sea::enable(bool resize) noexcept
if (!parts[0].table.array)
{
enabled= true;
alloc(buf_pool.curr_size() / sizeof(void *) / 64);
alloc(buf_pool.curr_pool_size() / sizeof(void *) / 64);
}

ut_ad(enabled);

@@ -637,7 +637,7 @@ func_exit:
buf_buddy_add_to_free(reinterpret_cast<buf_buddy_free_t*>(buf), i);
}

/** Reallocate a ROW_FORMAT=COMPRESSED page frame during buf_pool_t::resize().
/** Reallocate a ROW_FORMAT=COMPRESSED page frame during buf_pool_t::shrink().
@param bpage page descriptor covering a ROW_FORMAT=COMPRESSED page
@param block uncompressed block for storage
@return block

@@ -672,10 +672,9 @@ buf_block_t *buf_buddy_shrink(buf_page_t *bpage, buf_block_t *block) noexcept
bpage->zip.data= static_cast<page_zip_t*>(dst);
buf_pool.buddy_stat[i].relocated++;

for (;;)
while (i < BUF_BUDDY_SIZES)
{
MEM_UNDEFINED(src, BUF_BUDDY_LOW << i);
ut_ad(i < BUF_BUDDY_SIZES);
/* Try to combine adjacent blocks. */
buf_buddy_free_t *buddy= reinterpret_cast<buf_buddy_free_t*>
(buf_buddy_get(static_cast<byte*>(src), BUF_BUDDY_LOW << i));

@@ -684,20 +683,16 @@ buf_block_t *buf_buddy_shrink(buf_page_t *bpage, buf_block_t *block) noexcept
{
ut_ad(!buf_pool.contains_zip(src, BUF_BUDDY_LOW_SHIFT + i));
buf_buddy_add_to_free(static_cast<buf_buddy_free_t*>(src), i);
break;
return block;
}

/* The buddy is free: recombine */
buf_buddy_remove_from_free(buddy, i);
i++;
src= ut_align_down(src, BUF_BUDDY_LOW << i);
if (i == BUF_BUDDY_SIZES)
{
buf_buddy_block_free(src);
break;
}
}

buf_buddy_block_free(src);
return block;
}
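
The reworked loop in buf_buddy_shrink() is the standard buddy-allocator recombination step: once the compressed page has been relocated, the vacated block is merged with its buddy for as long as the buddy is also free, doubling in size each round, until either a busy buddy is met (the block goes onto the free list) or the largest size is reached (the whole block is returned to the pool). A generic sketch of that loop, using hypothetical helpers in place of the buf_buddy_* machinery and illustrative constants:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical free-list helpers standing in for the buf_buddy_* functions. */
void *buddy_of(void *blk, unsigned order);      /* blk XOR (1 << (LOW_SHIFT + order)) */
int   buddy_is_free(void *blk, unsigned order);
void  remove_from_free(void *blk, unsigned order);
void  add_to_free(void *blk, unsigned order);
void  free_whole_block(void *blk);

enum { LOW_SHIFT= 12, MAX_ORDER= 6 };           /* illustrative sizes only */

static void recombine(void *blk, unsigned order)
{
  while (order < MAX_ORDER)
  {
    void *buddy= buddy_of(blk, order);
    if (!buddy_is_free(buddy, order))
    {
      add_to_free(blk, order);                  /* buddy busy: stop merging */
      return;
    }
    remove_from_free(buddy, order);             /* buddy free: merge upward */
    order++;
    blk= (void *) ((uintptr_t) blk & ~(((uintptr_t) 1 << (LOW_SHIFT + order)) - 1));
  }
  free_whole_block(blk);                        /* merged to the largest size */
}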

@@ -1354,7 +1354,11 @@ bool buf_pool_t::create() noexcept
retry:
{
NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE;
#ifdef _WIN32
memory_unaligned= my_virtual_mem_reserve(&size);
#else
memory_unaligned= my_large_virtual_alloc(&size);
#endif
}

if (!memory_unaligned)

@@ -1388,6 +1392,7 @@ bool buf_pool_t::create() noexcept
#ifdef UNIV_PFS_MEMORY
PSI_MEMORY_CALL(memory_alloc)(mem_key_buf_buf_pool, actual_size, &owner);
#endif
#ifdef _WIN32
if (!my_virtual_mem_commit(memory, actual_size))
{
my_virtual_mem_release(memory_unaligned, size_unaligned);

@@ -1395,6 +1400,9 @@ bool buf_pool_t::create() noexcept
memory_unaligned= nullptr;
goto oom;
}
#else
update_malloc_size(actual_size, 0);
#endif

#ifdef HAVE_LIBNUMA
if (srv_numa_interleave)

@@ -1804,6 +1812,9 @@ ATTRIBUTE_COLD buf_pool_t::shrink_status buf_pool_t::shrink(size_t size)
goto next;
}

if (UT_LIST_GET_LEN(free) + UT_LIST_GET_LEN(LRU) < usable_size() / 20)
return SHRINK_ABORT;

mysql_mutex_lock(&flush_list_mutex);

if (LRU_warned && !UT_LIST_GET_FIRST(free))

@@ -276,7 +276,10 @@ static void buf_LRU_check_size_of_non_data_objects() noexcept

auto s= UT_LIST_GET_LEN(buf_pool.free) + UT_LIST_GET_LEN(buf_pool.LRU);

if (s < curr_size / 20)
if (s >= curr_size / 20);
else if (buf_pool.is_shrinking())
buf_pool.LRU_warn();
else
{
sql_print_error("[FATAL] InnoDB: Over 95 percent of the buffer pool is"
" occupied by lock heaps"

@@ -927,7 +927,7 @@ void dict_sys_t::create() noexcept
UT_LIST_INIT(table_LRU, &dict_table_t::table_LRU);
UT_LIST_INIT(table_non_LRU, &dict_table_t::table_LRU);

const ulint hash_size = buf_pool.curr_size()
const ulint hash_size = buf_pool.curr_pool_size()
/ (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE);

table_hash.create(hash_size);

@@ -4334,7 +4334,7 @@ void dict_sys_t::resize() noexcept
table_id_hash.free();
temp_id_hash.free();

const ulint hash_size = buf_pool.curr_size()
const ulint hash_size = buf_pool.curr_pool_size()
/ (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE);
table_hash.create(hash_size);
table_id_hash.create(hash_size);

@@ -4082,7 +4082,6 @@ skip_buffering_tweak:
srv_use_native_aio= FALSE;
#endif

srv_lock_table_size= 5 * buf_pool.curr_size();
DBUG_RETURN(0);
}

@@ -76,7 +76,7 @@ inline void buf_buddy_free(void* buf, ulint size) noexcept
}

ATTRIBUTE_COLD MY_ATTRIBUTE((nonnull, warn_unused_result))
/** Reallocate a ROW_FORMAT=COMPRESSED page frame during buf_pool_t::resize().
/** Reallocate a ROW_FORMAT=COMPRESSED page frame during buf_pool_t::shrink().
@param bpage page descriptor covering a ROW_FORMAT=COMPRESSED page
@param block uncompressed block for storage
@return block

@@ -917,17 +917,16 @@ ATTRIBUTE_COLD void log_t::append_prepare_wait(bool late, bool ex) noexcept
{
got_ex:
const uint64_t l= write_lsn_offset.load(std::memory_order_relaxed);
const lsn_t lsn{base_lsn.load(std::memory_order_relaxed)};
ut_d(lsn_t ll= lsn + (l & (WRITE_BACKOFF - 1)));
ut_ad(is_mmap()
? ll - get_flushed_lsn(std::memory_order_relaxed) < capacity()
: ll - write_lsn - ((write_size - 1) & (write_lsn - first_lsn)) <
buf_size);
const lsn_t lsn= base_lsn.load(std::memory_order_relaxed) +
(l & (WRITE_BACKOFF - 1));
waits++;
#ifdef HAVE_PMEM
const bool is_pmem{is_mmap()};
if (is_pmem)
persist(lsn + (l & (WRITE_BACKOFF - 1)));
{
ut_ad(lsn - get_flushed_lsn(std::memory_order_relaxed) < capacity());
persist(lsn);
}
#endif
latch.wr_unlock();
/* write_buf() or persist() will clear the WRITE_BACKOFF flag,

@@ -935,7 +934,7 @@ ATTRIBUTE_COLD void log_t::append_prepare_wait(bool late, bool ex) noexcept
#ifdef HAVE_PMEM
if (!is_pmem)
#endif
log_write_up_to(lsn + (l & (WRITE_BACKOFF - 1)), false);
log_write_up_to(lsn, false);
if (ex)
{
latch.wr_lock(SRW_LOCK_CALL);
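
The l & (WRITE_BACKOFF - 1) arithmetic in this hunk works because WRITE_BACKOFF appears to be a single power-of-two flag kept above the byte-offset bits of write_lsn_offset, so masking with WRITE_BACKOFF - 1 strips the flag and leaves only the offset. A minimal sketch of that packing idiom, with an illustrative bit position (the real field layout is not shown in this hunk):

#include <stdint.h>

/* Illustrative only: a 1-bit backoff flag packed above a 40-bit offset. */
#define WRITE_BACKOFF (UINT64_C(1) << 40)

static inline uint64_t offset_part(uint64_t l)    { return l & (WRITE_BACKOFF - 1); }
static inline int      backoff_is_set(uint64_t l) { return (l & WRITE_BACKOFF) != 0; }
static inline uint64_t with_backoff(uint64_t l)   { return l | WRITE_BACKOFF; }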

@@ -1392,7 +1392,7 @@ dberr_t srv_start(bool create_new_db)

log_sys.create();
recv_sys.create();
lock_sys.create(srv_lock_table_size);
lock_sys.create(srv_lock_table_size = 5 * buf_pool.curr_size());

srv_startup_is_before_trx_rollback_phase = true;

@@ -48,6 +48,8 @@ ut_find_prime(
ulint pow2;
ulint i;

ut_ad(n);

n += 100;

pow2 = 1;
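
ut_find_prime() is used to size InnoDB hash tables; the hunk above adds a ut_ad(n) debug assertion against a zero request before the search starts from roughly n + 100. As a rough stand-in for what the function computes, here is a plain trial-division version (the real implementation additionally prefers primes that are not close to a power of two, which is what the pow2 variable above is for):

/* Simplified illustration only; not the real ut_find_prime(). */
static unsigned long next_prime_after(unsigned long n)
{
  unsigned long c= n + 100;          /* start a bit above n, as above */
  for (;; c++)
  {
    unsigned long d;
    if (c % 2 == 0)
      continue;                      /* skip even candidates */
    for (d= 3; d * d <= c; d+= 2)
      if (c % d == 0)
        break;                       /* composite: try the next candidate */
    if (d * d > c)
      return c;                      /* no divisor found: c is prime */
  }
}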

@@ -31,7 +31,6 @@ ELSE()
SET(MYSQLD_USER "mysql")
SET(MYSQLD_GROUP "mysql")
SET(ini_file_extension "cnf")
SET(HOSTNAME "uname -n")

# Define directly here, as cmake/install_layout.cmake has no LOGDIR to be inherited
SET(su_user "su mysql mysql")