btr0cur.c	Improve range size estimate for big ranges
ha_innobase.cc	Fix Sort aborted bug

sql/ha_innobase.cc:
  Fix Sort aborted bug
innobase/btr/btr0cur.c:
  Improve range size estimate for big ranges
BitKeeper/etc/logging_ok:
  Logging to logging@openlogging.org accepted
commit 5ea83eed34
parent 83fbd8d8db
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -1 +1,2 @@
 jani@hynda.mysql.fi
+heikki@donna.mysql.fi
--- a/innobase/btr/btr0cur.c
+++ b/innobase/btr/btr0cur.c
@@ -2351,6 +2351,7 @@ btr_estimate_n_rows_in_range(
 	btr_path_t*	slot1;
 	btr_path_t*	slot2;
 	ibool		diverged;
+	ulint		divergence_level;
 	ulint		n_rows;
 	ulint		i;
 	mtr_t		mtr;
@@ -2393,6 +2394,7 @@ btr_estimate_n_rows_in_range(
 
 	n_rows = 1;
 	diverged = FALSE;
+	divergence_level = 1000000;
 
 	for (i = 0; ; i++) {
 		ut_ad(i < BTR_PATH_ARRAY_N_SLOTS);
@@ -2403,6 +2405,13 @@ btr_estimate_n_rows_in_range(
 		if (slot1->nth_rec == ULINT_UNDEFINED
 				|| slot2->nth_rec == ULINT_UNDEFINED) {
 
+			if (i > divergence_level + 1) {
+				/* In trees whose height is > 1 our algorithm
+				tends to underestimate: multiply the estimate
+				by 2: */
+
+				n_rows = n_rows * 2;
+			}
 			return(n_rows);
 		}
 
@@ -2417,6 +2426,8 @@ btr_estimate_n_rows_in_range(
 				return(10);
 			}
 
+			divergence_level = i;
+
 			diverged = TRUE;
 		} else if (diverged) {
 			n_rows = (n_rows * (slot1->n_recs + slot2->n_recs))
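To see the control flow of the new heuristic outside the diff, here is a small self-contained C sketch of the estimation loop. It is an illustration only: path_slot_t, PATH_END and estimate_rows_in_range are hypothetical stand-ins for InnoDB's btr_path_t, ULINT_UNDEFINED and btr_estimate_n_rows_in_range, and the code that records the two descent paths is omitted. The loop mirrors what the hunks above show: the estimate starts at 1, becomes the gap between the two border cursors at the level where their paths first diverge (remembered in divergence_level), is multiplied by the average page fill on every level below that, and is finally doubled if the divergence happened more than one level above the point where the paths end, since the averaging underestimates big ranges in tall trees.

#include <stddef.h>

/* Hypothetical per-level slot: remembers where the search cursor stood
in a page and how many records that page held. */
typedef struct {
	size_t	nth_rec;	/* cursor position within the page */
	size_t	n_recs;		/* number of records in the page */
} path_slot_t;

#define PATH_END	((size_t) -1)	/* marks unused levels, playing the
					role of ULINT_UNDEFINED */

static size_t
estimate_rows_in_range(const path_slot_t* path1, /* descent to range start */
		       const path_slot_t* path2, /* descent to range end */
		       size_t n_slots)
{
	size_t	n_rows = 1;
	int	diverged = 0;
	size_t	divergence_level = 1000000;	/* "not diverged yet" */
	size_t	i;

	for (i = 0; i < n_slots; i++) {

		if (path1[i].nth_rec == PATH_END
		    || path2[i].nth_rec == PATH_END) {

			if (i > divergence_level + 1) {
				/* Paths diverged well above the leaves:
				counter the systematic underestimate. */
				n_rows = n_rows * 2;
			}
			return(n_rows);
		}

		if (!diverged && path1[i].nth_rec != path2[i].nth_rec) {

			if (path2[i].nth_rec > path1[i].nth_rec) {
				n_rows = path2[i].nth_rec
					- path1[i].nth_rec - 1;
			} else {
				n_rows = 0;
			}

			if (n_rows == 0) {
				/* Border cursors are adjacent or reversed:
				return a small fixed guess. */
				return(10);
			}

			divergence_level = i;
			diverged = 1;

		} else if (diverged) {
			/* Below the divergence level: scale by the average
			fill of the two border pages. */
			n_rows = (n_rows
				* (path1[i].n_recs + path2[i].n_recs)) / 2;
		}
	}

	return(n_rows);
}

In this sketch, if the paths separate at the root of a three-level tree, the walk ends with i = 3 and divergence_level = 0, so the x2 correction fires; if they only separate inside a leaf page, it does not.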
--- a/sql/ha_innobase.cc
+++ b/sql/ha_innobase.cc
@@ -822,11 +822,11 @@ ha_innobase::open(
 
 	if (NULL == (ib_table = dict_table_get(norm_name, NULL))) {
 
-	        fprintf(stderr, "\
-Cannot find table %s from the internal data dictionary\n\
-of InnoDB though the .frm file for the table exists. Maybe you have deleted\n\
-and created again an InnoDB database but forgotten to delete the\n\
-corresponding .frm files of old InnoDB tables?\n",
+	        fprintf(stderr,
+"Cannot find table %s from the internal data dictionary\n"
+"of InnoDB though the .frm file for the table exists. Maybe you have deleted\n"
+"and created again an InnoDB database but forgotten to delete the\n"
+"corresponding .frm files of old InnoDB tables?\n",
 			  norm_name);
 
 		free_share(share);
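The ha_innobase::open() hunk above is purely cosmetic: the error message changes from one string literal continued with backslash-newlines to a sequence of adjacent literals that the compiler concatenates. A minimal illustration of the two forms (the table name is a made-up value):

#include <stdio.h>

int main(void)
{
	const char*	norm_name = "test/t1";	/* illustrative value only */

	/* Backslash continuation: the literal keeps running on the next
	source line, so every continued line must start in column 1 or
	the indentation leaks into the message. */
	fprintf(stderr, "\
Cannot find table %s from the internal data dictionary\n", norm_name);

	/* Adjacent literals: the compiler concatenates them, so each
	fragment can be indented and edited independently. */
	fprintf(stderr,
		"Cannot find table %s from the internal data dictionary\n"
		"of InnoDB though the .frm file for the table exists.\n",
		norm_name);

	return(0);
}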
@@ -2659,6 +2659,37 @@ ha_innobase::records_in_range(
 	DBUG_RETURN((ha_rows) n_rows);
 }
 
+/*************************************************************************
+Gives an UPPER BOUND to the number of rows in a table. This is used in
+filesort.cc and the upper bound must hold. TODO: Since the number of
+rows in a table may change after this function is called, we still may
+get a 'Sort aborted' error in filesort.cc of MySQL. The ultimate fix is to
+improve the algorithm of filesort.cc. */
+
+ha_rows
+ha_innobase::estimate_number_of_rows(void)
+/*======================================*/
+			/* out: upper bound of rows, currently 32-bit int
+			or uint */
+{
+	row_prebuilt_t*	prebuilt = (row_prebuilt_t*) innobase_prebuilt;
+	dict_table_t*	ib_table;
+
+	DBUG_ENTER("info");
+
+	ib_table = prebuilt->table;
+
+	dict_update_statistics(ib_table);
+
+	data_file_length = ((ulonglong)
+			ib_table->stat_clustered_index_size)
+				* UNIV_PAGE_SIZE;
+
+	/* The minimum clustered index record size is 20 bytes */
+
+	return((ha_rows) (1000 + data_file_length / 20));
+}
+
 /*************************************************************************
 How many seeks it will take to read through the table. This is to be
 comparable to the number returned by records_in_range so that we can