WL 1682: Use a bitvector instead of query_id to tell the handler which fields to read and write

Changed server code, added a new interface to the handler, and changed the
NDB, InnoDB, and Federated handlers that previously used query_id.
Also fixes Bug#10202 (a one-line fix for a memory leak).


mysys/my_bitmap.c:
  Debug code
sql/field.cc:
  Initialise fieldnr to 0 if not set anywhere else
sql/field.h:
  Introduce a fieldnr (first field = 1, last field = table->s->fields)
  in the Field object, to be able to quickly set the appropriate bit in
  the read/write set
sql/ha_federated.cc:
  Changed federated handler to use write set instead of query_id
sql/ha_innodb.cc:
  Changed InnoDB handler to use write set instead of query_id
sql/ha_ndbcluster.cc:
  Changed NDB handler to use write set instead of query_id
sql/ha_ndbcluster.h:
  Changed NDB handler to use write set instead of query_id
sql/handler.cc:
  Allocate read_set and write_set either in get_new_handler or in a
  special routine
  Added routines used at destruction of the handler object,
  plus a routine to set all primary key fields in the read_set
sql/handler.h:
  Added a bool to keep track of whether the read/write set is allocated
  in the handler
  Deallocate the read/write set when the handler object is deleted
  New bitmaps for the read/write set
sql/item.cc:
  Set bits in the read or write set (set_query_id = 2 => write_set,
  set_query_id = 1 => read_set)
sql/lock.cc:
  Clear the bit sets when starting a new statement in external_lock
sql/mysql_priv.h:
  Changed set_query_id from bool to ulong
sql/opt_range.cc:
  Set the primary key in the read set in all places where
  HA_EXTRA_RETRIEVE_PRIMARY_KEY is used
sql/sql_acl.cc:
  Set all bits in the read set in all places where
  HA_EXTRA_RETRIEVE_ALL_COLS is used
sql/sql_base.cc:
  Clear all bits before starting a new statement when the table is
  already locked
  Set the bit in the read/write set depending on set_query_id and fieldnr
  bool -> ulong for set_query_id
  Set all bits in the read set for SELECT * queries where the table is
  not a view
sql/sql_class.h:
  Added comments + changed type of set_query_id
sql/sql_insert.cc:
  Use 2 when setup_fields is called for the fields to be updated in
  UPDATE and INSERT statements
  Set primary key fields in the read set when the EXTRA param is used
sql/sql_load.cc:
  Set all bits in the write set if no fields are specified in
  LOAD DATA INFILE
  Otherwise use 2 to set the specific fields to be updated by LOAD DATA...
sql/sql_select.cc:
  Set the primary key in the read set when the EXTRA param is specified
  Set fieldnr for temporary tables
  Set the number of fields before calling get_new_handler,
  and only set all bits if there is at least one field in the table
sql/sql_table.cc:
  Set all bits in the read set for the old table and all bits in the
  write set for the new table in the ALTER TABLE copy method
  Set all bits in the read set when the EXTRA param is used
sql/sql_udf.cc:
  Set all bits in the read set when the EXTRA param is used
sql/sql_update.cc:
  Pass 2 for set_query_id for the fields to be updated by UPDATE
  Set all bits in the read set when the EXTRA param is used
sql/table.cc:
  Set fieldnr in openfrm
  Reallocate the read/write set in openfrm, since table->s->fields == 0
  at the time get_new_handler is called
sql/unireg.cc:
  Fix Bug #10202
Author: unknown
Date: 2005-04-28 14:45:27 +02:00
Parent: a2ed27af52
Commit: 5d3af2b0a7
24 changed files with 299 additions and 199 deletions
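Before the per-file diffs, a minimal, self-contained sketch of the pattern this patch introduces. This is illustration only, not MySQL code: HandlerRWSet and its members are invented stand-ins for the MY_BITMAP-backed ha_* routines added to sql/handler.h below, and the numbering follows the fieldnr convention from sql/field.h (first field = 1, bit 0 reserved by ha_clear_all_set()).

#include <cstdio>
#include <vector>

// Toy stand-in for the read/write sets this patch adds to class handler.
class HandlerRWSet {
  std::vector<bool> read_set;    // stands in for MY_BITMAP read_set
  std::vector<bool> write_set;   // stands in for MY_BITMAP write_set
public:
  explicit HandlerRWSet(unsigned no_fields)
    : read_set(no_fields + 1, false), write_set(no_fields + 1, false) {
    read_set[0] = true;          // mimic ha_clear_all_set(), which keeps
    write_set[0] = true;         // bit 0 set in both bitmaps
  }
  void set_bit_in_read_set(unsigned fieldnr)  { read_set[fieldnr] = true; }
  void set_bit_in_write_set(unsigned fieldnr) { write_set[fieldnr] = true; }
  bool get_bit_in_read_set(unsigned fieldnr)  { return read_set[fieldnr]; }
  bool get_bit_in_write_set(unsigned fieldnr) { return write_set[fieldnr]; }
};

int main() {
  // Server side: for e.g. "UPDATE t SET c2 = ..." setup_fields() is now
  // called with set_query_id == 2, which sets the write-set bit of c2
  // (fieldnr 2) instead of stamping field->query_id.
  HandlerRWSet rw(3);
  rw.set_bit_in_write_set(2);

  // Engine side: write_row()/update_row() test the write set rather than
  // comparing field->query_id against thd->query_id.
  for (unsigned fieldnr = 1; fieldnr <= 3; fieldnr++)
    if (rw.get_bit_in_write_set(fieldnr))
      std::printf("field %u is to be written\n", fieldnr);
  return 0;
}

The benefit named in the patch is that membership tests become cheap bit operations, and other server parts (e.g. the replication code for row-level logging) can interact with the same sets.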

mysys/my_bitmap.c

@ -102,6 +102,14 @@ void bitmap_free(MY_BITMAP *map)
void bitmap_set_bit(MY_BITMAP *map, uint bitmap_bit)
{
if (!map->bitmap)
{
DBUG_PRINT("info", ("Bitmap not defined"));
}
if (bitmap_bit >= map->bitmap_size*8)
{
DBUG_PRINT("info", ("bit %d size in bytes %d", bitmap_bit, map->bitmap_size));
}
DBUG_ASSERT(map->bitmap && bitmap_bit < map->bitmap_size*8);
bitmap_lock(map);
bitmap_fast_set_bit(map, bitmap_bit);

sql/field.cc

@ -1198,6 +1198,7 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
flags=null_ptr ? 0: NOT_NULL_FLAG;
comment.str= (char*) "";
comment.length=0;
fieldnr= 0;
}
uint Field::offset()

sql/field.h

@ -86,6 +86,7 @@ public:
utype unireg_check;
uint32 field_length; // Length of field
uint16 flags;
uint16 fieldnr; // Field number
uchar null_bit; // Bit used to test null bit
Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,uchar null_bit_arg,

sql/ha_federated.cc

@ -1076,11 +1076,8 @@ inline uint field_in_record_is_null(TABLE *table,
int ha_federated::write_row(byte *buf)
{
uint x= 0, num_fields= 0;
uint x, num_fields;
Field **field;
ulong current_query_id= 1;
ulong tmp_query_id= 1;
uint all_fields_have_same_query_id= 1;
char insert_buffer[IO_SIZE];
char values_buffer[IO_SIZE], insert_field_value_buffer[IO_SIZE];
@ -1105,14 +1102,6 @@ int ha_federated::write_row(byte *buf)
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
/*
get the current query id - the fields that we add to the insert
statement to send to the foreign will not be appended unless they match
this query id
*/
current_query_id= table->in_use->query_id;
DBUG_PRINT("info", ("current query id %d", current_query_id));
/* start off our string */
insert_string.append("INSERT INTO `");
insert_string.append(share->table_base_name);
@ -1120,44 +1109,25 @@ int ha_federated::write_row(byte *buf)
/* start both our field and field values strings */
insert_string.append(" (");
values_string.append(" VALUES (");
/*
Even if one field is different, all_fields_same_query_id can't remain
0 if it remains 0, then that means no fields were specified in the query
such as in the case of INSERT INTO table VALUES (val1, val2, valN)
*/
for (field= table->field; *field; field++, x++)
{
if (x > 0 && tmp_query_id != (*field)->query_id)
all_fields_have_same_query_id= 0;
tmp_query_id= (*field)->query_id;
}
/*
loop through the field pointer array, add any fields to both the values
list and the fields list that match the current query id
list and the fields list that are part of the write set
*/
x=0;
for (field= table->field; *field; field++, x++)
for (x=0, num_fields= 0, field= table->field; *field; field++, x++)
{
/* if there is a query id and if it's equal to the current query id */
if (((*field)->query_id && (*field)->query_id == current_query_id)
|| all_fields_have_same_query_id)
if (ha_get_bit_in_write_set((*field)->fieldnr))
{
num_fields++;
if ((*field)->is_null())
{
DBUG_PRINT("info",
("column %d current query id %d field is_null query id %d",
x, current_query_id, (*field)->query_id));
DBUG_PRINT("info", ("column %d field is_null", x));
insert_field_value_string.append("NULL");
}
else
{
DBUG_PRINT("info",
("column %d current query id %d field is not null query ID %d",
x, current_query_id, (*field)->query_id));
DBUG_PRINT("info", ("column %d field is not null", x));
(*field)->val_str(&insert_field_value_string);
/* quote these fields if they require it */
(*field)->quote_data(&insert_field_value_string);
}

sql/ha_innodb.cc

@ -2684,7 +2684,8 @@ build_template(
(!(fetch_all_in_key && index_contains_field) &&
!(fetch_primary_key_cols &&
dict_table_col_in_clustered_key(index->table, i)) &&
thd->query_id != field->query_id))) {
!(ha_get_bit_in_read_set(i+1) ||
ha_get_bit_in_write_set(i+1))))) {
/* This field is not needed in the query, skip it */

sql/ha_ndbcluster.cc

@ -833,12 +833,10 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
/*
Check if any set or get of blob value in current query.
*/
bool ha_ndbcluster::uses_blob_value(bool all_fields)
bool ha_ndbcluster::uses_blob_value()
{
if (table->s->blob_fields == 0)
return FALSE;
if (all_fields)
return TRUE;
{
uint no_fields= table->s->fields;
int i;
@ -847,7 +845,8 @@ bool ha_ndbcluster::uses_blob_value(bool all_fields)
for (i= no_fields - 1; i >= 0; i--)
{
Field *field= table->field[i];
if (thd->query_id == field->query_id)
if ((m_write_op && ha_get_bit_in_write_set(i+1)) ||
(!m_write_op && ha_get_bit_in_read_set(i+1)))
{
return TRUE;
}
@ -1116,7 +1115,7 @@ int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type)
{
if (type >= TL_WRITE_ALLOW_WRITE)
return NdbOperation::LM_Exclusive;
else if (uses_blob_value(m_retrieve_all_fields))
else if (uses_blob_value())
return NdbOperation::LM_Read;
else
return NdbOperation::LM_CommittedRead;
@ -1294,9 +1293,8 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
for (i= 0; i < table->s->fields; i++)
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
((field->flags & PRI_KEY_FLAG)) ||
m_retrieve_all_fields)
if (ha_get_bit_in_read_set(i+1) ||
((field->flags & PRI_KEY_FLAG)))
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
@ -1337,6 +1335,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
DBUG_ENTER("pk_read");
DBUG_PRINT("enter", ("key_len: %u", key_len));
DBUG_DUMP("key", (char*)key, key_len);
m_write_op= FALSE;
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
@ -1388,10 +1387,13 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
NdbOperation *op;
THD *thd= current_thd;
DBUG_ENTER("complemented_pk_read");
m_write_op= FALSE;
if (m_retrieve_all_fields)
if (ha_get_all_bit_in_read_set())
{
// We have already retrieved all fields, nothing to complement
DBUG_RETURN(0);
}
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
@ -1408,7 +1410,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
{
Field *field= table->field[i];
if (!((field->flags & PRI_KEY_FLAG) ||
(thd->query_id == field->query_id)))
(ha_get_bit_in_read_set(i+1))))
{
if (get_ndb_value(op, field, i, new_data))
ERR_RETURN(trans->getNdbError());
@ -1432,7 +1434,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
{
Field *field= table->field[i];
if (!((field->flags & PRI_KEY_FLAG) ||
(thd->query_id == field->query_id)))
(ha_get_bit_in_read_set(i+1))))
{
m_value[i].ptr= NULL;
}
@ -1810,6 +1812,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
DBUG_PRINT("enter", ("index: %u, sorted: %d, descending: %d",
active_index, sorted, descending));
DBUG_PRINT("enter", ("Starting new ordered scan on %s", m_tabname));
m_write_op= FALSE;
// Check that sorted seems to be initialised
DBUG_ASSERT(sorted == 0 || sorted == 1);
@ -1869,6 +1872,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
DBUG_ENTER("full_table_scan");
DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname));
m_write_op= FALSE;
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
@ -1898,6 +1902,7 @@ int ha_ndbcluster::write_row(byte *record)
NdbOperation *op;
int res;
THD *thd= current_thd;
m_write_op= TRUE;
DBUG_ENTER("write_row");
@ -1916,7 +1921,10 @@ int ha_ndbcluster::write_row(byte *record)
statistic_increment(thd->status_var.ha_write_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
{
table->timestamp_field->set_time();
ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
}
has_auto_increment= (table->next_number_field && record == table->record[0]);
if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)))
@ -1969,6 +1977,7 @@ int ha_ndbcluster::write_row(byte *record)
{
Field *field= table->field[i];
if (!(field->flags & PRI_KEY_FLAG) &&
(ha_get_bit_in_write_set(i+1)) &&
set_ndb_value(op, field, i, &set_blob_value))
{
m_skip_auto_increment= TRUE;
@ -2086,13 +2095,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
NdbOperation *op;
uint i;
DBUG_ENTER("update_row");
m_write_op= TRUE;
statistic_increment(thd->status_var.ha_update_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
{
table->timestamp_field->set_time();
// Set query_id so that field is really updated
table->timestamp_field->query_id= thd->query_id;
ha_set_bit_in_write_set(table->timestamp_field->fieldnr);
}
/* Check for update of primary key for special handling */
@ -2152,7 +2161,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (!(op= cursor->updateCurrentTuple()))
ERR_RETURN(trans->getNdbError());
m_ops_pending++;
if (uses_blob_value(FALSE))
if (uses_blob_value())
m_blobs_pending= TRUE;
}
else
@ -2190,7 +2199,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
for (i= 0; i < table->s->fields; i++)
{
Field *field= table->field[i];
if (((thd->query_id == field->query_id) || m_retrieve_all_fields) &&
if (ha_get_bit_in_write_set(i+1) &&
(!(field->flags & PRI_KEY_FLAG)) &&
set_ndb_value(op, field, i))
ERR_RETURN(op->getNdbError());
@ -2217,6 +2226,7 @@ int ha_ndbcluster::delete_row(const byte *record)
NdbScanOperation* cursor= m_active_cursor;
NdbOperation *op;
DBUG_ENTER("delete_row");
m_write_op= TRUE;
statistic_increment(thd->status_var.ha_delete_count,&LOCK_status);
m_rows_changed++;
@ -2481,6 +2491,7 @@ int ha_ndbcluster::index_read(byte *buf,
int error;
ndb_index_type type= get_index_type(active_index);
const KEY* key_info= table->key_info+active_index;
m_write_op= FALSE;
switch (type){
case PRIMARY_KEY_ORDERED_INDEX:
case PRIMARY_KEY_INDEX:
@ -2647,6 +2658,7 @@ int ha_ndbcluster::read_range_first(const key_range *start_key,
{
byte* buf= table->record[0];
DBUG_ENTER("ha_ndbcluster::read_range_first");
m_write_op= FALSE;
DBUG_RETURN(read_range_first_to_buf(start_key,
end_key,
@ -2868,83 +2880,11 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
{
DBUG_ENTER("extra");
switch (operation) {
case HA_EXTRA_NORMAL: /* Optimize for space (def) */
DBUG_PRINT("info", ("HA_EXTRA_NORMAL"));
break;
case HA_EXTRA_QUICK: /* Optimize for speed */
DBUG_PRINT("info", ("HA_EXTRA_QUICK"));
break;
case HA_EXTRA_RESET: /* Reset database to after open */
DBUG_PRINT("info", ("HA_EXTRA_RESET"));
DBUG_PRINT("info", ("Clearing condition stack"));
cond_clear();
break;
case HA_EXTRA_CACHE: /* Cache record in HA_rrnd() */
DBUG_PRINT("info", ("HA_EXTRA_CACHE"));
break;
case HA_EXTRA_NO_CACHE: /* End caching of records (def) */
DBUG_PRINT("info", ("HA_EXTRA_NO_CACHE"));
break;
case HA_EXTRA_NO_READCHECK: /* No readcheck on update */
DBUG_PRINT("info", ("HA_EXTRA_NO_READCHECK"));
break;
case HA_EXTRA_READCHECK: /* Use readcheck (def) */
DBUG_PRINT("info", ("HA_EXTRA_READCHECK"));
break;
case HA_EXTRA_KEYREAD: /* Read only key to database */
DBUG_PRINT("info", ("HA_EXTRA_KEYREAD"));
break;
case HA_EXTRA_NO_KEYREAD: /* Normal read of records (def) */
DBUG_PRINT("info", ("HA_EXTRA_NO_KEYREAD"));
break;
case HA_EXTRA_NO_USER_CHANGE: /* No user is allowed to write */
DBUG_PRINT("info", ("HA_EXTRA_NO_USER_CHANGE"));
break;
case HA_EXTRA_KEY_CACHE:
DBUG_PRINT("info", ("HA_EXTRA_KEY_CACHE"));
break;
case HA_EXTRA_NO_KEY_CACHE:
DBUG_PRINT("info", ("HA_EXTRA_NO_KEY_CACHE"));
break;
case HA_EXTRA_WAIT_LOCK: /* Wait until file is available (def) */
DBUG_PRINT("info", ("HA_EXTRA_WAIT_LOCK"));
break;
case HA_EXTRA_NO_WAIT_LOCK: /* If file is locked, return quickly */
DBUG_PRINT("info", ("HA_EXTRA_NO_WAIT_LOCK"));
break;
case HA_EXTRA_WRITE_CACHE: /* Use write cache in ha_write() */
DBUG_PRINT("info", ("HA_EXTRA_WRITE_CACHE"));
break;
case HA_EXTRA_FLUSH_CACHE: /* flush write_record_cache */
DBUG_PRINT("info", ("HA_EXTRA_FLUSH_CACHE"));
break;
case HA_EXTRA_NO_KEYS: /* Remove all update of keys */
DBUG_PRINT("info", ("HA_EXTRA_NO_KEYS"));
break;
case HA_EXTRA_KEYREAD_CHANGE_POS: /* Keyread, but change pos */
DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_CHANGE_POS")); /* xxxxchk -r must be used */
break;
case HA_EXTRA_REMEMBER_POS: /* Remember pos for next/prev */
DBUG_PRINT("info", ("HA_EXTRA_REMEMBER_POS"));
break;
case HA_EXTRA_RESTORE_POS:
DBUG_PRINT("info", ("HA_EXTRA_RESTORE_POS"));
break;
case HA_EXTRA_REINIT_CACHE: /* init cache from current record */
DBUG_PRINT("info", ("HA_EXTRA_REINIT_CACHE"));
break;
case HA_EXTRA_FORCE_REOPEN: /* Datafile have changed on disk */
DBUG_PRINT("info", ("HA_EXTRA_FORCE_REOPEN"));
break;
case HA_EXTRA_FLUSH: /* Flush tables to disk */
DBUG_PRINT("info", ("HA_EXTRA_FLUSH"));
break;
case HA_EXTRA_NO_ROWS: /* Don't write rows */
DBUG_PRINT("info", ("HA_EXTRA_NO_ROWS"));
break;
case HA_EXTRA_RESET_STATE: /* Reset positions */
DBUG_PRINT("info", ("HA_EXTRA_RESET_STATE"));
break;
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
if (current_thd->lex->sql_command == SQLCOM_REPLACE)
@ -2963,34 +2903,6 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
m_use_write= FALSE;
m_ignore_dup_key= FALSE;
break;
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
where field->query_id is the same as
the current query id */
DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_ALL_COLS"));
m_retrieve_all_fields= TRUE;
break;
case HA_EXTRA_PREPARE_FOR_DELETE:
DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_DELETE"));
break;
case HA_EXTRA_PREPARE_FOR_UPDATE: /* Remove read cache if problems */
DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_UPDATE"));
break;
case HA_EXTRA_PRELOAD_BUFFER_SIZE:
DBUG_PRINT("info", ("HA_EXTRA_PRELOAD_BUFFER_SIZE"));
break;
case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_PRIMARY_KEY"));
m_retrieve_primary_key= TRUE;
break;
case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_UNIQUE"));
break;
case HA_EXTRA_CHANGE_KEY_TO_DUP:
DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_DUP"));
case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_PRESERVE_FIELDS"));
break;
}
DBUG_RETURN(0);
@ -3255,8 +3167,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_ASSERT(m_active_trans);
// Start of transaction
m_rows_changed= 0;
m_retrieve_all_fields= FALSE;
m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
{
NDBDICT *dict= ndb->getDictionary();
@ -3383,8 +3293,6 @@ int ha_ndbcluster::start_stmt(THD *thd)
m_active_trans= trans;
// Start of statement
m_retrieve_all_fields= FALSE;
m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
DBUG_RETURN(error);
@ -4166,8 +4074,6 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_use_write(FALSE),
m_ignore_dup_key(FALSE),
m_primary_key_update(FALSE),
m_retrieve_all_fields(FALSE),
m_retrieve_primary_key(FALSE),
m_rows_to_insert((ha_rows) 1),
m_rows_inserted((ha_rows) 0),
m_bulk_insert_rows((ha_rows) 1024),
@ -5489,6 +5395,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
HANDLER_BUFFER *buffer)
{
DBUG_ENTER("ha_ndbcluster::read_multi_range_first");
m_write_op= FALSE;
int res;
KEY* key_info= table->key_info + active_index;
@ -5496,7 +5403,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
ulong reclength= table->s->reclength;
NdbOperation* op;
if (uses_blob_value(m_retrieve_all_fields))
if (uses_blob_value())
{
/**
* blobs can't be batched currently

sql/ha_ndbcluster.h

@ -560,7 +560,7 @@ private:
ulonglong get_auto_increment();
void invalidateDictionaryCache();
int ndb_err(NdbTransaction*);
bool uses_blob_value(bool all_fields);
bool uses_blob_value();
char *update_table_comment(const char * comment);
@ -611,8 +611,7 @@ private:
bool m_use_write;
bool m_ignore_dup_key;
bool m_primary_key_update;
bool m_retrieve_all_fields;
bool m_retrieve_primary_key;
bool m_write_op;
ha_rows m_rows_to_insert;
ha_rows m_rows_inserted;
ha_rows m_bulk_insert_rows;

sql/handler.cc

@ -193,54 +193,67 @@ enum db_type ha_checktype(enum db_type database_type)
handler *get_new_handler(TABLE *table, enum db_type db_type)
{
handler *file;
switch (db_type) {
#ifndef NO_HASH
case DB_TYPE_HASH:
return new ha_hash(table);
file= new ha_hash(table);
#endif
#ifdef HAVE_ISAM
case DB_TYPE_MRG_ISAM:
return new ha_isammrg(table);
file= new ha_isammrg(table);
break;
case DB_TYPE_ISAM:
return new ha_isam(table);
file= new ha_isam(table);
break;
#else
case DB_TYPE_MRG_ISAM:
return new ha_myisammrg(table);
file= new ha_myisammrg(table);
break;
#endif
#ifdef HAVE_BERKELEY_DB
case DB_TYPE_BERKELEY_DB:
return new ha_berkeley(table);
file= new ha_berkeley(table);
break;
#endif
#ifdef HAVE_INNOBASE_DB
case DB_TYPE_INNODB:
return new ha_innobase(table);
file= new ha_innobase(table);
break;
#endif
#ifdef HAVE_EXAMPLE_DB
case DB_TYPE_EXAMPLE_DB:
return new ha_example(table);
file= new ha_example(table);
break;
#endif
#ifdef HAVE_ARCHIVE_DB
case DB_TYPE_ARCHIVE_DB:
return new ha_archive(table);
file= new ha_archive(table);
break;
#endif
#ifdef HAVE_BLACKHOLE_DB
case DB_TYPE_BLACKHOLE_DB:
return new ha_blackhole(table);
file= new ha_blackhole(table);
break;
#endif
#ifdef HAVE_FEDERATED_DB
case DB_TYPE_FEDERATED_DB:
return new ha_federated(table);
file= new ha_federated(table);
break;
#endif
#ifdef HAVE_CSV_DB
case DB_TYPE_CSV_DB:
return new ha_tina(table);
file= new ha_tina(table);
break;
#endif
#ifdef HAVE_NDBCLUSTER_DB
case DB_TYPE_NDBCLUSTER:
return new ha_ndbcluster(table);
file= new ha_ndbcluster(table);
break;
#endif
case DB_TYPE_HEAP:
return new ha_heap(table);
file= new ha_heap(table);
break;
default: // should never happen
{
enum db_type def=(enum db_type) current_thd->variables.table_type;
@ -250,10 +263,21 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
}
/* Fall back to MyISAM */
case DB_TYPE_MYISAM:
return new ha_myisam(table);
file= new ha_myisam(table);
break;
case DB_TYPE_MRG_MYISAM:
return new ha_myisammrg(table);
file= new ha_myisammrg(table);
break;
}
if (file)
{
if (table && file->ha_allocate_read_write_set(table->s->fields))
{
delete file;
file= 0;
}
}
return file;
}
/*
@ -1319,6 +1343,47 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
DBUG_RETURN(error);
}
int handler::ha_allocate_read_write_set(ulong no_fields)
{
DBUG_ENTER("ha_allocate_read_write_set");
uint map_size= ((no_fields + 8)/8)*8;
if (rw_set_allocated)
ha_deallocate_read_write_set();
DBUG_PRINT("info", ("no_fields = %d, map_size = %d", no_fields, map_size));
if (bitmap_init(&read_set, NULL, map_size, FALSE))
{
DBUG_RETURN(TRUE);
}
if (bitmap_init(&write_set, NULL, map_size, FALSE))
{
bitmap_free(&read_set);
DBUG_RETURN(TRUE);
}
ha_clear_all_set();
rw_set_allocated= TRUE;
DBUG_RETURN(FALSE);
}
void handler::ha_deallocate_read_write_set()
{
if (!rw_set_allocated)
return;
bitmap_free(&read_set);
bitmap_free(&write_set);
}
void handler::ha_set_primary_key_in_read_set()
{
ulong prim_key= table->s->primary_key;
if (prim_key != MAX_KEY)
{
KEY_PART_INFO *key_part= table->key_info[prim_key].key_part;
KEY_PART_INFO *key_part_end= key_part +
table->key_info[prim_key].key_parts;
for (;key_part != key_part_end; ++key_part)
ha_set_bit_in_read_set(key_part->fieldnr);
}
}
/*
Read first row (only) from a table
This is never called for InnoDB or BDB tables, as these table types
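A side note on the sizing arithmetic in ha_allocate_read_write_set() above: ((no_fields + 8) / 8) * 8 rounds the bit count up to a whole number of bytes while leaving room for the reserved bit 0. A tiny standalone check (map_size here is a hypothetical mirror of that expression, not a MySQL function):

#include <cassert>

// Mirrors the sizing expression in ha_allocate_read_write_set():
// bits 0..no_fields must fit, rounded up to a multiple of 8.
static unsigned map_size(unsigned no_fields) {
  return ((no_fields + 8) / 8) * 8;
}

int main() {
  assert(map_size(3) == 8);   // bits 0..3 fit in one byte
  assert(map_size(8) == 16);  // bits 0..8 need two bytes
  return 0;
}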

View File

@ -483,6 +483,9 @@ public:
bool auto_increment_column_changed;
bool implicit_emptied; /* Can be !=0 only if HEAP */
const COND *pushed_cond;
MY_BITMAP read_set;
MY_BITMAP write_set;
bool rw_set_allocated;
handler(TABLE *table_arg) :table(table_arg),
ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0),
@ -492,9 +495,13 @@ public:
key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
ref_length(sizeof(my_off_t)), block_size(0),
raid_type(0), ft_handler(0), inited(NONE), implicit_emptied(0),
pushed_cond(NULL)
pushed_cond(NULL), rw_set_allocated(0)
{}
virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ }
virtual ~handler(void)
{
ha_deallocate_read_write_set();
/* TODO: DBUG_ASSERT(inited == NONE); */
}
int ha_open(const char *name, int mode, int test_if_locked);
void update_auto_increment();
virtual void print_error(int error, myf errflag);
@ -559,6 +566,77 @@ public:
{
return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
}
/*
These are a set of routines used to enable handlers to only read/write
partial lists of the fields in the table. The bit vector is maintained
by the server part and is used by the handler at calls to read/write
data in the table.
It replaces the use of query ids for this purpose. The benefit is that
the handler can also set bits in the read/write set if it has special
needs and it is also easy for other parts of the server to interact
with the handler (e.g. the replication part for row-level logging).
The routines are all part of the general handler and cannot be
overridden by a handler. A handler can, however, set/reset bits by
calling these routines.
*/
void ha_set_all_bits_in_read_set()
{
bitmap_set_all(&read_set);
}
void ha_set_all_bits_in_write_set()
{
bitmap_set_all(&write_set);
}
void ha_set_bit_in_read_set(uint fieldnr)
{
bitmap_set_bit(&read_set, fieldnr);
}
void ha_clear_bit_in_read_set(uint fieldnr)
{
bitmap_clear_bit(&read_set, fieldnr);
}
void ha_set_bit_in_write_set(uint fieldnr)
{
bitmap_set_bit(&write_set, fieldnr);
}
void ha_clear_bit_in_write_set(uint fieldnr)
{
bitmap_clear_bit(&write_set, fieldnr);
}
void ha_set_bit_in_rw_set(uint fieldnr, bool write_set)
{
if (!write_set)
ha_set_bit_in_read_set(fieldnr);
else
ha_set_bit_in_write_set(fieldnr);
}
my_bool ha_get_bit_in_read_set(uint fieldnr)
{
return bitmap_is_set(&read_set, fieldnr);
}
my_bool ha_get_all_bit_in_read_set()
{
return bitmap_is_set_all(&read_set);
}
my_bool ha_get_all_bit_in_write_set()
{
return bitmap_is_set_all(&write_set);
}
my_bool ha_get_bit_in_write_set(uint fieldnr)
{
return bitmap_is_set(&write_set, fieldnr);
}
void ha_clear_all_set()
{
bitmap_clear_all(&read_set);
bitmap_clear_all(&write_set);
bitmap_set_bit(&read_set,0);
bitmap_set_bit(&write_set,0);
}
void ha_set_primary_key_in_read_set();
int ha_allocate_read_write_set(ulong no_fields);
void ha_deallocate_read_write_set();
uint get_index(void) const { return active_index; }
virtual int open(const char *name, int mode, uint test_if_locked)=0;
virtual int close(void)=0;

sql/item.cc

@ -2825,13 +2825,18 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **reference)
set_field(from_field);
}
else if (thd->set_query_id && field->query_id != thd->query_id)
else if (thd->set_query_id)
{
/* We only come here in unions */
TABLE *table=field->table;
field->query_id=thd->query_id;
table->used_fields++;
table->used_keys.intersect(field->part_of_key);
TABLE *table= field->table;
table->file->ha_set_bit_in_rw_set(field->fieldnr,
(bool)(thd->set_query_id-1));
if (field->query_id != thd->query_id)
{
/* We only come here in unions */
field->query_id=thd->query_id;
table->used_fields++;
table->used_keys.intersect(field->part_of_key);
}
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (any_privileges)
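To make the encoding in the hunk above concrete: thd->set_query_id now carries three states (0 = no marking, 1 = mark the read set, 2 = mark the write set), and (bool)(thd->set_query_id - 1) selects the target bitmap. A tiny sketch, with target_set as a hypothetical helper that is not part of the patch:

#include <cassert>

enum RWSet { READ_SET, WRITE_SET };

// Mirrors ha_set_bit_in_rw_set(fieldnr, (bool)(set_query_id - 1)):
// false (set_query_id == 1) picks the read set, true (== 2) the write set.
static RWSet target_set(unsigned long set_query_id) {
  return (set_query_id - 1) ? WRITE_SET : READ_SET;
}

int main() {
  assert(target_set(1) == READ_SET);   // set_query_id == 1: fields read
  assert(target_set(2) == WRITE_SET);  // set_query_id == 2: fields assigned
  return 0;
}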

View File

@ -179,7 +179,7 @@ static int lock_external(THD *thd, TABLE **tables, uint count)
((*tables)->reginfo.lock_type >= TL_READ &&
(*tables)->reginfo.lock_type <= TL_READ_NO_INSERT))
lock_type=F_RDLCK;
(*tables)->file->ha_clear_all_set();
if ((error=(*tables)->file->external_lock(thd,lock_type)))
{
print_lock_error(error, (*tables)->file->table_type());

sql/mysql_priv.h

@ -881,7 +881,7 @@ bool setup_tables(THD *thd, TABLE_LIST *tables, Item **conds,
int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
List<Item> *sum_func_list, uint wild_num);
bool setup_fields(THD *thd, Item** ref_pointer_array, TABLE_LIST *tables,
List<Item> &item, bool set_query_id,
List<Item> &item, ulong set_query_id,
List<Item> *sum_func_list, bool allow_sum_func);
int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves,
COND **conds);

sql/opt_range.cc

@ -921,6 +921,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
{
DBUG_RETURN(1);
}
file->ha_set_primary_key_in_read_set();
DBUG_RETURN(0);
}
@ -951,6 +952,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
file->close();
goto failure;
}
file->ha_set_primary_key_in_read_set();
free_file= TRUE;
last_rowid= file->ref;
DBUG_RETURN(0);
@ -5627,6 +5629,7 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
*/
if (head->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY))
DBUG_RETURN(1);
head->file->ha_set_primary_key_in_read_set();
cur_quick_it.rewind();
cur_quick= cur_quick_it++;

sql/sql_acl.cc

@ -1526,6 +1526,7 @@ static bool update_user_table(THD *thd, const char *host, const char *user,
key_copy((byte *) user_key, table->record[0], table->key_info,
table->key_info->key_length);
table->file->ha_set_all_bits_in_read_set();
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read_idx(table->record[0], 0,
(byte *) user_key, table->key_info->key_length,
@ -1619,6 +1620,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
table->file->ha_set_all_bits_in_read_set();
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read_idx(table->record[0], 0,
user_key, table->key_info->key_length,
@ -1752,6 +1754,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
We should NEVER delete from the user table, as a user can still
use mysqld even if he doesn't have any privileges in the user table!
*/
table->file->ha_set_all_bits_in_read_set();
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (cmp_record(table,record[1]) &&
(error=table->file->update_row(table->record[1],table->record[0])))
@ -1834,6 +1837,7 @@ static int replace_db_table(TABLE *table, const char *db,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
table->file->ha_set_all_bits_in_read_set();
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read_idx(table->record[0],0,
user_key, table->key_info->key_length,
@ -1870,6 +1874,7 @@ static int replace_db_table(TABLE *table, const char *db,
/* update old existing row */
if (rights)
{
table->file->ha_set_all_bits_in_read_set();
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if ((error=table->file->update_row(table->record[1],table->record[0])))
goto table_error; /* purecov: deadcode */
@ -2203,6 +2208,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
table->file->ha_set_all_bits_in_read_set();
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read(table->record[0], user_key,
table->key_info->key_length,
@ -2280,6 +2286,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
key_copy(user_key, table->record[0], table->key_info,
key_prefix_length);
table->file->ha_set_all_bits_in_read_set();
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read(table->record[0], user_key,
key_prefix_length,
@ -2378,6 +2385,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
table->file->ha_set_all_bits_in_read_set();
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read_idx(table->record[0], 0,
user_key, table->key_info->key_length,

sql/sql_base.cc

@ -1954,6 +1954,7 @@ static bool check_lock_and_start_stmt(THD *thd, TABLE *table,
my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0),table->alias);
DBUG_RETURN(1);
}
table->file->ha_clear_all_set();
if ((error=table->file->start_stmt(thd)))
{
table->file->print_error(error,MYF(0));
@ -2588,6 +2589,8 @@ Field *find_field_in_real_table(THD *thd, TABLE *table,
if (thd->set_query_id)
{
table->file->ha_set_bit_in_rw_set(field->fieldnr,
(bool)(thd->set_query_id-1));
if (field->query_id != thd->query_id)
{
field->query_id=thd->query_id;
@ -3098,7 +3101,7 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
****************************************************************************/
bool setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
List<Item> &fields, bool set_query_id,
List<Item> &fields, ulong set_query_id,
List<Item> *sum_func_list, bool allow_sum_func)
{
reg2 Item *item;
@ -3547,7 +3550,10 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name,
fields marked in setup_tables during fix_fields of view columns
*/
if (table)
{
table->used_fields= table->s->fields;
table->file->ha_set_all_bits_in_read_set();
}
}
}
if (found)

sql/sql_class.h

@ -760,8 +760,15 @@ public:
/*
- if set_query_id=1, we set field->query_id for all fields. In that case
field list can not contain duplicates.
0: Means query_id is not set and no indicator of the fields used is
given to the handler
1: Means query_id is set for fields in the list and the bit in the read
set is set, to inform the handler that the field is to be read
2: Means query_id is set for fields in the list and the bit in the write
set is set, to inform the handler that it needs to update this field
in write_row and update_row
*/
bool set_query_id;
ulong set_query_id;
/*
This variable is used in post-parse stage to declare that sum-functions,
or functions which have sense only if GROUP BY is present, are allowed.

sql/sql_insert.cc

@ -105,6 +105,11 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
#endif
clear_timestamp_auto_bits(table->timestamp_field_type,
TIMESTAMP_AUTO_SET_ON_INSERT);
/*
No fields were specified, so values must be provided for all fields.
Thus we set all bits in the write set.
*/
table->file->ha_set_all_bits_in_write_set();
}
else
{ // Part field list
@ -120,7 +125,11 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
thd->lex->select_lex.no_wrap_view_item= 1;
save_next= table_list->next_local; // fields only from first table
table_list->next_local= 0;
res= setup_fields(thd, 0, table_list, fields, 1, 0, 0);
/*
Indicate that the fields in the list are to be updated by setting the
set_query_id parameter to 2. This sets the bit in the write_set for each field.
*/
res= setup_fields(thd, 0, table_list, fields, 2, 0, 0);
table_list->next_local= save_next;
thd->lex->select_lex.no_wrap_view_item= 0;
if (res)
@ -209,9 +218,10 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
/*
Check the fields we are going to modify. This will set the query_id
of all used fields to the threads query_id.
of all used fields to the thread's query_id. It will also set all
fields in the write set of this table.
*/
if (setup_fields(thd, 0, insert_table_list, update_fields, 1, 0, 0))
if (setup_fields(thd, 0, insert_table_list, update_fields, 2, 0, 0))
return -1;
if (table->timestamp_field)
@ -788,7 +798,10 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table,
DBUG_RETURN(TRUE);
}
if (duplic == DUP_UPDATE || duplic == DUP_REPLACE)
{
table->file->ha_set_primary_key_in_read_set();
table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
}
thd->lex->select_lex.first_execution= 0;
DBUG_RETURN(FALSE);
}

sql/sql_load.cc

@ -179,6 +179,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
Field **field;
for (field=table->field; *field ; field++)
fields_vars.push_back(new Item_field(*field));
/*
Since all fields are to be set, we set all bits in the write set
*/
table->file->ha_set_all_bits_in_write_set();
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
/*
Let us also prepare SET clause, altough it is probably empty
@ -191,8 +195,12 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
else
{ // Part field list
/* TODO: use this conds for 'WITH CHECK OPTIONS' */
if (setup_fields(thd, 0, table_list, fields_vars, 1, 0, 0) ||
setup_fields(thd, 0, table_list, set_fields, 1, 0, 0) ||
/*
Indicate that both the variables in the field list and the fields in
the SET clause are to be included in the write set of the table
*/
if (setup_fields(thd, 0, table_list, fields_vars, 2, 0, 0) ||
setup_fields(thd, 0, table_list, set_fields, 2, 0, 0) ||
check_that_all_fields_are_given_values(thd, table))
DBUG_RETURN(TRUE);
/*

sql/sql_select.cc

@ -935,6 +935,7 @@ JOIN::optimize()
for (uint i_h = const_tables; i_h < tables; i_h++)
{
TABLE* table_h = join_tab[i_h].table;
table_h->file->ha_set_primary_key_in_read_set();
table_h->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
}
}
@ -7978,7 +7979,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
uint hidden_null_count, hidden_null_pack_length, hidden_field_count;
uint blob_count,group_null_items, string_count;
uint temp_pool_slot=MY_BIT_NONE;
ulong reclength, string_total_length;
ulong reclength, string_total_length, fieldnr= 0;
bool using_unique_constraint= 0;
bool use_packed_rows= 0;
bool not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS);
@ -8162,6 +8163,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
(*argp)->maybe_null=1;
}
new_field->query_id= thd->query_id;
new_field->fieldnr= ++fieldnr;
}
}
}
@ -8209,6 +8211,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
new_field->flags|= GROUP_FLAG;
}
new_field->query_id= thd->query_id;
new_field->fieldnr= ++fieldnr;
*(reg_field++) =new_field;
}
if (!--hidden_field_count)
@ -8217,6 +8220,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
DBUG_ASSERT(field_count >= (uint) (reg_field - table->field));
field_count= (uint) (reg_field - table->field);
*blob_field= 0; // End marker
table->s->fields= field_count;
/* If result table is small; use a heap */
if (blob_count || using_unique_constraint ||
@ -8233,7 +8237,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
{
table->file=get_new_handler(table,table->s->db_type= DB_TYPE_HEAP);
}
if (table->s->fields)
{
table->file->ha_set_all_bits_in_read_set();
table->file->ha_set_all_bits_in_write_set();
}
if (!using_unique_constraint)
reclength+= group_null_items; // null flag is stored separately
@ -8259,7 +8267,6 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS))
use_packed_rows= 1;
table->s->fields= field_count;
table->s->reclength= reclength;
{
uint alloc_length=ALIGN_SIZE(reclength+MI_UNIQUE_HASH_LENGTH+1);

sql/sql_table.cc

@ -3825,7 +3825,9 @@ copy_data_between_tables(TABLE *from,TABLE *to,
this function does not set field->query_id in the columns to the
current query id
*/
from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
to->file->ha_set_all_bits_in_write_set();
from->file->ha_set_all_bits_in_read_set();
from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); //To be removed RONM
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
if (ignore ||
handle_duplicates == DUP_REPLACE)
@ -3991,6 +3993,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
/* InnoDB must be told explicitly to retrieve all columns, because
this function does not set field->query_id in the columns to the
current query id */
t->file->ha_set_all_bits_in_read_set();
t->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (t->file->ha_rnd_init(1))

sql/sql_udf.cc

@ -527,6 +527,7 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
goto err;
table->field[0]->store(udf_name->str, udf_name->length, system_charset_info);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
table->file->ha_set_all_bits_in_read_set();
if (!table->file->index_read_idx(table->record[0], 0,
(byte*) table->field[0]->ptr,
table->key_info[0].key_length,

sql/sql_update.cc

@ -188,7 +188,11 @@ int mysql_update(THD *thd,
{
bool res;
select_lex->no_wrap_view_item= 1;
res= setup_fields(thd, 0, table_list, fields, 1, 0, 0);
/*
Indicate that the set of fields is to be updated by passing 2 for
set_query_id.
*/
res= setup_fields(thd, 0, table_list, fields, 2, 0, 0);
select_lex->no_wrap_view_item= 0;
if (res)
DBUG_RETURN(1); /* purecov: inspected */
@ -268,6 +272,7 @@ int mysql_update(THD *thd,
We can't update table directly; We must first search after all
matching rows before updating the table!
*/
table->file->ha_set_all_bits_in_read_set();
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (used_index < MAX_KEY && old_used_keys.is_set(used_index))
{

sql/table.cc

@ -567,6 +567,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
error= 4;
goto err; /* purecov: inspected */
}
reg_field->fieldnr= i+1; //Set field number
reg_field->comment=comment;
if (field_type == FIELD_TYPE_BIT && !f_bit_as_char(pack_flag))
{
@ -796,6 +797,8 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
(*save++)= i;
}
}
if (outparam->file->ha_allocate_read_write_set(share->fields))
goto err;
/* The table struct is now initialized; Open the table */
error=2;

sql/unireg.cc

@ -715,6 +715,7 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
{
my_error(ER_INVALID_DEFAULT, MYF(0), regfield->field_name);
error= 1;
delete regfield; //To avoid memory leak
goto err;
}
}