MDEV-33087 ALTER TABLE...ALGORITHM=COPY should build indexes more efficiently

- During the copy algorithm, InnoDB uses a bulk insert operation
instead of row-by-row inserts, which lets the copy algorithm
build indexes much more efficiently. This optimization is disabled
for temporary tables, system-versioned tables, and tables that have
foreign key relations.
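
As a rough, standalone illustration (plain C++, not InnoDB code) of why the bulk path is cheaper: rows are buffered, sorted once, and appended to the index in key order, instead of performing one ordered insert per row. The container choices below merely stand in for the B-tree build and are an assumption of this sketch.

// Standalone illustration only: buffering + one sort + a sequential build
// stands in for InnoDB's sorted bulk index build; per-row std::map insertion
// stands in for a row-by-row B-tree insert.
#include <algorithm>
#include <cstdio>
#include <map>
#include <utility>
#include <vector>

int main()
{
  std::vector<std::pair<int, int>> rows= {{3, 30}, {1, 10}, {2, 20}};

  // Row-by-row path: one ordered insert (tree descent) per row.
  std::map<int, int> row_by_row;
  for (const auto &r : rows)
    row_by_row.emplace(r.first, r.second);

  // Bulk path: sort once, then build the index from already ordered input.
  std::sort(rows.begin(), rows.end());
  std::map<int, int> bulk(rows.begin(), rows.end());

  std::printf("row_by_row=%zu bulk=%zu\n", row_by_row.size(), bulk.size());
  return 0;
}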

ha_innobase::extra(): the HA_EXTRA_END_ALTER_COPY mode applies the
buffered bulk insert operation and, on success, updates the statistics
of tables that do not use persistent statistics.
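
A minimal sketch of the caller-side pattern that the sql_insert.cc and sql_table.cc hunks below follow: the buffered bulk insert is applied only when the copy itself finished without error, and a duplicate key discovered while applying it is reported rather than swallowed. The handler type, method name, and error value here are hypothetical stand-ins, not the server API.

// Hypothetical stand-ins only: flush the buffered bulk insert at the end of
// ALTER TABLE...ALGORITHM=COPY and surface a duplicate-key error to the user.
#include <cstdio>

struct fake_handler                 // stand-in for a storage engine handler
{
  bool will_find_duplicate= false;
  int end_alter_copy()              // stand-in for extra(HA_EXTRA_END_ALTER_COPY)
  {
    // Applying the buffered rows is where a duplicate key may first be seen.
    return will_find_duplicate ? 121 /* pretend HA_ERR_FOUND_DUPP_KEY */ : 0;
  }
};

static int finish_copy(fake_handler &h, int copy_error)
{
  if (copy_error > 0)
    return copy_error;              // do not apply the buffer after a failure
  if (int err= h.end_alter_copy())
  {
    std::printf("duplicate key detected while applying the bulk insert\n");
    return err;                     // report instead of silently succeeding
  }
  return 0;
}

int main()
{
  fake_handler ok, dup;
  dup.will_find_duplicate= true;
  std::printf("%d %d\n", finish_copy(ok, 0), finish_copy(dup, 0));
  return 0;
}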

row_merge_bulk_t::write_to_index(): Update stat_n_rows after
applying the bulk insert operation.
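
A simplified standalone rendering of that choice (the types are hypothetical stand-ins for the merge file and bulk buffer): the row count comes from the spill file when one was used, otherwise from the in-memory buffer.

// Hypothetical simplified types: the row count used for the non-persistent
// statistics comes from the spill file if one was written, otherwise from
// the in-memory bulk buffer.
#include <cstdint>
#include <cstdio>

struct spill_file { bool is_open; uint64_t n_rec; };  // stand-in for merge_file_t
struct bulk_buffer { uint64_t n_tuples; };            // stand-in for row_merge_buf_t

static uint64_t bulk_row_count(const spill_file *file, const bulk_buffer &buf)
{
  return (file && file->is_open) ? file->n_rec : buf.n_tuples;
}

int main()
{
  spill_file f{true, 1000};
  bulk_buffer b{42};
  std::printf("%llu %llu\n",
              (unsigned long long) bulk_row_count(&f, b),       // spilled rows
              (unsigned long long) bulk_row_count(nullptr, b)); // in memory only
  return 0;
}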

row_ins_clust_index_entry_low(): In the case of the copy algorithm,
switch to the bulk insert operation.
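
The row0ins.cc hunk below gates that switch on the table's properties; a condensed standalone version of the predicate, using a hypothetical table descriptor instead of dict_table_t, is:

// Hypothetical table descriptor mirroring the flags checked in
// row_ins_clust_index_entry_low(): bulk insert is used only for the
// ALTER...COPY target table and only when no per-row special handling
// (temporary, versioned or foreign-key checked tables) is required.
#include <cstdio>

struct table_desc
{
  bool skip_alter_undo;   // set while the ALTER TABLE...COPY target is filled
  bool is_temporary;
  bool is_versioned;
  bool has_foreign_keys;  // FK validity must be checked row by row
};

static bool copy_alter_uses_bulk_insert(const table_desc &t)
{
  return t.skip_alter_undo && !t.is_temporary && !t.is_versioned &&
         !t.has_foreign_keys;
}

int main()
{
  table_desc plain{true, false, false, false};
  table_desc with_fk{true, false, false, true};
  std::printf("%d %d\n", copy_alter_uses_bulk_insert(plain),   // 1: bulk path
              copy_alter_uses_bulk_insert(with_fk));           // 0: row by row
  return 0;
}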

copy_data_error_handler(): Handles errors raised while copying
data from the source table to the target table.
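
A schematic standalone version of the decision order inside the new helper (error classes and names are hypothetical): fatal errors and rows that would break a foreign key always stop the copy, while a plain duplicate key is skipped under ALTER IGNORE and reported otherwise.

// Hypothetical schematic of the decision order in copy_data_error_handler():
// returns true when the ALTER must stop, false when the offending row is
// skipped and the copy continues.
#include <cstdio>

enum class copy_error { fatal, fk_parent_row, duplicate_key };

static bool handle_copy_error(copy_error err, bool ignore)
{
  switch (err)
  {
  case copy_error::fatal:
    return true;                    // always abort and report the engine error
  case copy_error::fk_parent_row:
    return true;                    // IGNORE may not break referential integrity
  case copy_error::duplicate_key:
    return !ignore;                 // IGNORE skips the row, otherwise report it
  }
  return true;
}

int main()
{
  std::printf("%d %d %d\n",
              handle_copy_error(copy_error::duplicate_key, true),   // 0: skip
              handle_copy_error(copy_error::duplicate_key, false),  // 1: report
              handle_copy_error(copy_error::fk_parent_row, true));  // 1: abort
  return 0;
}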
Thirunarayanan committed Apr 11, 2024
1 parent cac0fc9 commit c4b52b0
Showing 10 changed files with 152 additions and 66 deletions.
4 changes: 2 additions & 2 deletions mysql-test/main/rowid_filter_innodb.result
@@ -1986,7 +1986,7 @@ ANALYZE
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"r_engine_stats": {
"pages_accessed": 3
"pages_accessed": 2
},
"filtered": "REPLACED",
"r_filtered": 66.66666667,
@@ -2140,7 +2140,7 @@ ANALYZE
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"r_engine_stats": {
"pages_accessed": 3
"pages_accessed": 2
},
"filtered": "REPLACED",
"r_filtered": 66.66666667,
11 changes: 10 additions & 1 deletion sql/sql_insert.cc
@@ -4318,7 +4318,16 @@ bool select_insert::prepare_eof()
if (info.ignore || info.handle_duplicates != DUP_ERROR)
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
table->file->ha_rnd_end();
table->file->extra(HA_EXTRA_END_ALTER_COPY);
if (error <= 0)
{
error= table->file->extra(HA_EXTRA_END_ALTER_COPY);
if (error == HA_ERR_FOUND_DUPP_KEY)
{
uint key_nr= table->file->get_dup_key(error);
if ((int)key_nr >= 0 && key_nr < table->s->keys)
print_keydup_error(table, &table->key_info[key_nr], MYF(0));
}
}
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);

120 changes: 68 additions & 52 deletions sql/sql_table.cc
@@ -11746,6 +11746,60 @@ bool mysql_trans_commit_alter_copy_data(THD *thd)
DBUG_RETURN(error);
}

/** Handle the error when copying data from source to target table.
@param error error code
@param ignore alter ignore statement
@param to target table handler
@param thd Mysql Thread
@param alter_ctx Runtime context for alter statement
@return true in case of error
@return false in case of skipping the row and continue alter operation */
static bool
copy_data_error_handler(int &error, bool ignore, TABLE *to,
THD *thd, Alter_table_ctx *alter_ctx)
{
if (to->file->is_fatal_error(error, HA_CHECK_DUP))
{
/* Not a duplicate key error. */
to->file->print_error(error, MYF(0));
error = 1;
func_exit:
return true;
}
/* Duplicate key error. */
if (unlikely(alter_ctx->fk_error_if_delete_row))
{
/* We are trying to omit a row from the table which serves
as parent in a foreign key. This might have broken
referential integrity so emit an error. Note that we
can't ignore this error even if we are
executing ALTER IGNORE TABLE. IGNORE allows to skip rows, but
doesn't allow to break unique or foreign key constraints, */
my_error(ER_FK_CANNOT_DELETE_PARENT, MYF(0),
alter_ctx->fk_error_id,
alter_ctx->fk_error_table);
goto func_exit;
}
if (ignore)
return false;
/* Ordinary ALTER TABLE. Report duplicate key error. */
uint key_nr= to->file->get_dup_key(error);
if ((int)key_nr >= 0)
{
const char *err_msg= ER_THD(thd, ER_DUP_ENTRY_WITH_KEY_NAME);
if (key_nr == 0 && to->s->keys > 0 &&
(to->key_info[0].key_part[0].field->flags &
AUTO_INCREMENT_FLAG))
err_msg= ER_THD(thd, ER_DUP_ENTRY_AUTOINCREMENT_CASE);
print_keydup_error(to,
key_nr >= to->s->keys ? NULL :
&to->key_info[key_nr],
err_msg, MYF(0));
}
else
to->file->print_error(error, MYF(0));
goto func_exit;
}

static int
copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, bool ignore,
@@ -12007,57 +12061,13 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, bool ignore,
to->auto_increment_field_not_null= FALSE;
if (unlikely(error))
{
if (to->file->is_fatal_error(error, HA_CHECK_DUP))
{
/* Not a duplicate key error. */
to->file->print_error(error, MYF(0));
error= 1;
break;
}
if (copy_data_error_handler(error, ignore, to, thd, alter_ctx))
break;
else
{
/* Duplicate key error. */
if (unlikely(alter_ctx->fk_error_if_delete_row))
{
/*
We are trying to omit a row from the table which serves as parent
in a foreign key. This might have broken referential integrity so
emit an error. Note that we can't ignore this error even if we are
executing ALTER IGNORE TABLE. IGNORE allows to skip rows, but
doesn't allow to break unique or foreign key constraints,
*/
my_error(ER_FK_CANNOT_DELETE_PARENT, MYF(0),
alter_ctx->fk_error_id,
alter_ctx->fk_error_table);
break;
}

if (ignore)
{
/* This ALTER IGNORE TABLE. Simply skip row and continue. */
to->file->restore_auto_increment(prev_insert_id);
delete_count++;
}
else
{
/* Ordinary ALTER TABLE. Report duplicate key error. */
uint key_nr= to->file->get_dup_key(error);
if ((int) key_nr >= 0)
{
const char *err_msg= ER_THD(thd, ER_DUP_ENTRY_WITH_KEY_NAME);
if (key_nr == 0 && to->s->keys > 0 &&
(to->key_info[0].key_part[0].field->flags &
AUTO_INCREMENT_FLAG))
err_msg= ER_THD(thd, ER_DUP_ENTRY_AUTOINCREMENT_CASE);
print_keydup_error(to,
key_nr >= to->s->keys ? NULL :
&to->key_info[key_nr],
err_msg, MYF(0));
}
else
to->file->print_error(error, MYF(0));
break;
}
/* This ALTER IGNORE TABLE. Simply skip row and continue. */
to->file->restore_auto_increment(prev_insert_id);
delete_count++;
}
}
else
@@ -12085,9 +12095,15 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, bool ignore,
error= 1;
}
bulk_insert_started= 0;
if (!ignore)
to->file->extra(HA_EXTRA_END_ALTER_COPY);

if (!ignore && error <= 0)
{
int alt_error= to->file->extra(HA_EXTRA_END_ALTER_COPY);
if (alt_error > 0)
{
error= alt_error;
copy_data_error_handler(error, false, to, thd, alter_ctx);
}
}
cleanup_done= 1;
to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);

20 changes: 20 additions & 0 deletions storage/innobase/handler/ha_innodb.cc
@@ -15741,6 +15741,26 @@ ha_innobase::extra(
break;
case HA_EXTRA_END_ALTER_COPY:
trx = check_trx_exists(ha_thd());
if (m_prebuilt->table->skip_alter_undo) {
if (dberr_t err= trx->bulk_insert_apply()) {
m_prebuilt->table->skip_alter_undo = 0;
return convert_error_code_to_mysql(
err,
m_prebuilt->table->flags,
trx->mysql_thd);
}

trx->end_bulk_insert(*m_prebuilt->table);
trx->bulk_insert = false;
/* During copy alter operation, InnoDB
updates the stats only for non-persistent
tables. */
if (!dict_stats_is_persistent_enabled(
m_prebuilt->table)) {
dict_stats_update_if_needed(
m_prebuilt->table, *trx);
}
}
m_prebuilt->table->skip_alter_undo = 0;
if (!m_prebuilt->table->is_temporary()
&& !high_level_read_only) {
12 changes: 6 additions & 6 deletions storage/innobase/handler/handler0alter.cc
@@ -845,14 +845,14 @@ inline void dict_table_t::rollback_instant(
}
}

/* Report an InnoDB error to the client by invoking my_error(). */
static ATTRIBUTE_COLD __attribute__((nonnull))
/* Report an InnoDB error to the client by invoking my_error().
@param error InnoDB error code
@param table table name
@param flags table flags */
ATTRIBUTE_COLD __attribute__((nonnull))
void
my_error_innodb(
/*============*/
dberr_t error, /*!< in: InnoDB error code */
const char* table, /*!< in: table name */
ulint flags) /*!< in: table flags */
dberr_t error, const char *table, ulint flags)
{
switch (error) {
case DB_MISSING_HISTORY:
9 changes: 9 additions & 0 deletions storage/innobase/include/row0merge.h
@@ -416,6 +416,15 @@ row_merge_read_rec(
ulint space) /*!< in: space id */
MY_ATTRIBUTE((warn_unused_result));

/* Report an InnoDB error to the client by invoking my_error().
@param error InnoDB error code
@param table table name
@param flags table flags */
ATTRIBUTE_COLD __attribute__((nonnull))
void
my_error_innodb(
dberr_t error, const char *table, ulint flags);

/** Buffer for bulk insert */
class row_merge_bulk_t
{
4 changes: 2 additions & 2 deletions storage/innobase/include/trx0trx.h
@@ -1172,8 +1172,8 @@ struct trx_t : ilist_node<>
{
if (UNIV_LIKELY(!bulk_insert))
return nullptr;
ut_ad(!check_unique_secondary);
ut_ad(!check_foreigns);
ut_ad(table->skip_alter_undo || !check_unique_secondary);
ut_ad(table->skip_alter_undo || !check_foreigns);
auto it= mod_tables.find(table);
if (it == mod_tables.end() || !it->second.bulk_buffer_exist())
return nullptr;
19 changes: 17 additions & 2 deletions storage/innobase/row/row0ins.cc
@@ -2711,6 +2711,20 @@ row_ins_clust_index_entry_low(

DBUG_EXECUTE_IF("row_ins_row_level", goto skip_bulk_insert;);

/* Ignore the bulk insert operation for temporary,
versioning table. In case of foreign key constraint,
InnoDB has to check the constraint validity for
each row. But bulk insert operation doesn't
check foreign constraint. Skip bulk insert
operation if the table contains foreign key relation */
if (index->table->skip_alter_undo
&& !index->table->is_temporary()
&& !index->table->versioned()
&& index->table->foreign_set.empty()) {
trx_start_if_not_started(trx, true);
goto bulk_insert;
}

if (!(flags & BTR_NO_UNDO_LOG_FLAG)
&& page_is_empty(block->page.frame)
&& !entry->is_metadata() && !trx->duplicates
@@ -2723,7 +2737,7 @@
&& !index->table->has_spatial_index()
&& !index->table->versioned()) {
DEBUG_SYNC_C("empty_root_page_insert");

bulk_insert:
trx->bulk_insert = true;

if (!index->table->is_temporary()) {
@@ -3393,7 +3407,8 @@ row_ins_index_entry(
if (auto t= trx->check_bulk_buffer(index->table)) {
/* MDEV-25036 FIXME: check also foreign key
constraints */
ut_ad(!trx->check_foreigns);
ut_ad(index->table->skip_alter_undo
|| !trx->check_foreigns);
return t->bulk_insert_buffered(*entry, *index, trx);
}
}
15 changes: 14 additions & 1 deletion storage/innobase/row/row0merge.cc
@@ -5320,6 +5320,9 @@ dberr_t row_merge_bulk_t::write_to_index(ulint index_no, trx_t *trx)
else if (index->is_primary() && table->persistent_autoinc)
btr_write_autoinc(index, table->autoinc - 1);
err= btr_bulk.finish(err);
if (err == DB_SUCCESS && index->is_clust())
table->stat_n_rows= (file && file->fd != OS_FILE_CLOSED)
? file->n_rec : buf.n_tuples;
return err;
}

@@ -5333,8 +5336,18 @@ dberr_t row_merge_bulk_t::write_to_table(dict_table_t *table, trx_t *trx)
continue;

dberr_t err= write_to_index(i, trx);
if (err != DB_SUCCESS)
switch(err) {
default:
if (table->skip_alter_undo)
my_error_innodb(err, table->name.m_name, table->flags);
err_exit:
return err;
case DB_SUCCESS:
break;
case DB_DUPLICATE_KEY:
trx->error_info = index;
goto err_exit;
}
i++;
}

4 changes: 4 additions & 0 deletions storage/innobase/trx/trx0rec.cc
@@ -1871,6 +1871,10 @@ trx_undo_report_row_operation(
*clust_entry, *index, trx)) {
return err;
}

if (index->table->skip_alter_undo) {
return DB_SUCCESS;
}
} else {
bulk = false;
}
