Diffstat (limited to 'storage/innobase/trx')
-rw-r--r--   storage/innobase/trx/trx0purge.cc | 351
-rw-r--r--   storage/innobase/trx/trx0rec.cc   |   3
-rw-r--r--   storage/innobase/trx/trx0rseg.cc  |  41
-rw-r--r--   storage/innobase/trx/trx0trx.cc   |  10
-rw-r--r--   storage/innobase/trx/trx0undo.cc  |  63
5 files changed, 275 insertions(+), 193 deletions(-)
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index 1f31ceda..cff16d9c 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -41,6 +41,7 @@ Created 3/26/1996 Heikki Tuuri
#include "dict0load.h"
#include <mysql/service_thd_mdl.h>
#include <mysql/service_wsrep.h>
+#include "log.h"
/** Maximum allowable purge history length. <=0 means 'infinite'. */
ulong srv_max_purge_lag = 0;
@@ -168,10 +169,15 @@ void purge_sys_t::create()
ut_ad(this == &purge_sys);
ut_ad(!m_initialized);
ut_ad(!enabled());
+ ut_ad(!m_active);
+ /* If innodb_undo_tablespaces>0, the rollback segment 0
+ (which always resides in the system tablespace) will
+ never be used; @see trx_assign_rseg_low() */
+ skipped_rseg= srv_undo_tablespaces > 0;
m_paused= 0;
query= purge_graph_build();
next_stored= false;
- rseg= NULL;
+ rseg= nullptr;
page_no= 0;
offset= 0;
hdr_page_no= 0;
@@ -179,8 +185,8 @@ void purge_sys_t::create()
latch.SRW_LOCK_INIT(trx_purge_latch_key);
end_latch.init();
mysql_mutex_init(purge_sys_pq_mutex_key, &pq_mutex, nullptr);
- truncate.current= NULL;
- truncate.last= NULL;
+ truncate_undo_space.current= nullptr;
+ truncate_undo_space.last= 0;
m_initialized= true;
}
@@ -350,14 +356,21 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
}
/** Free an undo log segment.
-@param block rollback segment header page
+@param rseg_hdr rollback segment header page
+@param block undo segment header page
@param mtr mini-transaction */
-static void trx_purge_free_segment(buf_block_t *block, mtr_t &mtr)
+static void trx_purge_free_segment(buf_block_t *rseg_hdr, buf_block_t *block,
+ mtr_t &mtr)
{
+ ut_ad(mtr.memo_contains_flagged(rseg_hdr, MTR_MEMO_PAGE_X_FIX));
+ ut_ad(mtr.memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
+
while (!fseg_free_step_not_header(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER +
block->page.frame, &mtr))
{
+ rseg_hdr->fix();
block->fix();
+ ut_d(const page_id_t rseg_hdr_id{rseg_hdr->page.id()});
ut_d(const page_id_t id{block->page.id()});
mtr.commit();
/* NOTE: If the server is killed after the log that was produced
@@ -368,26 +381,62 @@ static void trx_purge_free_segment(buf_block_t *block, mtr_t &mtr)
This does not matter when using multiple innodb_undo_tablespaces;
innodb_undo_log_truncate=ON will be able to reclaim the space. */
mtr.start();
+ rseg_hdr->page.lock.x_lock();
+ ut_ad(rseg_hdr->page.id() == rseg_hdr_id);
block->page.lock.x_lock();
ut_ad(block->page.id() == id);
- mtr.memo_push(block, MTR_MEMO_PAGE_X_MODIFY);
+ mtr.memo_push(rseg_hdr, MTR_MEMO_PAGE_X_FIX);
+ mtr.memo_push(block, MTR_MEMO_PAGE_X_FIX);
}
while (!fseg_free_step(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER +
block->page.frame, &mtr));
}
+void purge_sys_t::rseg_enable(trx_rseg_t &rseg)
+{
+ ut_ad(this == &purge_sys);
+#ifndef SUX_LOCK_GENERIC
+ ut_ad(rseg.latch.is_write_locked());
+#endif
+ uint8_t skipped= skipped_rseg;
+ ut_ad(skipped < TRX_SYS_N_RSEGS);
+ if (&rseg == &trx_sys.rseg_array[skipped])
+ {
+ /* If this rollback segment is subject to innodb_undo_log_truncate=ON,
+ we must not clear the flag. But we will advance purge_sys.skipped_rseg
+ to be able to choose another candidate for this soft truncation, and
+ to prevent the following scenario:
+
+ (1) purge_sys_t::iterator::free_history_rseg() had invoked
+ rseg.set_skip_allocation()
+ (2) undo log truncation had completed on this rollback segment
+ (3) SET GLOBAL innodb_undo_log_truncate=OFF
+ (4) purge_sys_t::iterator::free_history_rseg() would not be able to
+ invoke rseg.set_skip_allocation() on any other rollback segment
+ before this rseg has grown enough */
+ if (truncate_undo_space.current != rseg.space)
+ rseg.clear_skip_allocation();
+ skipped++;
+ /* If innodb_undo_tablespaces>0, the rollback segment 0
+ (which always resides in the system tablespace) will
+ never be used; @see trx_assign_rseg_low() */
+ if (!(skipped&= (TRX_SYS_N_RSEGS - 1)) && srv_undo_tablespaces)
+ skipped++;
+ skipped_rseg= skipped;
+ }
+}
+
/** Remove unnecessary history data from a rollback segment.
@param rseg rollback segment
@param limit truncate anything before this
-@param all whether everything can be truncated
@return error code */
-static dberr_t
-trx_purge_truncate_rseg_history(trx_rseg_t &rseg,
- const purge_sys_t::iterator &limit, bool all)
+inline dberr_t purge_sys_t::iterator::free_history_rseg(trx_rseg_t &rseg) const
{
fil_addr_t hdr_addr;
mtr_t mtr;
+ bool freed= false;
+ uint32_t rseg_ref= 0;
mtr.start();
@@ -397,6 +446,8 @@ trx_purge_truncate_rseg_history(trx_rseg_t &rseg,
{
func_exit:
mtr.commit();
+ if (freed && (rseg.SKIP & rseg_ref))
+ purge_sys.rseg_enable(rseg);
return err;
}
@@ -418,16 +469,40 @@ loop:
const trx_id_t undo_trx_no=
mach_read_from_8(b->page.frame + hdr_addr.boffset + TRX_UNDO_TRX_NO);
- if (undo_trx_no >= limit.trx_no)
+ if (undo_trx_no >= trx_no)
{
- if (undo_trx_no == limit.trx_no)
- err = trx_undo_truncate_start(&rseg, hdr_addr.page,
- hdr_addr.boffset, limit.undo_no);
+ if (undo_trx_no == trx_no)
+ err= trx_undo_truncate_start(&rseg, hdr_addr.page,
+ hdr_addr.boffset, undo_no);
goto func_exit;
}
-
- if (!all)
- goto func_exit;
+ else
+ {
+ rseg_ref= rseg.ref_load();
+ if (rseg_ref >= rseg.REF || !purge_sys.sees(rseg.needs_purge))
+ {
+ /* We cannot clear this entire rseg because trx_assign_rseg_low()
+ has already chosen it for a future trx_undo_assign(), or
+ because some recently started transaction needs purging.
+
+ If this invocation could not reduce rseg.history_size at all
+ (!freed), we will try to ensure progress and prevent our
+ starvation by disabling one rollback segment for future
+ trx_assign_rseg_low() invocations until a future invocation has
+ made progress and invoked purge_sys_t::rseg_enable(rseg) on that
+ rollback segment. */
+
+ if (!(rseg.SKIP & rseg_ref) && !freed &&
+ ut_d(!trx_rseg_n_slots_debug &&)
+ &rseg == &trx_sys.rseg_array[purge_sys.skipped_rseg])
+ /* If rseg.space == purge_sys.truncate_undo_space.current
+ the following will be a no-op. A possible conflict
+ with innodb_undo_log_truncate=ON will be handled in
+ purge_sys_t::rseg_enable(). */
+ rseg.set_skip_allocation();
+ goto func_exit;
+ }
+ }
fil_addr_t prev_hdr_addr=
flst_get_prev_addr(b->page.frame + hdr_addr.boffset +
@@ -459,7 +534,7 @@ loop:
free_segment:
ut_ad(rseg.curr_size >= seg_size);
rseg.curr_size-= seg_size;
- trx_purge_free_segment(b, mtr);
+ trx_purge_free_segment(rseg_hdr, b, mtr);
break;
case TRX_UNDO_CACHED:
/* rseg.undo_cached must point to this page */
@@ -490,10 +565,11 @@ loop:
mtr.commit();
ut_ad(rseg.history_size > 0);
rseg.history_size--;
+ freed= true;
mtr.start();
rseg_hdr->page.lock.x_lock();
ut_ad(rseg_hdr->page.id() == rseg.page_id());
- mtr.memo_push(rseg_hdr, MTR_MEMO_PAGE_X_MODIFY);
+ mtr.memo_push(rseg_hdr, MTR_MEMO_PAGE_X_FIX);
goto loop;
}
@@ -544,9 +620,7 @@ dberr_t purge_sys_t::iterator::free_history() const
ut_ad(rseg.is_persistent());
log_free_check();
rseg.latch.wr_lock(SRW_LOCK_CALL);
- dberr_t err=
- trx_purge_truncate_rseg_history(rseg, *this, !rseg.is_referenced() &&
- purge_sys.sees(rseg.needs_purge));
+ dberr_t err= free_history_rseg(rseg);
rseg.latch.wr_unlock();
if (err)
return err;
@@ -554,6 +628,62 @@ dberr_t purge_sys_t::iterator::free_history() const
return DB_SUCCESS;
}
+inline void trx_sys_t::undo_truncate_start(fil_space_t &space)
+{
+ ut_ad(this == &trx_sys);
+ /* An undo tablespace is always a single file. */
+ ut_a(UT_LIST_GET_LEN(space.chain) == 1);
+ fil_node_t *file= UT_LIST_GET_FIRST(space.chain);
+ /* The undo tablespace files are never closed. */
+ ut_ad(file->is_open());
+ sql_print_information("InnoDB: Starting to truncate %s", file->name);
+
+ for (auto &rseg : rseg_array)
+ if (rseg.space == &space)
+ {
+ /* Prevent a race with purge_sys_t::iterator::free_history_rseg() */
+ rseg.latch.rd_lock(SRW_LOCK_CALL);
+ /* Once set, this rseg will not be allocated to subsequent
+ transactions, but we will wait for existing active
+ transactions to finish. */
+ rseg.set_skip_allocation();
+ rseg.latch.rd_unlock();
+ }
+}
+
+inline fil_space_t *purge_sys_t::undo_truncate_try(uint32_t id, uint32_t size)
+{
+ ut_ad(srv_is_undo_tablespace(id));
+ fil_space_t *space= fil_space_get(id);
+ if (space && space->get_size() > size)
+ {
+ truncate_undo_space.current= space;
+ trx_sys.undo_truncate_start(*space);
+ return space;
+ }
+ return nullptr;
+}
+
+fil_space_t *purge_sys_t::truncating_tablespace()
+{
+ ut_ad(this == &purge_sys);
+
+ fil_space_t *space= truncate_undo_space.current;
+ if (space || srv_undo_tablespaces_active < 2 || !srv_undo_log_truncate)
+ return space;
+
+ const uint32_t size= uint32_t(srv_max_undo_log_size >> srv_page_size_shift);
+ for (uint32_t i= truncate_undo_space.last, j= i;; )
+ {
+ if (fil_space_t *s= undo_truncate_try(srv_undo_space_id_start + i, size))
+ return s;
+ ++i;
+ i%= srv_undo_tablespaces_active;
+ if (i == j)
+ return nullptr;
+ }
+}
+
#if defined __GNUC__ && __GNUC__ == 4 && !defined __clang__
# if defined __arm__ || defined __aarch64__
/* Work around an internal compiler error in GCC 4.8.5 */
@@ -579,55 +709,14 @@ TRANSACTIONAL_TARGET void trx_purge_truncate_history()
head.undo_no= 0;
}
- if (head.free_history() != DB_SUCCESS || srv_undo_tablespaces_active < 2)
+ if (head.free_history() != DB_SUCCESS)
return;
- while (srv_undo_log_truncate)
+ while (fil_space_t *space= purge_sys.truncating_tablespace())
{
- if (!purge_sys.truncate.current)
- {
- const ulint threshold=
- ulint(srv_max_undo_log_size >> srv_page_size_shift);
- for (uint32_t i= purge_sys.truncate.last
- ? purge_sys.truncate.last->id - srv_undo_space_id_start : 0,
- j= i;; )
- {
- const uint32_t space_id= srv_undo_space_id_start + i;
- ut_ad(srv_is_undo_tablespace(space_id));
- fil_space_t *space= fil_space_get(space_id);
- ut_a(UT_LIST_GET_LEN(space->chain) == 1);
-
- if (space && space->get_size() > threshold)
- {
- purge_sys.truncate.current= space;
- break;
- }
-
- ++i;
- i %= srv_undo_tablespaces_active;
- if (i == j)
- return;
- }
- }
-
- fil_space_t &space= *purge_sys.truncate.current;
- /* Undo tablespace always are a single file. */
- fil_node_t *file= UT_LIST_GET_FIRST(space.chain);
- /* The undo tablespace files are never closed. */
- ut_ad(file->is_open());
-
- DBUG_LOG("undo", "marking for truncate: " << file->name);
-
- for (auto &rseg : trx_sys.rseg_array)
- if (rseg.space == &space)
- /* Once set, this rseg will not be allocated to subsequent
- transactions, but we will wait for existing active
- transactions to finish. */
- rseg.set_skip_allocation();
-
for (auto &rseg : trx_sys.rseg_array)
{
- if (rseg.space != &space)
+ if (rseg.space != space)
continue;
rseg.latch.rd_lock(SRW_LOCK_CALL);
@@ -660,15 +749,9 @@ not_free:
rseg.latch.rd_unlock();
}
- ib::info() << "Truncating " << file->name;
- trx_purge_cleanse_purge_queue(space);
-
- log_free_check();
-
- mtr_t mtr;
- mtr.start();
- mtr.x_lock_space(&space);
- const auto space_id= space.id;
+ const char *file_name= UT_LIST_GET_FIRST(space->chain)->name;
+ sql_print_information("InnoDB: Truncating %s", file_name);
+ trx_purge_cleanse_purge_queue(*space);
/* Lock all modified pages of the tablespace.
@@ -678,104 +761,41 @@ not_free:
mini-transaction commit and the server was killed, then
discarding the to-be-trimmed pages without flushing would
break crash recovery. */
- rescan:
- mysql_mutex_lock(&buf_pool.flush_list_mutex);
- for (buf_page_t *bpage= UT_LIST_GET_LAST(buf_pool.flush_list); bpage; )
- {
- ut_ad(bpage->oldest_modification());
- ut_ad(bpage->in_file());
-
- buf_page_t *prev= UT_LIST_GET_PREV(list, bpage);
-
- if (bpage->oldest_modification() > 2 && bpage->id().space() == space_id)
- {
- ut_ad(bpage->frame);
- bpage->fix();
- {
- /* Try to acquire an exclusive latch while the cache line is
- fresh after fix(). */
- const bool got_lock{bpage->lock.x_lock_try()};
- buf_pool.flush_hp.set(prev);
- mysql_mutex_unlock(&buf_pool.flush_list_mutex);
- if (!got_lock)
- bpage->lock.x_lock();
- }
-
-#ifdef BTR_CUR_HASH_ADAPT
- /* There is no AHI on undo tablespaces. */
- ut_ad(!reinterpret_cast<buf_block_t*>(bpage)->index);
-#endif
- ut_ad(!bpage->is_io_fixed());
- ut_ad(bpage->id().space() == space_id);
-
- if (bpage->oldest_modification() > 2)
- {
- mtr.memo_push(reinterpret_cast<buf_block_t*>(bpage),
- MTR_MEMO_PAGE_X_FIX);
- mysql_mutex_lock(&buf_pool.flush_list_mutex);
- ut_ad(bpage->oldest_modification() > 2);
- bpage->reset_oldest_modification();
- }
- else
- {
- bpage->unfix();
- bpage->lock.x_unlock();
- mysql_mutex_lock(&buf_pool.flush_list_mutex);
- }
-
- if (prev != buf_pool.flush_hp.get())
- {
- mysql_mutex_unlock(&buf_pool.flush_list_mutex);
- goto rescan;
- }
- }
- bpage= prev;
- }
-
- mysql_mutex_unlock(&buf_pool.flush_list_mutex);
-
- /* Re-initialize tablespace, in a single mini-transaction. */
- const ulint size= SRV_UNDO_TABLESPACE_SIZE_IN_PAGES;
+ if (UNIV_UNLIKELY(srv_shutdown_state != SRV_SHUTDOWN_NONE) &&
+ srv_fast_shutdown)
+ return;
/* Adjust the tablespace metadata. */
mysql_mutex_lock(&fil_system.mutex);
- space.set_stopping();
- space.is_being_truncated= true;
- if (space.crypt_data)
+ if (space->crypt_data)
{
- space.reacquire();
+ space->reacquire();
mysql_mutex_unlock(&fil_system.mutex);
- fil_space_crypt_close_tablespace(&space);
- space.release();
+ fil_space_crypt_close_tablespace(space);
+ space->release();
}
else
mysql_mutex_unlock(&fil_system.mutex);
- for (auto i= 6000; space.referenced();
- std::this_thread::sleep_for(std::chrono::milliseconds(10)))
- {
- if (!--i)
- {
- mtr.commit();
- ib::error() << "Failed to freeze UNDO tablespace " << file->name;
- return;
- }
- }
+ /* Re-initialize tablespace, in a single mini-transaction. */
+ const uint32_t size= SRV_UNDO_TABLESPACE_SIZE_IN_PAGES;
+
+ log_free_check();
+ mtr_t mtr;
+ mtr.start();
+ mtr.x_lock_space(space);
/* Associate the undo tablespace with mtr.
During mtr::commit_shrink(), InnoDB can use the undo
tablespace object to clear all freed ranges */
- mtr.set_named_space(&space);
- mtr.trim_pages(page_id_t(space.id, size));
- ut_a(fsp_header_init(&space, size, &mtr) == DB_SUCCESS);
- mysql_mutex_lock(&fil_system.mutex);
- space.size= file->size= size;
- mysql_mutex_unlock(&fil_system.mutex);
+ mtr.set_named_space(space);
+ mtr.trim_pages(page_id_t(space->id, size));
+ ut_a(fsp_header_init(space, size, &mtr) == DB_SUCCESS);
for (auto &rseg : trx_sys.rseg_array)
{
- if (rseg.space != &space)
+ if (rseg.space != space)
continue;
ut_ad(!rseg.is_referenced());
@@ -784,7 +804,7 @@ not_free:
possibly before this server had been started up. */
dberr_t err;
- buf_block_t *rblock= trx_rseg_header_create(&space,
+ buf_block_t *rblock= trx_rseg_header_create(space,
&rseg - trx_sys.rseg_array,
trx_sys.get_max_trx_id(),
&mtr, &err);
@@ -797,7 +817,7 @@ not_free:
rseg.reinit(rblock->page.id().page_no());
}
- mtr.commit_shrink(space);
+ mtr.commit_shrink(*space, size);
/* No mutex; this is only updated by the purge coordinator. */
export_vars.innodb_undo_truncations++;
@@ -814,14 +834,15 @@ not_free:
purge_sys.next_stored= false;
}
- DBUG_EXECUTE_IF("ib_undo_trunc", ib::info() << "ib_undo_trunc";
+ DBUG_EXECUTE_IF("ib_undo_trunc",
+ sql_print_information("InnoDB: ib_undo_trunc");
log_buffer_flush_to_disk();
DBUG_SUICIDE(););
- ib::info() << "Truncated " << file->name;
- purge_sys.truncate.last= purge_sys.truncate.current;
- ut_ad(&space == purge_sys.truncate.current);
- purge_sys.truncate.current= nullptr;
+ sql_print_information("InnoDB: Truncated %s", file_name);
+ ut_ad(space == purge_sys.truncate_undo_space.current);
+ purge_sys.truncate_undo_space.current= nullptr;
+ purge_sys.truncate_undo_space.last= space->id - srv_undo_space_id_start;
}
}
@@ -853,7 +874,9 @@ void purge_sys_t::rseg_get_next_history_log()
{
fil_addr_t prev_log_addr;
+#ifndef SUX_LOCK_GENERIC
ut_ad(rseg->latch.is_write_locked());
+#endif
ut_a(rseg->last_page_no != FIL_NULL);
tail.trx_no= rseg->last_trx_no() + 1;
@@ -969,7 +992,9 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr)
{
ut_ad(next_stored);
ut_ad(tail.trx_no < low_limit_no());
+#ifndef SUX_LOCK_GENERIC
ut_ad(rseg->latch.is_write_locked());
+#endif
if (!offset)
{
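
The trx0purge.cc changes above move the choice of which undo tablespace to shrink out of trx_purge_truncate_history() and into purge_sys_t::truncating_tablespace(): an already in-progress target is returned as-is, the scan is skipped when innodb_undo_log_truncate is OFF or fewer than two undo tablespaces are active, and otherwise the active undo tablespaces are walked round-robin, starting from the slot recorded after the previous truncation, until one larger than innodb_max_undo_log_size (converted to pages) is found. The standalone sketch below mirrors only that selection loop; undo_space_stub, pick_truncation_candidate and threshold_pages are illustrative stand-ins, not names from the patch.

    #include <cstdint>
    #include <vector>

    struct undo_space_stub { uint32_t id; uint32_t size_in_pages; };

    // Scan the active undo tablespaces round-robin, starting at the slot of
    // the most recently truncated one ('last' is assumed to be a valid index),
    // and return the index of the first tablespace larger than the threshold;
    // -1 means none qualifies (the real function returns nullptr and
    // truncation is skipped for now).
    int pick_truncation_candidate(const std::vector<undo_space_stub> &spaces,
                                  uint32_t last, uint32_t threshold_pages)
    {
      if (spaces.size() < 2)            // requires >= 2 active undo tablespaces
        return -1;
      for (uint32_t i = last, j = i;;)
      {
        if (spaces[i].size_in_pages > threshold_pages)
          return int(i);                // oversized: truncate this one next
        ++i;
        i %= uint32_t(spaces.size());   // wrap around the active tablespaces
        if (i == j)
          return -1;                    // full cycle completed without a hit
      }
    }
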
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index b381c9de..2923dc64 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -2069,9 +2069,10 @@ trx_undo_get_undo_rec_low(
mtr.start();
trx_undo_rec_t *undo_rec= nullptr;
- if (const buf_block_t* undo_page=
+ if (buf_block_t* undo_page=
buf_page_get(page_id_t(rseg->space->id, page_no), 0, RW_S_LATCH, &mtr))
{
+ buf_page_make_young_if_needed(&undo_page->page);
undo_rec= undo_page->page.frame + offset;
const size_t end= mach_read_from_2(undo_rec);
if (UNIV_UNLIKELY(end <= offset ||
diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc
index 8d1a381c..87a2ac7b 100644
--- a/storage/innobase/trx/trx0rseg.cc
+++ b/storage/innobase/trx/trx0rseg.cc
@@ -296,8 +296,13 @@ buf_block_t *trx_rseg_t::get(mtr_t *mtr, dberr_t *err) const
if (err) *err= DB_TABLESPACE_NOT_FOUND;
return nullptr;
}
- return buf_page_get_gen(page_id(), 0, RW_X_LATCH, nullptr,
- BUF_GET, mtr, err);
+
+ buf_block_t *block= buf_page_get_gen(page_id(), 0, RW_X_LATCH, nullptr,
+ BUF_GET, mtr, err);
+ if (UNIV_LIKELY(block != nullptr))
+ buf_page_make_young_if_needed(&block->page);
+
+ return block;
}
/** Upgrade a rollback segment header page to MariaDB 10.3 format.
@@ -462,20 +467,32 @@ static dberr_t trx_rseg_mem_restore(trx_rseg_t *rseg, mtr_t *mtr)
TRX_RSEG + TRX_RSEG_BINLOG_NAME + rseg_hdr->page.frame;
if (*binlog_name)
{
- lsn_t lsn= mach_read_from_8(my_assume_aligned<8>
- (FIL_PAGE_LSN + rseg_hdr->page.frame));
static_assert(TRX_RSEG_BINLOG_NAME_LEN ==
sizeof trx_sys.recovered_binlog_filename, "compatibility");
- if (lsn > trx_sys.recovered_binlog_lsn)
- {
- trx_sys.recovered_binlog_lsn= lsn;
- trx_sys.recovered_binlog_offset=
+
+ /* Always prefer a position from a rollback segment over
+ a legacy position from before version 10.3.5. */
+ int cmp= *trx_sys.recovered_binlog_filename &&
+ !trx_sys.recovered_binlog_is_legacy_pos
+ ? strncmp(reinterpret_cast<const char*>(binlog_name),
+ trx_sys.recovered_binlog_filename,
+ TRX_RSEG_BINLOG_NAME_LEN)
+ : 1;
+
+ if (cmp >= 0) {
+ uint64_t binlog_offset =
mach_read_from_8(TRX_RSEG + TRX_RSEG_BINLOG_OFFSET +
rseg_hdr->page.frame);
- memcpy(trx_sys.recovered_binlog_filename, binlog_name,
- TRX_RSEG_BINLOG_NAME_LEN);
+ if (cmp)
+ {
+ memcpy(trx_sys.recovered_binlog_filename, binlog_name,
+ TRX_RSEG_BINLOG_NAME_LEN);
+ trx_sys.recovered_binlog_offset= binlog_offset;
+ }
+ else if (binlog_offset > trx_sys.recovered_binlog_offset)
+ trx_sys.recovered_binlog_offset= binlog_offset;
+ trx_sys.recovered_binlog_is_legacy_pos= false;
}
-
#ifdef WITH_WSREP
trx_rseg_read_wsrep_checkpoint(rseg_hdr, trx_sys.recovered_wsrep_xid);
#endif
@@ -548,6 +565,7 @@ static void trx_rseg_init_binlog_info(const page_t* page)
trx_sys.recovered_binlog_offset = mach_read_from_8(
TRX_SYS_MYSQL_LOG_INFO + TRX_SYS_MYSQL_LOG_OFFSET
+ TRX_SYS + page);
+ trx_sys.recovered_binlog_is_legacy_pos= true;
}
#ifdef WITH_WSREP
@@ -562,6 +580,7 @@ dberr_t trx_rseg_array_init()
*trx_sys.recovered_binlog_filename = '\0';
trx_sys.recovered_binlog_offset = 0;
+ trx_sys.recovered_binlog_is_legacy_pos= false;
#ifdef WITH_WSREP
trx_sys.recovered_wsrep_xid.null();
XID wsrep_sys_xid;
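
The trx0rseg.cc hunk above changes how the binlog position is recovered from rollback segment headers: a position stored in any rollback segment now takes precedence over a legacy pre-10.3.5 position from the TRX_SYS page, file names are compared with strncmp() so the later binlog file wins, and when two segments name the same file the larger offset is kept. The sketch below restates that selection rule in isolation; recovered_binlog_pos_stub and apply_rseg_position() are illustrative stand-ins for the trx_sys fields and the in-place logic, not code from the patch.

    #include <cstdint>
    #include <cstring>

    struct recovered_binlog_pos_stub
    {
      char     file[512];   // stand-in for the TRX_RSEG_BINLOG_NAME_LEN buffer
      uint64_t offset;
      bool     is_legacy;   // position came from the pre-10.3.5 TRX_SYS page
    };

    void apply_rseg_position(recovered_binlog_pos_stub &best,
                             const char *name, uint64_t offset)
    {
      // An empty or legacy best position is always overridden (cmp = 1).
      const int cmp = *best.file && !best.is_legacy
        ? std::strncmp(name, best.file, sizeof best.file)
        : 1;
      if (cmp < 0)
        return;                       // current best names a later binlog file
      if (cmp)                        // strictly later file: take name + offset
      {
        std::strncpy(best.file, name, sizeof best.file - 1);
        best.file[sizeof best.file - 1] = '\0';
        best.offset = offset;
      }
      else if (offset > best.offset)  // same file: keep the larger offset
        best.offset = offset;
      best.is_legacy = false;
    }

The new trx_sys.recovered_binlog_is_legacy_pos flag introduced by the same diff is what makes the "legacy" branch of this decision possible at recovery time.
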
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index e5e2ef9e..942b8bd4 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -582,6 +582,7 @@ static dberr_t trx_resurrect_table_locks(trx_t *trx, const trx_undo_t &undo)
undo.top_page_no), 0, RW_S_LATCH, nullptr,
BUF_GET, &mtr, &err))
{
+ buf_page_make_young_if_needed(&block->page);
buf_block_t *undo_block= block;
const trx_undo_rec_t *undo_rec= block->page.frame + undo.top_offset;
@@ -980,7 +981,13 @@ void trx_t::commit_empty(mtr_t *mtr)
trx_undo_t *&undo= rsegs.m_redo.undo;
ut_ad(undo->state == TRX_UNDO_ACTIVE || undo->state == TRX_UNDO_PREPARED);
- ut_ad(undo->size == 1);
+
+ if (UNIV_UNLIKELY(undo->size != 1))
+ {
+ sql_print_error("InnoDB: Undo log for transaction " TRX_ID_FMT
+ " is corrupted (" UINT32PF "!=1)", id, undo->size);
+ ut_ad("corrupted undo log" == 0);
+ }
if (buf_block_t *u=
buf_page_get(page_id_t(rseg->space->id, undo->hdr_page_no), 0,
@@ -1504,6 +1511,7 @@ void trx_t::commit_cleanup()
mutex.wr_lock();
state= TRX_STATE_NOT_STARTED;
+ *detailed_error= '\0';
mod_tables.clear();
check_foreigns= true;
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index 203edd9f..ccc68dfe 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -25,8 +25,8 @@ Created 3/26/1996 Heikki Tuuri
*******************************************************/
#include "trx0undo.h"
+#include "buf0rea.h"
#include "fsp0fsp.h"
-#include "mach0data.h"
#include "mtr0log.h"
#include "srv0mon.h"
#include "srv0srv.h"
@@ -178,8 +178,12 @@ trx_undo_get_prev_rec_from_prev_page(buf_block_t *&block, uint16_t rec,
block= buf_page_get(page_id_t(block->page.id().space(), prev_page_no),
0, shared ? RW_S_LATCH : RW_X_LATCH, mtr);
+ if (UNIV_UNLIKELY(!block))
+ return nullptr;
- return block ? trx_undo_page_get_last_rec(block, page_no, offset) : nullptr;
+ if (!buf_page_make_young_if_needed(&block->page))
+ buf_read_ahead_linear(block->page.id(), 0, false);
+ return trx_undo_page_get_last_rec(block, page_no, offset);
}
/** Get the previous undo log record.
@@ -268,12 +272,16 @@ trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no,
uint16_t offset, ulint mode, const buf_block_t*& block,
mtr_t *mtr, dberr_t *err)
{
- block= buf_page_get_gen(page_id_t{space.id, page_no}, 0, mode,
- nullptr, BUF_GET, mtr, err);
+ buf_block_t *b= buf_page_get_gen(page_id_t{space.id, page_no}, 0, mode,
+ nullptr, BUF_GET, mtr, err);
+ block= b;
if (!block)
return nullptr;
- if (trx_undo_rec_t *rec= trx_undo_page_get_first_rec(block, page_no, offset))
+ if (!buf_page_make_young_if_needed(&b->page))
+ buf_read_ahead_linear(b->page.id(), 0, false);
+
+ if (trx_undo_rec_t *rec= trx_undo_page_get_first_rec(b, page_no, offset))
return rec;
return trx_undo_get_next_rec_from_next_page(block, page_no, offset, mode,
@@ -663,6 +671,8 @@ buf_block_t *trx_undo_add_page(trx_undo_t *undo, mtr_t *mtr, dberr_t *err)
0, RW_X_LATCH, nullptr, BUF_GET, mtr, err);
if (!header_block)
goto func_exit;
+ buf_page_make_young_if_needed(&header_block->page);
+
*err= fsp_reserve_free_extents(&n_reserved, rseg->space, 1, FSP_UNDO, mtr);
if (UNIV_UNLIKELY(*err != DB_SUCCESS))
@@ -732,6 +742,8 @@ trx_undo_free_page(
return FIL_NULL;
}
+ buf_page_make_young_if_needed(&header_block->page);
+
*err = flst_remove(header_block, TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
undo_block, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE,
mtr);
@@ -740,6 +752,14 @@ trx_undo_free_page(
return FIL_NULL;
}
+ const fil_addr_t last_addr = flst_get_last(
+ TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST
+ + header_block->page.frame);
+ if (UNIV_UNLIKELY(last_addr.page == page_no)) {
+ *err = DB_CORRUPTION;
+ return FIL_NULL;
+ }
+
*err = fseg_free_page(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
+ header_block->page.frame,
rseg->space, page_no, mtr);
@@ -748,9 +768,6 @@ trx_undo_free_page(
}
buf_page_free(rseg->space, page_no, mtr);
- const fil_addr_t last_addr = flst_get_last(
- TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST
- + header_block->page.frame);
rseg->curr_size--;
if (!in_history) {
@@ -794,6 +811,9 @@ static dberr_t trx_undo_truncate_end(trx_undo_t &undo, undo_no_t limit,
{
ut_ad(is_temp == !undo.rseg->is_persistent());
+ if (UNIV_UNLIKELY(undo.last_page_no == FIL_NULL))
+ return DB_CORRUPTION;
+
for (mtr_t mtr;;)
{
mtr.start();
@@ -887,15 +907,13 @@ trx_undo_truncate_start(
trx_undo_rec_t* last_rec;
mtr_t mtr;
+ ut_ad(rseg->is_persistent());
+
if (!limit) {
return DB_SUCCESS;
}
loop:
- mtr_start(&mtr);
-
- if (!rseg->is_persistent()) {
- mtr.set_log_mode(MTR_LOG_NO_REDO);
- }
+ mtr.start();
dberr_t err;
const buf_block_t* undo_page;
@@ -1263,6 +1281,8 @@ trx_undo_reuse_cached(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** pundo,
return NULL;
}
+ buf_page_make_young_if_needed(&block->page);
+
UT_LIST_REMOVE(rseg->undo_cached, undo);
*pundo = undo;
@@ -1297,19 +1317,24 @@ trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
ut_ad(mtr->get_log_mode() == MTR_LOG_ALL);
trx_undo_t* undo = trx->rsegs.m_redo.undo;
+ buf_block_t* block;
if (undo) {
- return buf_page_get_gen(
+ block = buf_page_get_gen(
page_id_t(undo->rseg->space->id, undo->last_page_no),
0, RW_X_LATCH, undo->guess_block,
BUF_GET, mtr, err);
+ if (UNIV_LIKELY(block != nullptr)) {
+ buf_page_make_young_if_needed(&block->page);
+ }
+ return block;
}
*err = DB_SUCCESS;
trx_rseg_t* rseg = trx->rsegs.m_redo.rseg;
rseg->latch.wr_lock(SRW_LOCK_CALL);
- buf_block_t* block = trx_undo_reuse_cached(
+ block = trx_undo_reuse_cached(
trx, rseg, &trx->rsegs.m_redo.undo, mtr, err);
if (!block) {
@@ -1350,12 +1375,17 @@ trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
: &trx->rsegs.m_redo.undo));
ut_ad(mtr->get_log_mode()
== (is_temp ? MTR_LOG_NO_REDO : MTR_LOG_ALL));
+ buf_block_t* block;
if (*undo) {
- return buf_page_get_gen(
+ block = buf_page_get_gen(
page_id_t(rseg->space->id, (*undo)->last_page_no),
0, RW_X_LATCH, (*undo)->guess_block,
BUF_GET, mtr, err);
+ if (UNIV_LIKELY(block != nullptr)) {
+ buf_page_make_young_if_needed(&block->page);
+ }
+ return block;
}
DBUG_EXECUTE_IF(
@@ -1365,7 +1395,6 @@ trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
*err = DB_SUCCESS;
rseg->latch.wr_lock(SRW_LOCK_CALL);
- buf_block_t* block;
if (is_temp) {
ut_ad(!UT_LIST_GET_LEN(rseg->undo_cached));
} else {
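
A recurring theme in the trx0undo.cc (and trx0rec.cc/trx0trx.cc) hunks above is the pairing of buf_page_make_young_if_needed() with buf_read_ahead_linear() right after an undo page is fetched. The fragment below is only a sketch of how the patch combines them: undo_page_accessed() is a hypothetical wrapper that does not exist in the tree, and it assumes the InnoDB buffer-pool headers (buf0buf.h, buf0rea.h) that the real call sites include.

    // Hypothetical wrapper, for illustration only; the patch inlines this
    // logic at each call site instead of introducing a helper.
    static void undo_page_accessed(buf_block_t *block)
    {
      if (!block)
        return;                               // the caller reports the error
      // Refresh the page's position in the buffer pool LRU; when the call
      // returns false (which the hunks above appear to treat as the first
      // access since the page was read in), request linear read-ahead of the
      // neighbouring undo pages, as trx_undo_get_first_rec() and
      // trx_undo_get_prev_rec_from_prev_page() now do.
      if (!buf_page_make_young_if_needed(&block->page))
        buf_read_ahead_linear(block->page.id(), 0, false);
    }
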