author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 13:22:53 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 13:22:53 +0000
commit     347c164c35eddab388009470e6848cb361ac93f8 (patch)
tree       2c0c44eac690f510bb0a35b2a13b36d606b77b6b /storage/innobase/lock
parent     Releasing progress-linux version 1:10.11.7-4~progress7.99u1. (diff)
download   mariadb-347c164c35eddab388009470e6848cb361ac93f8.tar.xz
           mariadb-347c164c35eddab388009470e6848cb361ac93f8.zip
Merging upstream version 1:10.11.8.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'storage/innobase/lock')
-rw-r--r--  storage/innobase/lock/lock0lock.cc  437
1 file changed, 318 insertions(+), 119 deletions(-)
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index df51ceb1..a2107007 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -47,6 +47,7 @@ Created 5/7/1996 Heikki Tuuri
 #include "que0que.h"
 #include "scope.h"
 #include <debug_sync.h>
+#include <mysql/service_thd_mdl.h>
 
 #include <set>
@@ -173,7 +174,7 @@ void lock_sys_t::assert_locked(const dict_table_t &table) const
   ut_ad(!table.is_temporary());
   if (is_writer())
     return;
-  ut_ad(readers);
+  ut_ad(latch.have_rd());
   ut_ad(table.lock_mutex_is_owner());
 }
 
@@ -182,7 +183,7 @@ void lock_sys_t::hash_table::assert_locked(const page_id_t id) const
 {
   if (lock_sys.is_writer())
     return;
-  ut_ad(lock_sys.readers);
+  ut_ad(lock_sys.is_holder());
   ut_ad(latch(cell_get(id.fold()))->is_locked());
 }
 
@@ -191,7 +192,7 @@ void lock_sys_t::assert_locked(const hash_cell_t &cell) const
 {
   if (is_writer())
     return;
-  ut_ad(lock_sys.readers);
+  ut_ad(lock_sys.is_holder());
   ut_ad(hash_table::latch(const_cast<hash_cell_t*>(&cell))->is_locked());
 }
 #endif
 
@@ -426,13 +427,10 @@ void lock_sys_t::wr_lock(const char *file, unsigned line)
 {
   mysql_mutex_assert_not_owner(&wait_mutex);
   latch.wr_lock(file, line);
-  ut_ad(!writer.exchange(pthread_self(), std::memory_order_relaxed));
 }
 
 /** Release exclusive lock_sys.latch */
 void lock_sys_t::wr_unlock()
 {
-  ut_ad(writer.exchange(0, std::memory_order_relaxed) ==
-        pthread_self());
   latch.wr_unlock();
 }
 
@@ -441,15 +439,11 @@ void lock_sys_t::rd_lock(const char *file, unsigned line)
 {
   mysql_mutex_assert_not_owner(&wait_mutex);
   latch.rd_lock(file, line);
-  ut_ad(!writer.load(std::memory_order_relaxed));
-  ut_d(readers.fetch_add(1, std::memory_order_relaxed));
 }
 
 /** Release shared lock_sys.latch */
 void lock_sys_t::rd_unlock()
 {
-  ut_ad(!writer.load(std::memory_order_relaxed));
-  ut_ad(readers.fetch_sub(1, std::memory_order_relaxed));
   latch.rd_unlock();
 }
 #endif
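The assertions rewritten above stop tracking latch ownership in separate writer/readers debug fields: the latch itself can now answer whether the current thread holds it, via latch.have_rd() and lock_sys.is_holder(). A minimal self-contained sketch of such a holder-tracking reader-writer lock in portable C++ (a simplified illustration, not InnoDB's srw_lock implementation):

    #include <mutex>
    #include <set>
    #include <shared_mutex>
    #include <thread>

    /* Holder-tracking reader-writer lock: the latch can answer "does the
       current thread hold me?", which is what assertions in the style of
       latch.have_rd() and lock_sys.is_holder() rely on. */
    class debug_rw_lock
    {
      std::shared_mutex latch;
      mutable std::mutex m;                  // protects holder bookkeeping
      std::set<std::thread::id> rd_holders;  // threads holding shared access
      std::thread::id wr_holder{};           // thread holding exclusive access
    public:
      void wr_lock()
      {
        latch.lock();
        std::lock_guard<std::mutex> g{m};
        wr_holder= std::this_thread::get_id();
      }
      void wr_unlock()
      {
        { std::lock_guard<std::mutex> g{m}; wr_holder= {}; }
        latch.unlock();
      }
      void rd_lock()
      {
        latch.lock_shared();
        std::lock_guard<std::mutex> g{m};
        rd_holders.insert(std::this_thread::get_id());
      }
      void rd_unlock()
      {
        { std::lock_guard<std::mutex> g{m};
          rd_holders.erase(std::this_thread::get_id()); }
        latch.unlock_shared();
      }
      bool have_wr() const
      {
        std::lock_guard<std::mutex> g{m};
        return wr_holder == std::this_thread::get_id();
      }
      bool have_rd() const
      {
        std::lock_guard<std::mutex> g{m};
        return rd_holders.count(std::this_thread::get_id()) != 0;
      }
      bool is_holder() const { return have_wr() || have_rd(); }
    };

With this, an assertion such as ut_ad(latch.have_rd()) needs no external counter, and it is stronger than the removed ut_ad(readers): it checks that this very thread is among the readers, not merely that some reader exists.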
@@ -976,8 +970,31 @@ func_exit:
   for (lock_t *lock= UT_LIST_GET_FIRST(table->locks); lock;
        lock= UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock))
   {
-    /* if victim has also BF status, but has earlier seqno, we have to wait */
-    if (lock->trx != trx &&
+    /* The victim trx needs to be different from the BF trx, and it has
+    to have a THD so that we can kill it. The victim might not have a
+    THD in two cases:
+
+    (1) An incomplete transaction that was recovered from undo logs
+    on server startup (and not yet rolled back).
+
+    (2) A transaction that is in XA PREPARE state and whose client
+    connection was disconnected.
+
+    Neither of these can complete before lock_wait_wsrep() releases
+    lock_sys.latch.
+
+    (1) trx_t::commit_in_memory() clears both
+    trx_t::state and trx_t::is_recovered before it invokes
+    lock_release(trx_t*) (which would be blocked by the exclusive
+    lock_sys.latch that we are holding here). Hence, it is not
+    possible to write a debug assertion to document this scenario.
+
+    (2) If the victim is in XA PREPARE state, it would eventually be
+    rolled back and the lock conflict would be resolved when an
+    XA COMMIT or XA ROLLBACK statement is executed in some other
+    connection.
+
+    If the victim also has BF status but an earlier seqno, we have to
+    wait. */
+    if (lock->trx != trx && lock->trx->mysql_thd &&
         !(wsrep_thd_is_BF(lock->trx->mysql_thd, false) &&
           wsrep_thd_order_before(lock->trx->mysql_thd, trx->mysql_thd)))
     {
@@ -1009,8 +1026,11 @@ func_exit:
       lock= lock_rec_get_next(heap_no, lock);
       do
       {
-        /* if victim has also BF status, but has earlier seqno, we have to wait */
-        if (lock->trx != trx &&
+        /* This is a similar case to the one above, except that here we
+        have record locks instead of table locks. See the details in
+        the comment above. */
+        if (lock->trx != trx && lock->trx->mysql_thd &&
             !(wsrep_thd_is_BF(lock->trx->mysql_thd, false) &&
               wsrep_thd_order_before(lock->trx->mysql_thd, trx->mysql_thd)))
         {
@@ -1036,8 +1056,12 @@ func_exit:
     std::vector<std::pair<ulong,trx_id_t>> victim_id;
     for (trx_t *v : victims)
+    {
+      /* The victim must have a THD. */
+      ut_ad(v->mysql_thd);
       victim_id.emplace_back(std::pair<ulong,trx_id_t>
                              {thd_get_thread_id(v->mysql_thd), v->id});
+    }
 
     DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort",
                     {
@@ -3940,6 +3964,8 @@ static void lock_table_dequeue(lock_t *in_lock, bool owns_wait_mutex)
 dberr_t lock_table_for_trx(dict_table_t *table, trx_t *trx, lock_mode mode,
                            bool no_wait)
 {
+  ut_ad(!dict_sys.frozen());
+
   mem_heap_t *heap= mem_heap_create(512);
   sel_node_t *node= sel_node_create(heap);
   que_thr_t *thr= pars_complete_graph_for_exec(node, trx, heap, nullptr);
@@ -3976,6 +4002,67 @@ run_again:
   return err;
 }
 
+/** Lock the child tables of a table.
+@param table  parent table
+@param trx    transaction
+@return error code */
+dberr_t lock_table_children(dict_table_t *table, trx_t *trx)
+{
+  MDL_context *mdl_context=
+    static_cast<MDL_context*>(thd_mdl_context(trx->mysql_thd));
+  ut_ad(mdl_context);
+  struct table_mdl{dict_table_t* table; MDL_ticket *mdl;};
+  std::vector<table_mdl> children;
+  children.emplace_back(table_mdl{table, nullptr});
+
+  dberr_t err= DB_SUCCESS;
+  dict_sys.freeze(SRW_LOCK_CALL);
+
+rescan:
+  for (auto f : table->referenced_set)
+    if (dict_table_t *child= f->foreign_table)
+    {
+      if (std::find_if(children.begin(), children.end(),
+                       [&](const table_mdl &c){ return c.table == child; }) !=
+          children.end())
+        continue; /* We already acquired MDL on this child table. */
+      MDL_ticket *mdl= nullptr;
+      child->acquire();
+      child= dict_acquire_mdl_shared<false>(child, mdl_context, &mdl,
+                                            DICT_TABLE_OP_NORMAL);
+      if (child)
+      {
+        if (!mdl)
+          child->release();
+        children.emplace_back(table_mdl{child, mdl});
+        goto rescan;
+      }
+      err= DB_LOCK_WAIT_TIMEOUT;
+      break;
+    }
+  dict_sys.unfreeze();
+
+  if (err == DB_SUCCESS)
+    for (const table_mdl &child : children)
+      if (child.mdl)
+        if ((err= lock_table_for_trx(child.table, trx, LOCK_X)) != DB_SUCCESS)
+          break;
+
+  dict_sys.freeze(SRW_LOCK_CALL);
+  for (table_mdl &child : children)
+  {
+    if (child.mdl)
+    {
+      child.table->release();
+      mdl_context->release_lock(child.mdl);
+    }
+  }
+  dict_sys.unfreeze();
+
+  return err;
+}
+
 /** Exclusively lock the data dictionary tables.
 @param trx   dictionary transaction
 @return error code
@@ -4125,7 +4212,7 @@ restart:
   ulint count= 1000;
   /* We will not attempt hardware lock elision (memory transaction) here.
   Both lock_rec_dequeue_from_page() and lock_table_dequeue()
-  would likely lead to a memory transaction due to a system call, to
+  would likely lead to a memory transaction abort due to a system call, to
   wake up a waiting transaction. */
   lock_sys.rd_lock(SRW_LOCK_CALL);
   trx->mutex_lock();
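The new lock_table_children() collects the transitive closure of FOREIGN KEY child tables. Because acquiring MDL on a child may let the set of tables change underneath the scan, the loop restarts from the top whenever a new child is added, until a full pass discovers nothing new. That rescan-to-fixed-point shape, reduced to a self-contained sketch (Table and referencing are illustrative stand-ins, not InnoDB types):

    #include <algorithm>
    #include <string>
    #include <vector>

    struct Table
    {
      std::string name;
      std::vector<Table*> referencing;  // tables whose FOREIGN KEY points here
    };

    // Collect a parent table plus all direct and indirect child tables.
    std::vector<Table*> collect_children(Table *parent)
    {
      std::vector<Table*> found{parent};
    rescan:
      for (Table *t : found)
        for (Table *child : t->referencing)
          if (std::find(found.begin(), found.end(), child) == found.end())
          {
            found.push_back(child);
            goto rescan;  // the set grew; restart to reach a fixed point
          }
      return found;       // fixed point: a full pass added nothing
    }

The real function additionally holds an MDL_ticket per discovered table, takes LOCK_X table locks on every child once the scan is stable, and finally releases all the MDL tickets again.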
@@ -4295,28 +4382,82 @@ void lock_release_on_drop(trx_t *trx)
   }
 }
 
-/** Reset lock bit for supremum and rebuild waiting queue.
+/** Reset a lock bit and rebuild waiting queue.
 @param cell  rec hash cell of in_lock
 @param lock  the lock with supremum bit set */
-static void lock_rec_unlock_supremum(hash_cell_t &cell, lock_t *lock)
+static void lock_rec_unlock(hash_cell_t &cell, lock_t *lock, ulint heap_no)
 {
-  ut_ad(lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM));
+  ut_ad(lock_rec_get_nth_bit(lock, heap_no));
 #ifdef SAFE_MUTEX
   ut_ad(!mysql_mutex_is_owner(&lock_sys.wait_mutex));
 #endif /* SAFE_MUTEX */
   ut_ad(!lock->is_table());
   ut_ad(lock_sys.is_writer() || lock->trx->mutex_is_owner());
 
-  lock_rec_reset_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM);
+  lock_rec_reset_nth_bit(lock, heap_no);
 
-  lock_t *first_lock= lock_sys_t::get_first(
-    cell, lock->un_member.rec_lock.page_id, PAGE_HEAP_NO_SUPREMUM);
+  lock_t *first_lock=
+    lock_sys_t::get_first(cell, lock->un_member.rec_lock.page_id, heap_no);
 
   lock_rec_rebuild_waiting_queue(
 #if defined(UNIV_DEBUG) || !defined(DBUG_OFF)
     lock->trx,
 #endif /* defined(UNIV_DEBUG) || !defined(DBUG_OFF) */
-    cell, first_lock, PAGE_HEAP_NO_SUPREMUM);
+    cell, first_lock, heap_no);
+}
+
+/** Release locks on unmodified records on a clustered index page.
+@param cell     lock_sys.rec_hash cell of lock
+@param lock     record lock
+@param offsets  storage for rec_get_offsets()
+@param heap     storage for rec_get_offsets()
+@param mtr      mini-transaction (will be started and committed) */
+static void lock_rec_unlock_unmodified(hash_cell_t &cell, lock_t *lock,
+                                       rec_offs *&offsets, mem_heap_t *&heap,
+                                       mtr_t &mtr)
+{
+  ut_ad(!lock->is_waiting());
+
+  dict_index_t *const index= lock->index;
+
+  mtr.start();
+  if (buf_block_t *block=
+      btr_block_get(*index, lock->un_member.rec_lock.page_id.page_no(),
+                    RW_S_LATCH, true, &mtr))
+  {
+    if (UNIV_UNLIKELY(!page_is_leaf(block->page.frame)))
+    {
+      ut_ad("corrupted lock system" == 0);
+      goto func_exit;
+    }
+
+    for (ulint i= PAGE_HEAP_NO_USER_LOW; i < lock_rec_get_n_bits(lock); ++i)
+    {
+      if (!lock_rec_get_nth_bit(lock, i));
+      else if (const rec_t *rec=
+               page_find_rec_with_heap_no(block->page.frame, i))
+      {
+        if (index->is_clust())
+        {
+          if (trx_read_trx_id(rec + row_trx_id_offset(rec, index)) ==
+              lock->trx->id)
+            continue;
+unlock_rec:
+          lock_rec_unlock(cell, lock, i);
+        }
+        else
+        {
+          offsets= rec_get_offsets(rec, index, offsets, index->n_core_fields,
+                                   ULINT_UNDEFINED, &heap);
+          if (lock->trx !=
+              lock_sec_rec_some_has_impl(lock->trx, rec, index, offsets))
+            goto unlock_rec;
+        }
+      }
+    }
+  }
+func_exit:
+  mtr.commit();
+}
 
 /** Release non-exclusive locks on XA PREPARE,
@@ -4334,6 +4475,12 @@ static bool lock_release_on_prepare_try(trx_t *trx)
   DBUG_ASSERT(trx->state == TRX_STATE_PREPARED);
 
   bool all_released= true;
+  mtr_t mtr;
+  rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
+  rec_offs *offsets= offsets_;
+  mem_heap_t *heap= nullptr;
+  rec_offs_init(offsets_);
+
   lock_sys.rd_lock(SRW_LOCK_CALL);
   trx->mutex_lock();
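lock_rec_unlock_unmodified() decides whether the lock holder actually modified a clustered-index record by reading the record's hidden DB_TRX_ID column, which names the last transaction that changed the row; a lock whose holder's id differs from DB_TRX_ID protects an unmodified row and may be released at XA PREPARE. A toy model of that filter (simplified stand-in types, not InnoDB's record format):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using trx_id_t = std::uint64_t;

    struct Rec { trx_id_t trx_id; /* hidden DB_TRX_ID column */ };

    // Return the heap numbers whose exclusive locks may be released for
    // trx_id: records that this transaction did not modify itself.
    std::vector<std::size_t> releasable(const std::vector<Rec> &page,
                                        trx_id_t trx_id)
    {
      std::vector<std::size_t> heap_nos;
      for (std::size_t i= 0; i < page.size(); i++)
        if (page[i].trx_id != trx_id)  // not our change: lock is droppable
          heap_nos.push_back(i);
      return heap_nos;
    }

For secondary indexes there is no DB_TRX_ID in the record, which is why the else branch above falls back to lock_sec_rec_some_has_impl() to find the implicit lock holder.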
@@ -4350,20 +4497,24 @@
     if (!lock->is_table())
     {
       ut_ad(!lock->index->table->is_temporary());
-      bool supremum_bit = lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM);
-      bool rec_granted_exclusive_not_gap =
+      bool supremum_bit= lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM);
+      bool rec_granted_exclusive_not_gap=
         lock->is_rec_granted_exclusive_not_gap();
       if (!supremum_bit && rec_granted_exclusive_not_gap)
         continue;
-      auto &lock_hash= lock_sys.hash_get(lock->type_mode);
-      auto cell= lock_hash.cell_get(lock->un_member.rec_lock.page_id.fold());
+      if (UNIV_UNLIKELY(lock->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)))
+        continue; /* SPATIAL INDEX locking is broken. */
+      auto cell=
+        lock_sys.rec_hash.cell_get(lock->un_member.rec_lock.page_id.fold());
       auto latch= lock_sys_t::hash_table::latch(cell);
       if (latch->try_acquire())
       {
         if (!rec_granted_exclusive_not_gap)
           lock_rec_dequeue_from_page(lock, false);
         else if (supremum_bit)
-          lock_rec_unlock_supremum(*cell, lock);
+          lock_rec_unlock(*cell, lock, PAGE_HEAP_NO_SUPREMUM);
+        else
+          lock_rec_unlock_unmodified(*cell, lock, offsets, heap, mtr);
         latch->release();
       }
       else
@@ -4396,6 +4547,8 @@ static bool lock_release_on_prepare_try(trx_t *trx)
 
   lock_sys.rd_unlock();
   trx->mutex_unlock();
+  if (UNIV_LIKELY_NULL(heap))
+    mem_heap_free(heap);
   return all_released;
 }
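lock_release_on_prepare_try() is the optimistic fast path: while holding the shared lock_sys.latch it may only try_acquire() each per-cell latch, never block on it, and it reports failure so the caller can retry under the exclusive latch, where no cell latches are needed at all. The same two-phase pattern in standard C++ (illustrative names, not the InnoDB API):

    #include <cstddef>
    #include <mutex>
    #include <shared_mutex>
    #include <vector>

    std::shared_mutex lock_sys_latch;
    std::vector<std::mutex> cell_latches(16);

    // Fast path: shared latch plus non-blocking per-cell latches.
    bool release_try(const std::vector<std::size_t> &cells)
    {
      bool all_released= true;
      std::shared_lock<std::shared_mutex> g{lock_sys_latch};
      for (std::size_t c : cells)
        if (cell_latches[c].try_lock())  // never block under the shared latch
        {
          /* ... release the locks hashed to this cell ... */
          cell_latches[c].unlock();
        }
        else
          all_released= false;           // contended: leave for the slow path
      return all_released;
    }

    void release(const std::vector<std::size_t> &cells)
    {
      if (release_try(cells))
        return;                          // fast path released everything
      // Slow path: the exclusive latch excludes all readers and cell
      // holders, so each cell can be drained without taking its latch.
      std::unique_lock<std::shared_mutex> g{lock_sys_latch};
      for (std::size_t c : cells)
      {
        (void)c; /* ... release the locks hashed to this cell ... */
      }
    }

Blocking on a cell latch while holding the shared latch is what must be avoided; the try/fallback split keeps the common case concurrent and the rare contended case simple.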
@@ -4409,52 +4562,71 @@ void lock_release_on_prepare(trx_t *trx)
   if (lock_release_on_prepare_try(trx))
     return;
 
-  LockMutexGuard g{SRW_LOCK_CALL};
-  trx->mutex_lock();
+  mtr_t mtr;
+  rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
+  rec_offs *offsets= offsets_;
+  mem_heap_t *heap= nullptr;
+
+  rec_offs_init(offsets_);
 
-  for (lock_t *prev, *lock= UT_LIST_GET_LAST(trx->lock.trx_locks); lock;
-       lock= prev)
   {
-    ut_ad(lock->trx == trx);
-    prev= UT_LIST_GET_PREV(trx_locks, lock);
-    if (!lock->is_table())
+    LockMutexGuard g{SRW_LOCK_CALL};
+    trx->mutex_lock();
+
+    for (lock_t *prev, *lock= UT_LIST_GET_LAST(trx->lock.trx_locks); lock;
+         lock= prev)
     {
-      ut_ad(!lock->index->table->is_temporary());
-      if (!lock->is_rec_granted_exclusive_not_gap())
-        lock_rec_dequeue_from_page(lock, false);
-      else if (lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM))
+      ut_ad(lock->trx == trx);
+      prev= UT_LIST_GET_PREV(trx_locks, lock);
+      if (!lock->is_table())
       {
-        auto &lock_hash= lock_sys.hash_get(lock->type_mode);
-        auto cell= lock_hash.cell_get(lock->un_member.rec_lock.page_id.fold());
-        lock_rec_unlock_supremum(*cell, lock);
+        ut_ad(!lock->index->table->is_temporary());
+        if (!lock->is_rec_granted_exclusive_not_gap())
+          lock_rec_dequeue_from_page(lock, false);
+        else if (UNIV_UNLIKELY(lock->type_mode &
+                               (LOCK_PREDICATE | LOCK_PRDT_PAGE)))
+          /* SPATIAL INDEX locking is broken. */;
+        else
+        {
+          auto cell= lock_sys.rec_hash.cell_get(lock->un_member.rec_lock.
+                                                page_id.fold());
+          if (lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM))
+            lock_rec_unlock(*cell, lock, PAGE_HEAP_NO_SUPREMUM);
+          else
+          {
+            ut_ad(lock->trx->isolation_level > TRX_ISO_READ_COMMITTED ||
+                  /* Insert-intention lock is valid for supremum for isolation
+                  level > TRX_ISO_READ_COMMITTED */
+                  lock->mode() == LOCK_X ||
+                  !lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM));
+            lock_rec_unlock_unmodified(*cell, lock, offsets, heap, mtr);
+          }
+        }
      }
      else
-        ut_ad(lock->trx->isolation_level > TRX_ISO_READ_COMMITTED ||
-              /* Insert-intention lock is valid for supremum for isolation
-              level > TRX_ISO_READ_COMMITTED */
-              lock->mode() == LOCK_X ||
-              !lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM));
-    }
-    else
-    {
-      ut_d(dict_table_t *table= lock->un_member.tab_lock.table);
-      ut_ad(!table->is_temporary());
-      switch (lock->mode()) {
-      case LOCK_IS:
-      case LOCK_S:
-        lock_table_dequeue(lock, false);
-        break;
-      case LOCK_IX:
-      case LOCK_X:
-        ut_ad(table->id >= DICT_HDR_FIRST_ID || trx->dict_operation);
-        /* fall through */
-      default:
-        break;
+      {
+        ut_d(dict_table_t *table= lock->un_member.tab_lock.table);
+        ut_ad(!table->is_temporary());
+        switch (lock->mode()) {
+        case LOCK_IS:
+        case LOCK_S:
+          lock_table_dequeue(lock, false);
+          break;
+        case LOCK_IX:
+        case LOCK_X:
+          ut_ad(table->id >= DICT_HDR_FIRST_ID || trx->dict_operation);
+          /* fall through */
+        default:
+          break;
+        }
      }
    }
  }
 
   trx->mutex_unlock();
+
+  if (UNIV_LIKELY_NULL(heap))
+    mem_heap_free(heap);
 }
 
 /** Release locks on a table whose creation is being rolled back */
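In the table-lock branch above, XA PREPARE releases only the non-exclusive table locks: IS and S protected reads that are complete once the transaction is prepared, while IX and X must survive until COMMIT or ROLLBACK because prepared changes may exist under them. That filter as a compile-time check (a simplified enumeration, not the InnoDB lock_mode definition):

    enum lock_mode { LOCK_IS, LOCK_IX, LOCK_S, LOCK_X };

    // XA PREPARE may drop a table lock only if it is non-exclusive.
    constexpr bool released_on_prepare(lock_mode m)
    { return m == LOCK_IS || m == LOCK_S; }

    static_assert(released_on_prepare(LOCK_S) &&
                  !released_on_prepare(LOCK_IX),
                  "only shared table locks are released at XA PREPARE");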
@@ -5414,47 +5586,43 @@ lock_rec_insert_check_and_lock(
         return err;
 }
 
-/*********************************************************************//**
-Creates an explicit record lock for a running transaction that currently only
-has an implicit lock on the record. The transaction instance must have a
-reference count > 0 so that it can't be committed and freed before this
-function has completed. */
-static
-bool
-lock_rec_convert_impl_to_expl_for_trx(
-/*==================================*/
-        trx_t*          trx,    /*!< in/out: active transaction */
-        const page_id_t id,     /*!< in: page identifier */
-        const rec_t*    rec,    /*!< in: user record on page */
-        dict_index_t*   index)  /*!< in: index of record */
+/** Create an explicit record lock for a transaction that currently only
+has an implicit lock on the record.
+@param trx    referenced, active transaction, or nullptr
+@param id     page identifier
+@param rec    record in the page
+@param index  the index B-tree that the record belongs to
+@return trx, with the reference released */
+static trx_t *lock_rec_convert_impl_to_expl_for_trx(trx_t *trx,
+                                                    const page_id_t id,
+                                                    const rec_t *rec,
+                                                    dict_index_t *index)
 {
-  if (!trx)
-    return false;
-
-  ut_ad(trx->is_referenced());
-  ut_ad(page_rec_is_leaf(rec));
-  ut_ad(!rec_is_metadata(rec, *index));
+  if (trx)
+  {
+    ut_ad(trx->is_referenced());
+    ut_ad(page_rec_is_leaf(rec));
+    ut_ad(!rec_is_metadata(rec, *index));
 
-  DEBUG_SYNC_C("before_lock_rec_convert_impl_to_expl_for_trx");
-  ulint heap_no= page_rec_get_heap_no(rec);
+    ulint heap_no= page_rec_get_heap_no(rec);
 
-  {
-    LockGuard g{lock_sys.rec_hash, id};
-    trx->mutex_lock();
-    ut_ad(!trx_state_eq(trx, TRX_STATE_NOT_STARTED));
+    {
+      LockGuard g{lock_sys.rec_hash, id};
+      trx->mutex_lock();
+      ut_ad(!trx_state_eq(trx, TRX_STATE_NOT_STARTED));
+
+      if (!trx_state_eq(trx, TRX_STATE_COMMITTED_IN_MEMORY) &&
+          !lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, g.cell(), id, heap_no,
+                             trx))
+        lock_rec_add_to_queue(LOCK_X | LOCK_REC_NOT_GAP, g.cell(), id,
+                              page_align(rec), heap_no, index, trx, true);
+    }
 
-    if (!trx_state_eq(trx, TRX_STATE_COMMITTED_IN_MEMORY) &&
-        !lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, g.cell(), id, heap_no,
-                           trx))
-      lock_rec_add_to_queue(LOCK_X | LOCK_REC_NOT_GAP, g.cell(), id,
-                            page_align(rec), heap_no, index, trx, true);
+    trx->release_reference();
+    trx->mutex_unlock();
   }
 
-  trx->mutex_unlock();
-  trx->release_reference();
-
-  DEBUG_SYNC_C("after_lock_rec_convert_impl_to_expl_for_trx");
-  return false;
+  return trx;
 }
@@ -5545,10 +5713,11 @@ should be created.
 @param[in]      rec     record on the leaf page
 @param[in]      index   the index of the record
 @param[in]      offsets rec_get_offsets(rec,index)
-@return whether caller_trx already holds an exclusive lock on rec */
+@return unsafe pointer to a transaction that held an exclusive lock on rec
+@retval nullptr if no transaction held an exclusive lock */
 template<bool is_primary>
 static
-bool
+const trx_t *
 lock_rec_convert_impl_to_expl(
         trx_t*          caller_trx,
         page_id_t       id,
@@ -5572,10 +5741,10 @@ lock_rec_convert_impl_to_expl(
                 trx_id = lock_clust_rec_some_has_impl(rec, index, offsets);
 
                 if (trx_id == 0) {
-                        return false;
+                        return nullptr;
                 }
                 if (UNIV_UNLIKELY(trx_id == caller_trx->id)) {
-                        return true;
+                        return caller_trx;
                 }
 
                 trx = trx_sys.find(caller_trx, trx_id);
@@ -5586,7 +5755,7 @@ lock_rec_convert_impl_to_expl(
                         offsets);
                 if (trx == caller_trx) {
                         trx->release_reference();
-                        return true;
+                        return trx;
                 }
 
                 ut_d(lock_rec_other_trx_holds_expl(caller_trx, trx, rec, id));
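Returning the owning transaction (or nullptr) instead of a bool lets callers tell apart three outcomes of the conversion: no implicit lock exists, the caller itself holds it, or another transaction holds it. Only the last case triggers the new snapshot-isolation failure. A condensed model of the caller-side decision (stand-in types, not the InnoDB API):

    struct Trx
    {
      bool snapshot_isolation;  // transaction uses snapshot isolation
      bool read_view_open;      // a consistent read view is active
    };

    enum class dberr { SUCCESS, RECORD_CHANGED, NEED_LOCK };

    // owner is what lock_rec_convert_impl_to_expl() would have returned.
    dberr check_modify(Trx *caller, const Trx *owner)
    {
      if (owner == caller)
        return dberr::SUCCESS;         // we already hold an exclusive lock
      if (owner && caller->snapshot_isolation && caller->read_view_open)
        return dberr::RECORD_CHANGED;  // write-write conflict: fail early
      return dberr::NEED_LOCK;         // fall through to lock_rec_lock()
    }

Under the old bool contract, the other-owner case was indistinguishable from no lock at all, so a snapshot-isolation transaction would simply enqueue a waiting lock instead of failing fast.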
@@ -5631,11 +5800,18 @@ lock_clust_rec_modify_check_and_lock(
         /* If a transaction has no explicit x-lock set on the record, set one
         for it */
 
-        if (lock_rec_convert_impl_to_expl<true>(thr_get_trx(thr),
-                                                block->page.id(),
+        trx_t *trx = thr_get_trx(thr);
+        if (const trx_t *owner =
+            lock_rec_convert_impl_to_expl<true>(trx, block->page.id(),
                                                 rec, index, offsets)) {
-                /* We already hold an implicit exclusive lock. */
-                return DB_SUCCESS;
+                if (owner == trx) {
+                        /* We already hold an exclusive lock. */
+                        return DB_SUCCESS;
+                }
+
+                if (trx->snapshot_isolation && trx->read_view.is_open()) {
+                        return DB_RECORD_CHANGED;
+                }
         }
 
         err = lock_rec_lock(true, LOCK_X | LOCK_REC_NOT_GAP,
@@ -5798,12 +5974,19 @@ lock_sec_rec_read_check_and_lock(
                 return DB_SUCCESS;
         }
 
-        if (!page_rec_is_supremum(rec)
-            && lock_rec_convert_impl_to_expl<false>(
-                       trx, block->page.id(), rec, index, offsets)
-            && gap_mode == LOCK_REC_NOT_GAP) {
-                /* We already hold an implicit exclusive lock. */
-                return DB_SUCCESS;
+        if (page_rec_is_supremum(rec)) {
+        } else if (const trx_t *owner =
+                   lock_rec_convert_impl_to_expl<false>(trx, block->page.id(),
+                                                        rec, index, offsets)) {
+                if (owner == trx) {
+                        if (gap_mode == LOCK_REC_NOT_GAP) {
+                                /* We already hold an exclusive lock. */
+                                return DB_SUCCESS;
+                        }
+                } else if (trx->snapshot_isolation
+                           && trx->read_view.is_open()) {
+                        return DB_RECORD_CHANGED;
+                }
         }
 
 #ifdef WITH_WSREP
@@ -5883,13 +6066,28 @@ lock_clust_rec_read_check_and_lock(
         ulint heap_no = page_rec_get_heap_no(rec);
         trx_t *trx = thr_get_trx(thr);
 
-        if (!lock_table_has(trx, index->table, LOCK_X)
-            && heap_no != PAGE_HEAP_NO_SUPREMUM
-            && lock_rec_convert_impl_to_expl<true>(trx, id,
-                                                   rec, index, offsets)
-            && gap_mode == LOCK_REC_NOT_GAP) {
-                /* We already hold an implicit exclusive lock. */
-                return DB_SUCCESS;
+        if (lock_table_has(trx, index->table, LOCK_X)
+            || heap_no == PAGE_HEAP_NO_SUPREMUM) {
+        } else if (const trx_t *owner =
+                   lock_rec_convert_impl_to_expl<true>(trx, id,
+                                                       rec, index, offsets)) {
+                if (owner == trx) {
+                        if (gap_mode == LOCK_REC_NOT_GAP) {
+                                /* We already hold an exclusive lock. */
+                                return DB_SUCCESS;
+                        }
+                } else if (trx->snapshot_isolation
+                           && trx->read_view.is_open()) {
+                        return DB_RECORD_CHANGED;
+                }
+        }
+
+        if (heap_no > PAGE_HEAP_NO_SUPREMUM && gap_mode != LOCK_GAP
+            && trx->snapshot_isolation
+            && trx->read_view.is_open()
+            && !trx->read_view.changes_visible(
+                    trx_read_trx_id(rec + row_trx_id_offset(rec, index)))) {
+                return DB_RECORD_CHANGED;
         }
 
         dberr_t err = lock_rec_lock(false, gap_mode | mode,
@@ -6656,6 +6854,7 @@ and less modified rows. Bit 0 is used to prefer orig_trx in case of a tie.
       print(buf);
     }
 
+    DBUG_EXECUTE_IF("innodb_deadlock_victim_self", victim= trx;);
     ut_ad(victim->state == TRX_STATE_ACTIVE);
 
     /* victim->lock.was_chosen_as_deadlock_victim must always be set before
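The extra check added to lock_clust_rec_read_check_and_lock() is a write-conflict test for snapshot isolation: if the record's current version is not visible in the transaction's read view, a concurrent transaction has modified the record since the snapshot was taken, and the locking read fails with DB_RECORD_CHANGED rather than locking a version the snapshot cannot include. A minimal model of the visibility test (ReadView here is a simplified stand-in for InnoDB's class):

    #include <cstdint>
    #include <set>

    using trx_id_t = std::uint64_t;

    struct ReadView
    {
      trx_id_t low_limit;         // ids >= this started after the view
      std::set<trx_id_t> active;  // ids active when the view was created
      bool changes_visible(trx_id_t id) const
      { return id < low_limit && !active.count(id); }
    };

    // The locking read must fail if the record's last modification
    // (its DB_TRX_ID) is invisible to the snapshot.
    bool record_changed(const ReadView &view, trx_id_t rec_trx_id)
    { return !view.changes_visible(rec_trx_id); }

This is the first-committer-wins rule: rather than waiting for a lock on a row whose committed state already diverged from the snapshot, the transaction reports the conflict immediately.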