author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-04 18:00:34 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-04 18:00:34 +0000
commit    3f619478f796eddbba6e39502fe941b285dd97b1 (patch)
tree      e2c7b5777f728320e5b5542b6213fd3591ba51e2 /mysql-test/suite/large_tests/t
parent    Initial commit. (diff)
Adding upstream version 1:10.11.6. (tag: upstream/1%10.11.6, branch: upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'mysql-test/suite/large_tests/t')
-rw-r--r--  mysql-test/suite/large_tests/t/alter_table.test                | 47
-rw-r--r--  mysql-test/suite/large_tests/t/lock_tables_big.test            | 42
-rw-r--r--  mysql-test/suite/large_tests/t/maria_recover_encrypted.test    | 90
-rw-r--r--  mysql-test/suite/large_tests/t/rpl_slave_net_timeout-slave.opt |  1
-rw-r--r--  mysql-test/suite/large_tests/t/rpl_slave_net_timeout.test      | 79
5 files changed, 259 insertions(+), 0 deletions(-)
diff --git a/mysql-test/suite/large_tests/t/alter_table.test b/mysql-test/suite/large_tests/t/alter_table.test
new file mode 100644
index 00000000..61f0b34d
--- /dev/null
+++ b/mysql-test/suite/large_tests/t/alter_table.test
@@ -0,0 +1,47 @@
+# The test for bug#27029 takes about 8 hours and 20 minutes on my fastest
+# machine and requires at least 60 GB of disk space.
+--source include/big_test.inc
+
+#
+# Bug#27029 alter table ... enable keys crashes mysqld on large table
+#
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+create table `t1` (`c1` tinyint unsigned not null, key `c1` (`c1`))
+engine=myisam default charset=utf8;
+alter table `t1` disable keys;
+insert into t1 values (1),(2),(3),(4);
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+insert into t1 select * from t1 limit 2147483647; #Insert 4294967295 rows.
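+# Row-count arithmetic: the 4 seed rows are doubled 29 times above, giving
+# 4 * 2^29 = 2147483648 rows; the final insert with LIMIT 2147483647 then
+# brings the total to 4294967295 rows (the maximum of an unsigned 32-bit
+# counter).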
+alter table t1 enable keys;
+drop table t1;
diff --git a/mysql-test/suite/large_tests/t/lock_tables_big.test b/mysql-test/suite/large_tests/t/lock_tables_big.test
new file mode 100644
index 00000000..7fab19ac
--- /dev/null
+++ b/mysql-test/suite/large_tests/t/lock_tables_big.test
@@ -0,0 +1,42 @@
+#
+# Bug#24509 cannot use more than 2048 file descriptors on windows
+#
+
+#
+# This test requires approximately 6000 files to be open simultaneously.
+# Let us skip it on platforms where the open files limit is too low.
+let $max_open_files_limit= `SELECT @@open_files_limit < 6100`;
+if ($max_open_files_limit)
+{
+ skip Need open_files_limit to be greater than 6100;
+}
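+
+# Sizing note: the loop below creates 3000 MyISAM tables (plus table t), and
+# locking them needs roughly 2 file descriptors per table (.MYD and .MYI),
+# i.e. about 6000 descriptors; the 6100 threshold leaves some headroom for
+# system tables and logs.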
+
+--disable_query_log
+create database many_tables;
+use many_tables;
+let $max_tables=3000;
+let $i=$max_tables;
+
+--disable_warnings
+create table t (i int);
+let $table_list=t READ;
+
+while ($i)
+{
+ eval create table t$i (i int);
+ let $table_list= $table_list ,t$i READ;
+ dec $i;
+}
+
+# Lock all the tables we just created (this resembles what mysqldump does at
+# startup with the --all-databases option). There will be 2 descriptors for
+# each table (the table.MYI and table.MYD files), which means about 6000 file
+# descriptors altogether. For the Microsoft C runtime, this is way too many.
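+#
+# For reference, a rough sketch of the mysqldump call this mimics
+# (illustrative only; the exact option set depends on the mysqldump version):
+#   mysqldump --all-databases --lock-tables > all_databases.sql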
+
+eval LOCK TABLES $table_list;
+unlock tables;
+
+drop database many_tables;
+--enable_query_log
+--echo all done
+
+
diff --git a/mysql-test/suite/large_tests/t/maria_recover_encrypted.test b/mysql-test/suite/large_tests/t/maria_recover_encrypted.test
new file mode 100644
index 00000000..4c590e5e
--- /dev/null
+++ b/mysql-test/suite/large_tests/t/maria_recover_encrypted.test
@@ -0,0 +1,90 @@
+# MDEV-18187: If server crashes before flushing index pages in an
+# encrypted Aria table, it could permanently fail to repair the table
+
+--source include/have_maria.inc
+--source include/default_charset.inc
+
+# Cleanup
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP PROCEDURE IF EXISTS proc_insert_many;
+--enable_warnings
+
+# --------
+
+# Configure encryption
+--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--shutdown_server
+--source include/wait_until_disconnected.inc
+
+--write_file $MYSQLTEST_VARDIR/key.txt
+1;76025E3ADC78D74819927DB02AAA4C35
+EOF
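+
+# Note: the file_key_management plugin expects key-file lines of the form
+# <key id>;<hex-encoded key>; the single entry above defines key id 1 with a
+# 32-hex-digit (128-bit) AES key. The id and key value are arbitrary test data.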
+
+--exec echo "restart:--aria-encrypt-tables=1 --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQLTEST_VARDIR/key.txt" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+# Create a table with many indexes so that its index size grows quickly
+# and can reach the needed size without too many inserts.
+CREATE TABLE t1 (
+ field1 INTEGER NOT NULL,
+ field2 INTEGER NOT NULL,
+ field3 INTEGER NOT NULL,
+ KEY i_1 (field1),
+ KEY i_2 (field2),
+ KEY i_3 (field3),
+ KEY i_12 (field1, field2),
+ KEY i_13 (field1, field3),
+ KEY i_21 (field2, field1),
+ KEY i_23 (field2, field3),
+ KEY i_31 (field3, field1),
+ KEY i_32 (field3, field2),
+ KEY i_123 (field1, field2, field3),
+ KEY i_132 (field1, field3, field2),
+ KEY i_213 (field2, field1, field3),
+ KEY i_231 (field2, field3, field1),
+ KEY i_312 (field3, field1, field2),
+ KEY i_321 (field3, field2, field1)
+) ENGINE=Aria;
+
+# Create a procedure to insert many rows.
+DELIMITER |;
+CREATE PROCEDURE proc_insert_many()
+BEGIN
+ DECLARE iRow INT DEFAULT 0;
+ insertRows: LOOP
+ IF (iRow = 70000) THEN
+ LEAVE insertRows;
+ END IF;
+
+ INSERT INTO t1 VALUES (1000000+iRow,2000000+iRow,3000000+iRow);
+ SET iRow = iRow + 1;
+ END LOOP insertRows;
+END|
+DELIMITER ;|
+
+# Call the procedure to insert rows.
+# Use 'LOCK TABLES' to make things a lot faster.
+# Note that this code does not reproduce the problem for some reason:
+# INSERT INTO t1 SELECT 1000000+seq,2000000+seq,3000000+seq FROM seq_1_to_70000;
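+# (The seq_1_to_70000 variant would rely on MariaDB's Sequence engine and an
+# extra `--source include/have_sequence.inc`; it is left here only as a
+# comment.)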
+LOCK TABLES t1 WRITE;
+CALL proc_insert_many();
+UNLOCK TABLES;
+
+# Crash and restart the server while it is still flushing index pages
+--exec echo "restart:--aria-encrypt-tables=1 --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQLTEST_VARDIR/key.txt" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+SET debug_dbug="d,crash_shutdown";
+--error 2013
+shutdown;
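+# (The DBUG keyword above makes the shutdown crash intentionally, so the
+# client sees error 2013 "Lost connection"; this injection point is only
+# available in debug builds of the server.)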
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+# Access the table to trigger repair; validate repaired data
+SELECT * FROM t1 ORDER BY 1 DESC LIMIT 10;
+
+# --------
+
+# Cleanup
+DROP TABLE IF EXISTS t1;
+DROP PROCEDURE IF EXISTS proc_insert_many;
diff --git a/mysql-test/suite/large_tests/t/rpl_slave_net_timeout-slave.opt b/mysql-test/suite/large_tests/t/rpl_slave_net_timeout-slave.opt
new file mode 100644
index 00000000..281566c9
--- /dev/null
+++ b/mysql-test/suite/large_tests/t/rpl_slave_net_timeout-slave.opt
@@ -0,0 +1 @@
+--net_read_timeout=5
diff --git a/mysql-test/suite/large_tests/t/rpl_slave_net_timeout.test b/mysql-test/suite/large_tests/t/rpl_slave_net_timeout.test
new file mode 100644
index 00000000..238146cd
--- /dev/null
+++ b/mysql-test/suite/large_tests/t/rpl_slave_net_timeout.test
@@ -0,0 +1,79 @@
+#
+# Test that the slave reconnects as specified by `slave_net_timeout'
+#
+# Bug #50296 Slave reconnects earlier than the prescribed slave_net_timeout value
+#
+--source include/have_csv.inc
+--source include/master-slave.inc
+
+
+# save global env
+connection master;
+set @save_general_log = @@global.general_log;
+set @save_log_output = @@global.log_output;
+
+connection slave;
+set @save_slave_net_timeout = @@global.slave_net_timeout;
+
+connection master;
+set @@global.general_log = ON;
+set @@global.log_output = 'table,file';
+
+connection slave;
+--source include/stop_slave.inc
+#
+# If heartbeat is disabled, then reconnecting to the idle master
+# should happen once per `slave_net_timeout' period.
+# Since it is real (wall-clock) time that is measured, `slave_net_timeout'
+# merely guarantees that reconnecting can *not* happen earlier than the
+# specified value; there is no exact estimate of when it will happen.
+#
+# The following lines verify that keeping the master idle
+# for more than 2 * slave_net_timeout seconds, with
+# slave.net_read_timeout < slave_net_timeout,
+# does not cause the slave to reconnect within less than
+# the slave_net_timeout interval (see the worked numbers below).
+--replace_result $MASTER_MYPORT MASTER_PORT
+eval change master to master_host = '127.0.0.1',master_port = $MASTER_MYPORT,
+master_user = 'root', master_heartbeat_period = 0;
+
+set @@global.slave_net_timeout = @@global.net_read_timeout * 2;
+let $idle_time=`select @@global.slave_net_timeout * 2`;
+
+let $slave_net_timeout = `select @@global.slave_net_timeout`;
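+
+# Worked numbers for this configuration: the -slave.opt file sets
+# --net_read_timeout=5, so slave_net_timeout becomes 5 * 2 = 10 seconds and
+# the master is kept idle for 10 * 2 = 20 seconds; the assertion at the end
+# then requires the gap between the two latest reconnects to be at least
+# 10 seconds.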
+
+--source include/start_slave.inc
+
+--disable_query_log
+--disable_result_log
+eval select 'master is idle for ', sleep($idle_time);
+--enable_result_log
+--enable_query_log
+
+--source include/stop_slave.inc
+
+# querying general-log
+
+connection master;
+
+# In particular, the last reconnection timestamp must be greater than or
+# equal to the previous one plus slave_net_timeout.
+
+select event_time from (select event_time from mysql.general_log as t_1 where command_type like 'Connect' order by event_time desc limit 2) as t_2 order by event_time desc limit 1 into @ts_last;
+select event_time from (select event_time from mysql.general_log as t_1 where command_type like 'Connect' order by event_time desc limit 2) as t_2 order by event_time asc limit 1 into @ts_prev;
+
+--let $assert_cond= time_to_sec(@ts_last) - time_to_sec(@ts_prev) >= $slave_net_timeout
+--let $assert_text= time between last reconnection and the reconnection before that should be >= slave_net_timeout
+--source include/assert.inc
+
+# cleanup
+
+# restore global env
+connection master;
+set @@global.general_log = @save_general_log;
+set @@global.log_output = @save_log_output;
+connection slave;
+set @@global.slave_net_timeout = @save_slave_net_timeout;
+
+--let $rpl_only_running_threads= 1
+--source include/rpl_end.inc