From 7aaff451bafb4b43e1626b329e59a4c9aa7fc31d Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Tue, 4 Jun 2024 19:58:41 +0200
Subject: Merging upstream version 5:7.2.5.

Signed-off-by: Daniel Baumann
---
 tests/unit/type/list.tcl           | 89 +++++++++++++++++++++++++++++++++++++-
 tests/unit/type/stream-cgroups.tcl | 30 ++++++++++++-
 tests/unit/type/zset.tcl           | 34 +++++++++++++++
 3 files changed, 151 insertions(+), 2 deletions(-)

(limited to 'tests/unit/type')

diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl
index 993b6d1..7c0fa87 100644
--- a/tests/unit/type/list.tcl
+++ b/tests/unit/type/list.tcl
@@ -220,6 +220,7 @@ start_server [list overrides [list save ""] ] {
 
     # checking LSET in case ziplist needs to be split
     test {Test LSET with packed is split in the middle} {
+        set original_config [config_get_set list-max-listpack-size 4]
         r flushdb
         r debug quicklist-packed-threshold 5b
         r RPUSH lst "aa"
@@ -227,6 +228,7 @@ start_server [list overrides [list save ""] ] {
         r RPUSH lst "cc"
         r RPUSH lst "dd"
         r RPUSH lst "ee"
+        assert_encoding quicklist lst
         r lset lst 2 [string repeat e 10]
         assert_equal [r lpop lst] "aa"
         assert_equal [r lpop lst] "bb"
@@ -234,6 +236,7 @@ start_server [list overrides [list save ""] ] {
         assert_equal [r lpop lst] "dd"
         assert_equal [r lpop lst] "ee"
         r debug quicklist-packed-threshold 0
+        r config set list-max-listpack-size $original_config
     } {OK} {needs:debug}
@@ -381,7 +384,63 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
         assert_equal [read_big_bulk {r rpop lst}] $str_length
     } {} {large-memory}
 
-    test {Test LMOVE on plain nodes over 4GB} {
+    test {Test LSET on plain nodes with large elements under packed_threshold over 4GB} {
+        r flushdb
+        r rpush lst a b c d e
+        for {set i 0} {$i < 5} {incr i} {
+            r write "*4\r\n\$4\r\nlset\r\n\$3\r\nlst\r\n\$1\r\n$i\r\n"
+            write_big_bulk 1000000000
+        }
+        r ping
+    } {PONG} {large-memory}
+
+    test {Test LSET splits a quicklist node, and then merge} {
+        # Test when a quicklist node can't be inserted and is split, the split
+        # node merges with the node before it and the `before` node is kept.
+        r flushdb
+        r rpush lst [string repeat "x" 4096]
+        r lpush lst a b c d e f g
+        r lpush lst [string repeat "y" 4096]
+        # now: [y...] [g f e d c b a x...]
+        #      (node0)      (node1)
+        # Keep inserting elements into node1 until node1 is split into two
+        # nodes([g] [...]), eventually node0 will merge with the [g] node.
+        # Since node0 is larger, after the merge node0 will be kept and
+        # the [g] node will be deleted.
+        for {set i 7} {$i >= 3} {incr i -1} {
+            r write "*4\r\n\$4\r\nlset\r\n\$3\r\nlst\r\n\$1\r\n$i\r\n"
+            write_big_bulk 1000000000
+        }
+        assert_equal "g" [r lindex lst 1]
+        r ping
+    } {PONG} {large-memory}
+
+    test {Test LSET splits a LZF compressed quicklist node, and then merge} {
+        # Test when a LZF compressed quicklist node can't be inserted and is split,
+        # the split node merges with the node before it and the split node is kept.
+        r flushdb
+        r config set list-compress-depth 1
+        r lpush lst [string repeat "x" 2000]
+        r rpush lst [string repeat "y" 7000]
+        r rpush lst a b c d e f g
+        r rpush lst [string repeat "z" 8000]
+        r lset lst 0 h
+        # now: [h] [y... a b c d e f g] [z...]
+        #     node0      node1(LZF)
+        # Keep inserting elements into node1 until node1 is split into two
+        # nodes([y...] [...]), eventually node0 will merge with the [y...] node.
+        # Since [y...] node is larger, after the merge node0 will be deleted and
+        # the [y...] node will be kept.
+        for {set i 7} {$i >= 3} {incr i -1} {
+            r write "*4\r\n\$4\r\nlset\r\n\$3\r\nlst\r\n\$1\r\n$i\r\n"
+            write_big_bulk 1000000000
+        }
+        assert_equal "h" [r lindex lst 0]
+        r config set list-compress-depth 0
+        r ping
+    } {PONG} {large-memory}
+
+    test {Test LMOVE on plain nodes over 4GB} {
         r flushdb
         r RPUSH lst2{t} "aa"
         r RPUSH lst2{t} "bb"
@@ -1186,6 +1245,34 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
         r select 9
     } {OK} {singledb:skip needs:debug}
 
+    test {BLPOP unblock but the key is expired and then block again - reprocessing command} {
+        r flushall
+        r debug set-active-expire 0
+        set rd [redis_deferring_client]
+
+        set start [clock milliseconds]
+        $rd blpop mylist 1
+        wait_for_blocked_clients_count 1
+
+        # The exec will try to awake the blocked client, but the key is expired,
+        # so the client will be blocked again during the command reprocessing.
+        r multi
+        r rpush mylist a
+        r pexpire mylist 100
+        r debug sleep 0.2
+        r exec
+
+        assert_equal {} [$rd read]
+        set end [clock milliseconds]
+
+        # Before the fix in #13004, this time would have been 1200+ (i.e. more than 1200ms),
+        # now it should be 1000, but in order to avoid timing issues, we increase the range a bit.
+        assert_range [expr $end-$start] 1000 1150
+
+        r debug set-active-expire 1
+        $rd close
+    } {0} {needs:debug}
+
 foreach {pop} {BLPOP BLMPOP_LEFT} {
     test "$pop when new key is moved into place" {
         set rd [redis_deferring_client]
diff --git a/tests/unit/type/stream-cgroups.tcl b/tests/unit/type/stream-cgroups.tcl
index a6cc5da..46e0b05 100644
--- a/tests/unit/type/stream-cgroups.tcl
+++ b/tests/unit/type/stream-cgroups.tcl
@@ -475,7 +475,7 @@ start_server {
         $rd close
     }
 
-    test {Blocking XREADGROUP for stream key that has clients blocked on list - avoid endless loop} {
+    test {Blocking XREADGROUP for stream key that has clients blocked on stream - avoid endless loop} {
         r DEL mystream
         r XGROUP CREATE mystream mygroup $ MKSTREAM
 
@@ -498,6 +498,34 @@ start_server {
         assert_equal [r ping] {PONG}
     }
 
+    test {Blocking XREADGROUP for stream key that has clients blocked on stream - reprocessing command} {
+        r DEL mystream
+        r XGROUP CREATE mystream mygroup $ MKSTREAM
+
+        set rd1 [redis_deferring_client]
+        set rd2 [redis_deferring_client]
+
+        $rd1 xreadgroup GROUP mygroup myuser BLOCK 0 STREAMS mystream >
+        wait_for_blocked_clients_count 1
+
+        set start [clock milliseconds]
+        $rd2 xreadgroup GROUP mygroup myuser BLOCK 1000 STREAMS mystream >
+        wait_for_blocked_clients_count 2
+
+        # After a while call xadd and let rd2 re-process the command.
+        after 200
+        r xadd mystream * field value
+        assert_equal {} [$rd2 read]
+        set end [clock milliseconds]
+
+        # Before the fix in #13004, this time would have been 1200+ (i.e. more than 1200ms),
+        # now it should be 1000, but in order to avoid timing issues, we increase the range a bit.
+        assert_range [expr $end-$start] 1000 1150
+
+        $rd1 close
+        $rd2 close
+    }
+
     test {XGROUP DESTROY should unblock XREADGROUP with -NOGROUP} {
         r config resetstat
         r del mystream
diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl
index 33427d8..0a42784 100644
--- a/tests/unit/type/zset.tcl
+++ b/tests/unit/type/zset.tcl
@@ -1942,6 +1942,34 @@ start_server {tags {"zset"}} {
         }
     }
 
+    test {BZPOPMIN unblock but the key is expired and then block again - reprocessing command} {
+        r flushall
+        r debug set-active-expire 0
+        set rd [redis_deferring_client]
+
+        set start [clock milliseconds]
+        $rd bzpopmin zset{t} 1
+        wait_for_blocked_clients_count 1
+
+        # The exec will try to awake the blocked client, but the key is expired,
+        # so the client will be blocked again during the command reprocessing.
+        r multi
+        r zadd zset{t} 1 one
+        r pexpire zset{t} 100
+        r debug sleep 0.2
+        r exec
+
+        assert_equal {} [$rd read]
+        set end [clock milliseconds]
+
+        # Before the fix in #13004, this time would have been 1200+ (i.e. more than 1200ms),
+        # now it should be 1000, but in order to avoid timing issues, we increase the range a bit.
+        assert_range [expr $end-$start] 1000 1150
+
+        r debug set-active-expire 1
+        $rd close
+    } {0} {needs:debug}
+
     test "BZPOPMIN with same key multiple times should work" {
         set rd [redis_deferring_client]
         r del z1{t} z2{t}
@@ -2211,12 +2239,18 @@ start_server {tags {"zset"}} {
     } {b 2 c 3}
 
     test {ZRANGESTORE BYLEX} {
+        set res [r zrangestore z3{t} z1{t} \[b \[c BYLEX]
+        assert_equal $res 2
+        assert_encoding listpack z3{t}
         set res [r zrangestore z2{t} z1{t} \[b \[c BYLEX]
         assert_equal $res 2
         r zrange z2{t} 0 -1 withscores
     } {b 2 c 3}
 
     test {ZRANGESTORE BYSCORE} {
+        set res [r zrangestore z4{t} z1{t} 1 2 BYSCORE]
+        assert_equal $res 2
+        assert_encoding listpack z4{t}
         set res [r zrangestore z2{t} z1{t} 1 2 BYSCORE]
         assert_equal $res 2
         r zrange z2{t} 0 -1 withscores
--
cgit v1.2.3