Diffstat (limited to 'tests')
-rw-r--r--  tests/00confnames | 56
-rw-r--r--  tests/00createnames | 44
-rw-r--r--  tests/00linear | 30
-rw-r--r--  tests/00multipath | 29
-rw-r--r--  tests/00names | 19
-rw-r--r--  tests/00raid0 | 45
-rw-r--r--  tests/00raid1 | 38
-rw-r--r--  tests/00raid10 | 18
-rw-r--r--  tests/00raid4 | 16
-rw-r--r--  tests/00raid5 | 33
-rw-r--r--  tests/00raid5-zero | 12
-rw-r--r--  tests/00raid6 | 16
-rw-r--r--  tests/00readonly | 33
-rw-r--r--  tests/01r1fail | 29
-rw-r--r--  tests/01r5fail | 27
-rw-r--r--  tests/01r5integ | 33
-rw-r--r--  tests/01r5integ.broken | 7
-rw-r--r--  tests/01raid6integ | 57
-rw-r--r--  tests/01raid6integ.broken | 7
-rw-r--r--  tests/01replace | 52
-rw-r--r--  tests/02lineargrow | 30
-rw-r--r--  tests/02r1add | 40
-rw-r--r--  tests/02r1grow | 36
-rw-r--r--  tests/02r5grow | 53
-rw-r--r--  tests/02r6grow | 36
-rw-r--r--  tests/03assem-incr | 23
-rw-r--r--  tests/03r0assem | 141
-rw-r--r--  tests/03r5assem | 109
-rw-r--r--  tests/03r5assem-failed | 12
-rw-r--r--  tests/03r5assemV1 | 128
-rw-r--r--  tests/04r0update | 26
-rw-r--r--  tests/04r1update | 15
-rw-r--r--  tests/04r5swap | 18
-rw-r--r--  tests/04r5swap.broken | 7
-rw-r--r--  tests/04update-metadata | 52
-rw-r--r--  tests/04update-uuid | 82
-rw-r--r--  tests/05r1-add-internalbitmap | 20
-rw-r--r--  tests/05r1-add-internalbitmap-v1a | 20
-rw-r--r--  tests/05r1-add-internalbitmap-v1b | 20
-rw-r--r--  tests/05r1-add-internalbitmap-v1c | 20
-rw-r--r--  tests/05r1-bitmapfile | 49
-rw-r--r--  tests/05r1-failfast | 74
-rw-r--r--  tests/05r1-grow-external | 33
-rw-r--r--  tests/05r1-grow-internal | 31
-rw-r--r--  tests/05r1-grow-internal-1 | 31
-rw-r--r--  tests/05r1-internalbitmap | 47
-rw-r--r--  tests/05r1-internalbitmap-v1a | 48
-rw-r--r--  tests/05r1-internalbitmap-v1b | 49
-rw-r--r--  tests/05r1-internalbitmap-v1c | 48
-rw-r--r--  tests/05r1-n3-bitmapfile | 53
-rw-r--r--  tests/05r1-re-add | 39
-rw-r--r--  tests/05r1-re-add-nosuper | 38
-rw-r--r--  tests/05r1-remove-internalbitmap | 18
-rw-r--r--  tests/05r1-remove-internalbitmap-v1a | 18
-rw-r--r--  tests/05r1-remove-internalbitmap-v1b | 18
-rw-r--r--  tests/05r1-remove-internalbitmap-v1c | 18
-rw-r--r--  tests/05r5-bitmapfile | 49
-rw-r--r--  tests/05r5-internalbitmap | 47
-rw-r--r--  tests/05r6-bitmapfile | 49
-rw-r--r--  tests/05r6tor0 | 27
-rw-r--r--  tests/06name | 12
-rw-r--r--  tests/06sysfs | 11
-rw-r--r--  tests/06wrmostly | 13
-rw-r--r--  tests/07autoassemble | 24
-rw-r--r--  tests/07autoassemble.broken | 8
-rw-r--r--  tests/07autodetect | 34
-rw-r--r--  tests/07autodetect.broken | 5
-rw-r--r--  tests/07changelevelintr | 61
-rw-r--r--  tests/07changelevelintr.broken | 9
-rw-r--r--  tests/07changelevels | 114
-rw-r--r--  tests/07changelevels.broken | 9
-rw-r--r--  tests/07layouts | 91
-rw-r--r--  tests/07reshape5intr | 41
-rw-r--r--  tests/07reshape5intr.broken | 45
-rw-r--r--  tests/07revert-grow | 52
-rw-r--r--  tests/07revert-grow.broken | 31
-rw-r--r--  tests/07revert-inplace | 44
-rw-r--r--  tests/07revert-shrink | 56
-rw-r--r--  tests/07revert-shrink.broken | 9
-rw-r--r--  tests/07testreshape5 | 45
-rw-r--r--  tests/07testreshape5.broken | 12
-rw-r--r--  tests/09imsm-assemble | 73
-rw-r--r--  tests/09imsm-assemble.broken | 6
-rw-r--r--  tests/09imsm-create-fail-rebuild | 78
-rw-r--r--  tests/09imsm-create-fail-rebuild.broken | 5
-rw-r--r--  tests/09imsm-overlap.broken | 7
-rw-r--r--  tests/10ddf-assemble-missing | 61
-rw-r--r--  tests/10ddf-assemble-missing.broken | 6
-rw-r--r--  tests/10ddf-create | 89
-rw-r--r--  tests/10ddf-create-fail-rebuild | 77
-rw-r--r--  tests/10ddf-fail-create-race | 66
-rw-r--r--  tests/10ddf-fail-create-race.broken | 7
-rw-r--r--  tests/10ddf-fail-readd | 55
-rw-r--r--  tests/10ddf-fail-readd-readonly | 71
-rw-r--r--  tests/10ddf-fail-spare | 86
-rw-r--r--  tests/10ddf-fail-stop-readd | 66
-rw-r--r--  tests/10ddf-fail-twice | 59
-rw-r--r--  tests/10ddf-fail-two-spares | 86
-rw-r--r--  tests/10ddf-fail-two-spares.broken | 5
-rw-r--r--  tests/10ddf-geometry | 82
-rw-r--r--  tests/10ddf-incremental-wrong-order | 131
-rw-r--r--  tests/10ddf-incremental-wrong-order.broken | 9
-rw-r--r--  tests/10ddf-sudden-degraded | 18
-rw-r--r--  tests/11spare-migration | 454
-rw-r--r--  tests/12imsm-r0_2d-grow-r0_3d | 20
-rw-r--r--  tests/12imsm-r0_2d-grow-r0_4d | 20
-rw-r--r--  tests/12imsm-r0_2d-grow-r0_5d | 20
-rw-r--r--  tests/12imsm-r0_3d-grow-r0_4d | 20
-rw-r--r--  tests/12imsm-r5_3d-grow-r5_4d | 20
-rw-r--r--  tests/12imsm-r5_3d-grow-r5_5d | 20
-rw-r--r--  tests/13imsm-r0_r0_2d-grow-r0_r0_4d | 29
-rw-r--r--  tests/13imsm-r0_r0_2d-grow-r0_r0_5d | 29
-rw-r--r--  tests/13imsm-r0_r0_3d-grow-r0_r0_4d | 29
-rw-r--r--  tests/13imsm-r0_r5_3d-grow-r0_r5_4d | 29
-rw-r--r--  tests/13imsm-r0_r5_3d-grow-r0_r5_5d | 29
-rw-r--r--  tests/13imsm-r5_r0_3d-grow-r5_r0_4d | 29
-rw-r--r--  tests/13imsm-r5_r0_3d-grow-r5_r0_5d | 29
-rw-r--r--  tests/14imsm-r0_3d-r5_3d-migrate-r5_4d-r5_4d | 29
-rw-r--r--  tests/14imsm-r0_3d_no_spares-migrate-r5_3d | 21
-rw-r--r--  tests/14imsm-r0_r0_2d-takeover-r10_4d | 30
-rw-r--r--  tests/14imsm-r10_4d-grow-r10_5d | 20
-rw-r--r--  tests/14imsm-r10_r5_4d-takeover-r0_2d | 30
-rw-r--r--  tests/14imsm-r1_2d-grow-r1_3d | 19
-rw-r--r--  tests/14imsm-r1_2d-grow-r1_3d.broken | 5
-rw-r--r--  tests/14imsm-r1_2d-takeover-r0_2d | 21
-rw-r--r--  tests/14imsm-r1_2d-takeover-r0_2d.broken | 6
-rw-r--r--  tests/14imsm-r5_3d-grow-r5_5d-no-spares | 20
-rw-r--r--  tests/14imsm-r5_3d-migrate-r4_3d | 21
-rw-r--r--  tests/15imsm-r0_3d_64k-migrate-r0_3d_256k | 21
-rw-r--r--  tests/15imsm-r5_3d_4k-migrate-r5_3d_256k | 21
-rw-r--r--  tests/15imsm-r5_3d_64k-migrate-r5_3d_256k | 21
-rw-r--r--  tests/15imsm-r5_6d_4k-migrate-r5_6d_256k | 21
-rw-r--r--  tests/15imsm-r5_r0_3d_64k-migrate-r5_r0_3d_256k | 34
-rw-r--r--  tests/16imsm-r0_3d-migrate-r5_4d | 22
-rw-r--r--  tests/16imsm-r0_5d-migrate-r5_6d | 22
-rw-r--r--  tests/16imsm-r5_3d-migrate-r0_3d | 21
-rw-r--r--  tests/16imsm-r5_5d-migrate-r0_5d | 21
-rw-r--r--  tests/18imsm-1d-takeover-r0_1d | 22
-rw-r--r--  tests/18imsm-1d-takeover-r1_2d | 20
-rw-r--r--  tests/18imsm-r0_2d-takeover-r10_4d | 22
-rw-r--r--  tests/18imsm-r10_4d-takeover-r0_2d | 22
-rw-r--r--  tests/18imsm-r10_4d-takeover-r0_2d.broken | 5
-rw-r--r--  tests/18imsm-r1_2d-takeover-r0_1d | 21
-rw-r--r--  tests/18imsm-r1_2d-takeover-r0_1d.broken | 6
-rw-r--r--  tests/19raid6auto-repair | 49
-rw-r--r--  tests/19raid6auto-repair.broken | 5
-rw-r--r--  tests/19raid6check | 27
-rw-r--r--  tests/19raid6repair | 56
-rw-r--r--  tests/19raid6repair.broken | 5
-rw-r--r--  tests/19repair-does-not-destroy | 28
-rw-r--r--  tests/20raid5journal | 64
-rw-r--r--  tests/21raid5cache | 87
-rw-r--r--  tests/23rdev-lifetime | 34
-rw-r--r--  tests/24raid10deadlock | 88
-rw-r--r--  tests/24raid10deadlock.inject_error | 0
-rw-r--r--  tests/24raid456deadlock | 58
-rw-r--r--  tests/25raid456-recovery-while-reshape | 33
-rw-r--r--  tests/25raid456-reshape-corrupt-data | 35
-rw-r--r--  tests/25raid456-reshape-deadlock | 34
-rw-r--r--  tests/25raid456-reshape-while-recovery | 32
-rw-r--r--  tests/ToTest | 44
-rw-r--r--  tests/env-ddf-template | 113
-rw-r--r--  tests/env-imsm-template | 91
-rw-r--r--  tests/func.sh | 354
-rw-r--r--  tests/imsm-grow-template | 119
-rw-r--r--  tests/templates/names_template | 75
-rw-r--r--  tests/utils | 191
167 files changed, 7085 insertions, 0 deletions
diff --git a/tests/00confnames b/tests/00confnames
new file mode 100644
index 0000000..191a905
--- /dev/null
+++ b/tests/00confnames
@@ -0,0 +1,56 @@
+set -x -e
+. tests/templates/names_template
+
+# Test how <devname> is handled during Incremental assembly with
+# a config file and an ARRAY line specified.
+
+names_create "/dev/md/name"
+local _UUID="$(mdadm -D --export /dev/md127 | grep MD_UUID | cut -d'=' -f2)"
+[[ "$_UUID" == "" ]] && echo "Cannot obtain UUID for $DEVNODE_NAME" && exit 1
+
+
+# 1. <devname> definition consistent with metadata name.
+names_make_conf $_UUID "/dev/md/name" $config
+mdadm -S "/dev/md127"
+mdadm -I $dev0 --config=$config
+names_verify "/dev/md127" "name" "name"
+mdadm -S "/dev/md127"
+
+# 2. Same as 1, but use short name form of <devname>.
+names_make_conf $_UUID "name" $config
+mdadm -I $dev0 --config=$config
+names_verify "/dev/md127" "name" "name"
+mdadm -S "/dev/md127"
+
+# 3. Same as 1, but use different <devname> than metadata provides.
+names_make_conf $_UUID "/dev/md/other" $config
+mdadm -I $dev0 --config=$config
+names_verify "/dev/md127" "other" "name"
+mdadm -S "/dev/md127"
+
+# 4. Same as 3, but use short name form of <devname>.
+names_make_conf $_UUID "other" $config
+mdadm -I $dev0 --config=$config
+names_verify "/dev/md127" "other" "name"
+mdadm -S "/dev/md127"
+
+# 5. Force particular node creation by setting <devname> to /dev/mdX.
+# Link is not created in this case.
+names_make_conf $_UUID "/dev/md4" $config
+mdadm -I $dev0 --config=$config
+names_verify "/dev/md4" "empty" "name"
+mdadm -S "/dev/md4"
+
+# 6. <devname> with some special symbols and locales.
+# <devname> should be ignored.
+names_make_conf $_UUID "tźż-\.,<>st+-" $config
+mdadm -I $dev0 --config=$config
+names_verify "/dev/md127" "name" "name"
+mdadm -S "/dev/md127"
+
+# 7. No <devname> set.
+# Metadata name and default node used.
+names_make_conf $_UUID "empty" $config
+mdadm -I $dev0 --config=$config
+names_verify "/dev/md127" "name" "name"
+mdadm -S "/dev/md127"
diff --git a/tests/00createnames b/tests/00createnames
new file mode 100644
index 0000000..a95e7d2
--- /dev/null
+++ b/tests/00createnames
@@ -0,0 +1,44 @@
+set -x -e
+. tests/templates/names_template
+
+# Test how <devname> and --name= are handled for create mode.
+
+# The most trivial case.
+names_create "/dev/md/name"
+names_verify "/dev/md127" "name" "name"
+mdadm -S "/dev/md127"
+
+names_create "name"
+names_verify "/dev/md127" "name" "name"
+mdadm -S "/dev/md127"
+
+# Use 'mdX' as name.
+names_create "/dev/md/md0"
+names_verify "/dev/md127" "md0" "md0"
+mdadm -S "/dev/md127"
+
+names_create "md0"
+names_verify "/dev/md127" "md0" "md0"
+mdadm -S "/dev/md127"
+
+# <devnode> is used to create MD_DEVNAME, but name is used to create MD_NAME.
+names_create "/dev/md/devnode" "name"
+names_verify "/dev/md127" "devnode" "name"
+mdadm -S "/dev/md127"
+
+names_create "devnode" "name"
+names_verify "/dev/md127" "devnode" "name"
+mdadm -S "/dev/md127"
+
+# Devnode points to /dev/ directory. MD_DEVNAME doesn't exist.
+names_create "/dev/md0"
+names_verify "/dev/md0" "empty" "0"
+mdadm -S "/dev/md0"
+
+# Devnode points to /dev/ directory and name is set.
+names_create "/dev/md0" "name"
+names_verify "/dev/md0" "empty" "name"
+mdadm -S "/dev/md0"
+
+# Devnode is a special ignore keyword. Should be rejected.
+names_create "<ignore>" "name", "true"
diff --git a/tests/00linear b/tests/00linear
new file mode 100644
index 0000000..5a11608
--- /dev/null
+++ b/tests/00linear
@@ -0,0 +1,30 @@
+
+# create a simple linear
+
+if [ "$LINEAR" != "yes" ]; then
+ echo -ne 'skipping... '
+ exit 0
+fi
+
+mdadm -CR $md0 -l linear -n3 $dev0 $dev1 $dev2
+check linear
+testdev $md0 3 $mdsize2_l 1
+mdadm -S $md0
+
+# now with version-0.90 superblock
+mdadm -CR $md0 -e0.90 --level=linear -n4 $dev0 $dev1 $dev2 $dev3
+check linear
+testdev $md0 4 $mdsize0 1
+mdadm -S $md0
+
+# now with version-1.0 superblock
+mdadm -CR $md0 -e1.0 --level=linear -n4 $dev0 $dev1 $dev2 $dev3
+check linear
+testdev $md0 4 $mdsize1 1
+mdadm -S $md0
+
+# now with no superblock
+mdadm -B $md0 -l linear -n5 $dev0 $dev1 $dev2 $dev3 $dev4
+check linear
+testdev $md0 5 $size 64
+mdadm -S $md0
diff --git a/tests/00multipath b/tests/00multipath
new file mode 100644
index 0000000..84e4d69
--- /dev/null
+++ b/tests/00multipath
@@ -0,0 +1,29 @@
+
+#
+# create a multipath, and fail and stuff
+
+if [ "$MULTIPATH" != "yes" ]; then
+ echo -ne 'skipping... '
+ exit 0
+fi
+
+mdadm -CR $md1 -l multipath -n2 $path0 $path1
+
+testdev $md1 1 $mdsize12 1
+
+mdadm $md1 -f $path0
+rotest $md1
+testdev $md1 1 $mdsize12 1
+
+mdadm $md1 -r $path0
+mdadm $md1 -a $path0
+
+rotest $md1
+testdev $md1 1 $mdsize12 1
+
+mdadm $md1 -f $path1
+mdadm $md1 -r $path1
+rotest $md1
+testdev $md1 1 $mdsize12 1
+
+mdadm -S $md1
diff --git a/tests/00names b/tests/00names
new file mode 100644
index 0000000..d996bef
--- /dev/null
+++ b/tests/00names
@@ -0,0 +1,19 @@
+set -x -e
+
+# create arrays with non-numeric names
+conf=$targetdir/mdadm.conf
+echo "CREATE names=yes" > $conf
+
+levels=(raid0 raid1 raid4 raid5 raid6)
+
+if [ "$LINEAR" == "yes" ]; then
+ levels+=( linear )
+fi
+
+for i in ${levels[@]}
+do
+ mdadm -CR --config $conf /dev/md/$i -l $i -n 4 $dev4 $dev3 $dev2 $dev1
+ check $i
+ [ -d /sys/class/block/md_$i/md ]
+ mdadm -S md_$i
+done
diff --git a/tests/00raid0 b/tests/00raid0
new file mode 100644
index 0000000..6407c32
--- /dev/null
+++ b/tests/00raid0
@@ -0,0 +1,45 @@
+
+# create a simple raid0
+
+mdadm -CR $md0 -l raid0 -n3 $dev0 $dev1 $dev2
+check raid0
+testdev $md0 3 $mdsize2_l 512
+mdadm -S $md0
+
+# verify that raid0 with layouts fails for 0.90 metadata
+mdadm -CR $md0 -e0.90 -l0 -n4 $dev0 $dev1 $dev2 $dev3
+check opposite_result
+
+# now with no superblock
+mdadm -B $md0 -l0 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
+check raid0
+testdev $md0 5 $size 512
+mdadm -S $md0
+
+if [ "$LINEAR" != "yes" ]; then
+ echo -ne 'skipping... '
+ exit 0
+fi
+
+# now same again with different chunk size
+for chunk in 4 32 256
+do
+ mdadm -CR $md0 -e0.90 -l linear --chunk $chunk -n3 $dev0 $dev1 $dev2
+ check linear
+ testdev $md0 3 $mdsize0 $chunk
+ mdadm -S $md0
+
+ # now with version-1 superblock
+ mdadm -CR $md0 -e1.0 -l0 -c $chunk -n4 $dev0 $dev1 $dev2 $dev3
+ check raid0
+ testdev $md0 4 $mdsize1 $chunk
+ mdadm -S $md0
+
+ # now with no superblock
+ mdadm -B $md0 -l0 -n5 --chunk=$chunk $dev0 $dev1 $dev2 $dev3 $dev4
+ check raid0
+ testdev $md0 5 $size $chunk
+ mdadm -S $md0
+
+done
+exit 0
diff --git a/tests/00raid1 b/tests/00raid1
new file mode 100644
index 0000000..f6b8be1
--- /dev/null
+++ b/tests/00raid1
@@ -0,0 +1,38 @@
+
+# create a simple mirror
+# test version0, version1, and no super
+# test resync and recovery.
+
+# It's just a sanity check. This command shouldn't run successfully
+mdadm -CR $md0 -l 1 -n2 missing missing
+check opposite_result
+
+mdadm -CR $md0 -l 1 -n2 $dev0 $dev1
+check resync
+check raid1
+testdev $md0 1 $mdsize1a 64
+mdadm -S $md0
+
+# now with version-0.90 superblock, spare
+mdadm -CR $md0 -e0.90 --level=raid1 -n3 -x2 $dev0 missing missing $dev1 $dev2
+check recovery
+check raid1
+testdev $md0 1 $mdsize0 64
+mdadm -S $md0
+
+# now with no superblock
+mdadm -B $md0 -l mirror -n2 $dev0 $dev1
+check resync
+check raid1
+testdev $md0 1 $size 1
+mdadm -S $md0
+
+# again, but with no resync
+mdadm -B $md0 -l 1 --assume-clean -n2 $dev0 $dev1
+check raid1
+check nosync
+testdev $md0 1 $size 1
+mdadm -S $md0
+
+
+exit 0
diff --git a/tests/00raid10 b/tests/00raid10
new file mode 100644
index 0000000..796b970
--- /dev/null
+++ b/tests/00raid10
@@ -0,0 +1,18 @@
+
+# Create some raid10 arrays, all with 6 devices and one spare
+devs="$dev0 $dev1 $dev2 $dev3 $dev4 $dev5 $dev6"
+
+for lo in n2 n3 f2 f3
+do
+ cm=1
+ case $lo in
+ f2 ) m=3 cm=2;;
+ f3 ) m=2 cm=3;;
+ n2 ) m=3;;
+ n3 ) m=2;;
+ esac
+ mdadm --create --run --level=raid10 --layout $lo --raid-disks 6 -x 1 $md0 $devs
+ check resync ; check raid10
+ testdev $md0 $m $mdsize1 $[512*cm]
+ mdadm -S $md0
+done
diff --git a/tests/00raid4 b/tests/00raid4
new file mode 100644
index 0000000..00a14f2
--- /dev/null
+++ b/tests/00raid4
@@ -0,0 +1,16 @@
+
+# create a simple raid4 set
+
+mdadm -CfR $md0 -l 4 -n3 $dev0 $dev1 $dev2
+check resync ; check raid[45]
+testdev $md0 2 $mdsize1 512
+mdadm -S $md0
+
+# now with version-1 superblock
+mdadm -CR $md0 -e1 --level=raid4 -n4 $dev0 $dev1 $dev2 $dev3
+check recovery; check raid[45]
+testdev $md0 3 $mdsize1 512
+mdadm -S $md0
+
+
+exit 0
diff --git a/tests/00raid5 b/tests/00raid5
new file mode 100644
index 0000000..b2b7a97
--- /dev/null
+++ b/tests/00raid5
@@ -0,0 +1,33 @@
+
+# create a simple raid5 set
+
+mdadm -CfR $md0 -e 0.90 -l 5 -n3 $dev0 $dev1 $dev2
+check resync
+testdev $md0 2 $mdsize0 512
+mdadm -S $md0
+
+# now with version-1 superblock
+mdadm -CR $md0 -e1 --level=raid5 -n4 $dev0 $dev1 $dev2 $dev3
+check recovery
+testdev $md0 3 $mdsize1 512
+mdadm -S $md0
+
+# now same again with explicit layout
+
+for lo in la ra left-symmetric right-symmetric
+do
+
+ mdadm -CfR $md0 -l 5 -p $lo -n3 $dev0 $dev1 $dev2
+ check resync ; check raid5
+ testdev $md0 2 $mdsize1 512
+ mdadm -S $md0
+
+ # now with version-1 superblock
+ mdadm -CR $md0 -e1 --level=raid5 --layout $lo -n4 $dev0 $dev1 $dev2 $dev3
+ check recovery ; check raid5
+ testdev $md0 3 $mdsize1 512
+ mdadm -S $md0
+
+done
+
+exit 0
diff --git a/tests/00raid5-zero b/tests/00raid5-zero
new file mode 100644
index 0000000..7d0f05a
--- /dev/null
+++ b/tests/00raid5-zero
@@ -0,0 +1,12 @@
+
+if mdadm -CfR $md0 -l 5 -n3 $dev0 $dev1 $dev2 --write-zeroes ; then
+ check nosync
+ echo check > /sys/block/md0/md/sync_action;
+ check wait
+elif grep "zeroing [^ ]* failed: Operation not supported" \
+ $targetdir/stderr; then
+ echo "write-zeros not supported, skipping"
+else
+ echo >&2 "ERROR: mdadm return failure without not supported message"
+ exit 1
+fi
diff --git a/tests/00raid6 b/tests/00raid6
new file mode 100644
index 0000000..6977af9
--- /dev/null
+++ b/tests/00raid6
@@ -0,0 +1,16 @@
+
+# create a simple raid6 set
+
+mdadm -CfR $md0 -e0.90 -l 6 -n4 $dev0 $dev1 $dev2 $dev3
+check resync ; check raid6
+testdev $md0 2 $mdsize0 512
+mdadm -S $md0
+
+# now with version-1 superblock
+mdadm -CR $md0 -e1 --level=raid6 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
+check resync ; check raid6
+testdev $md0 3 $mdsize1 512
+mdadm -S $md0
+
+
+exit 0
diff --git a/tests/00readonly b/tests/00readonly
new file mode 100644
index 0000000..80b6362
--- /dev/null
+++ b/tests/00readonly
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+levels=(raid0 raid1 raid4 raid5 raid6 raid10)
+
+if [ "$LINEAR" == "yes" ]; then
+ levels+=( linear )
+fi
+
+for metadata in 0.9 1.0 1.1 1.2
+do
+ for level in ${levels[@]}
+ do
+ if [[ $metadata == "0.9" && $level == "raid0" ]];
+ then
+ continue
+ fi
+ mdadm -CR $md0 -l $level -n 4 --metadata=$metadata \
+ $dev1 $dev2 $dev3 $dev4 --assume-clean
+ check nosync
+ check $level
+ udevadm settle
+ mdadm -ro $md0
+ check readonly
+ state=$(cat /sys/block/md0/md/array_state)
+ [ "$state" == "readonly" ] ||
+ die "array_state should be 'readonly', but is $state"
+ mdadm -w $md0
+ check $level
+ mdadm -S $md0
+ done
+done
+
+exit 0
diff --git a/tests/01r1fail b/tests/01r1fail
new file mode 100644
index 0000000..389b813
--- /dev/null
+++ b/tests/01r1fail
@@ -0,0 +1,29 @@
+
+# create a raid1, fail and remove a drive during initial sync
+# Add two more, fail and remove one
+# wait for sync to complete, fail, remove, re-add
+
+mdadm -CR $md0 -l1 -n4 $dev0 $dev1 $dev2 missing
+check resync
+mdadm $md0 --fail $dev2
+check resync
+mdadm $md0 --fail $dev1
+sleep 1
+check nosync
+check state U___
+mdadm $md0 --add $dev4 $dev3
+check recovery
+# there could be two separate recoveries, one for each dev
+check wait
+check wait
+mdadm $md0 --remove $dev2 $dev1
+check nosync
+check state UUU_
+
+mdadm --zero-superblock $dev2
+mdadm $md0 -a $dev2
+check recovery
+check wait
+check state UUUU
+
+mdadm -S $md0
diff --git a/tests/01r5fail b/tests/01r5fail
new file mode 100644
index 0000000..873dba5
--- /dev/null
+++ b/tests/01r5fail
@@ -0,0 +1,27 @@
+
+
+# create a raid5, fail and remove a drive during initial sync
+# Add two more, fail and remove one
+# wait for sync to complete, fail, remove, re-add
+
+mdadm -CR $md0 -l5 -n4 $dev0 $dev1 $dev2 $dev3
+check recovery
+mdadm $md0 --fail $dev3
+sleep 1
+check nosync
+check state UUU_
+
+mdadm $md0 --add $dev4 $dev5
+check recovery
+check wait
+mdadm $md0 --fail $dev0
+mdadm $md0 --remove $dev3 $dev0
+check recovery
+check state _UUU
+
+mdadm $md0 -a $dev3
+check recovery
+check wait
+check state UUUU
+
+mdadm -S $md0
\ No newline at end of file
diff --git a/tests/01r5integ b/tests/01r5integ
new file mode 100644
index 0000000..48676a2
--- /dev/null
+++ b/tests/01r5integ
@@ -0,0 +1,33 @@
+
+# Check integrity of raid5 in degraded mode
+# Create a 4 disk raid5, create a filesystem and
+# sha1sum it with each device failed
+
+if [ "$INTEGRITY" != "yes" ]; then
+ echo -ne 'skipping... '
+ exit 0
+fi
+
+for layout in ls rs la ra
+do
+ mdadm -CR $md0 -l5 --layout $layout -n4 $dev0 $dev1 $dev2 $dev3
+ check wait
+ tar cf - /etc > $md0
+ sum=`sha1sum $md0`
+
+ for i in $dev0 $dev1 $dev2 $dev3
+ do
+ mdadm $md0 -f $i
+ mdadm $md0 -r $i
+ blockdev --flushbufs $md0
+ sum1=`sha1sum $md0`
+ if [ "$sum" != "$sum1" ]
+ then
+ echo $sum does not match $sum1 with $i missing
+ exit 1
+ fi
+ mdadm $md0 -a $i
+ while ! (check state 'U*'); do check wait; sleep 0.2; done
+ done
+ mdadm -S $md0
+done
diff --git a/tests/01r5integ.broken b/tests/01r5integ.broken
new file mode 100644
index 0000000..2073763
--- /dev/null
+++ b/tests/01r5integ.broken
@@ -0,0 +1,7 @@
+fails rarely
+
+Fails about 1 in every 30 runs with a sha mismatch error:
+
+ c49ab26e1b01def7874af9b8a6d6d0c29fdfafe6 /dev/md0 does not match
+ 15dc2f73262f811ada53c65e505ceec9cf025cb9 /dev/md0 with /dev/loop3
+ missing
diff --git a/tests/01raid6integ b/tests/01raid6integ
new file mode 100644
index 0000000..12f4d81
--- /dev/null
+++ b/tests/01raid6integ
@@ -0,0 +1,57 @@
+
+# Check integrity of raid6 in degraded modes
+# Create a 5 disk raid6, dump some data to it, then
+# sha1sum it with different pairs of devices failed
+
+if [ "$INTEGRITY" != "yes" ]; then
+ echo -ne 'skipping... '
+ exit 0
+fi
+
+layouts='ls rs la ra'
+lv=`uname -r`
+if expr $lv '>=' 2.6.30 > /dev/null
+then
+ layouts="$layouts parity-first ddf-zero-restart ddf-N-restart ddf-N-continue \
+ left-asymmetric-6 right-asymmetric-6 left-symmetric-6 right-symmetric-6 parity-first-6"
+fi
+
+for layout in $layouts
+do
+ mdadm -CR $md0 -l6 --layout $layout -n5 $dev0 $dev1 $dev2 $dev3 $dev4
+ check wait
+ tar cf - /etc > $md0
+ sum=`sha1sum $md0`
+
+ totest=
+ for second in $dev0 $dev1 $dev2 $dev3 $dev4
+ do
+ mdadm $md0 -f $second
+ mdadm $md0 -r $second
+ blockdev --flushbufs $md0
+ sum1=`sha1sum $md0`
+ if [ "$sum" != "$sum1" ]
+ then
+ echo $sum does not match $sum1 with $second missing
+ exit 1
+ fi
+ for first in $totest
+ do
+ mdadm $md0 -f $first
+ mdadm $md0 -r $first
+ blockdev --flushbufs $md0
+ sum1=`sha1sum $md0`
+ if [ "$sum" != "$sum1" ]
+ then
+ echo $sum does not match $sum1 with $first and $second missing
+ exit 1
+ fi
+ mdadm $md0 -a $first
+ while ! (check state 'U*_U*'); do check wait; sleep 0.2; done
+ done
+ mdadm $md0 -a $second
+ while ! (check state 'U*'); do check wait; sleep 0.2; done
+ totest="$totest $second"
+ done
+ mdadm -S $md0
+done
diff --git a/tests/01raid6integ.broken b/tests/01raid6integ.broken
new file mode 100644
index 0000000..1df735f
--- /dev/null
+++ b/tests/01raid6integ.broken
@@ -0,0 +1,7 @@
+fails infrequently
+
+Fails about 1 in 5 with a sha mismatch:
+
+ 8286c2bc045ae2cfe9f8b7ae3a898fa25db6926f /dev/md0 does not match
+ a083a0738b58caab37fd568b91b177035ded37df /dev/md0 with /dev/loop2 and
+ /dev/loop3 missing
diff --git a/tests/01replace b/tests/01replace
new file mode 100644
index 0000000..6223a22
--- /dev/null
+++ b/tests/01replace
@@ -0,0 +1,52 @@
+set -x -e
+
+## test --replace for raid5 raid6 raid1 and raid10
+#1/ after replace, can remove replaced device
+#2/ after --replace-with cannot remove the 'with' device
+#3/ preserve integrity with concurrent failure
+
+for level in 1 5 6 10
+do
+ dd if=/dev/zero of=$dev4 bs=1M || true
+ dd if=/dev/zero of=$dev5 bs=1M || true
+ mdadm -CR $md0 -l $level -n4 -x2 $devlist5
+ dd if=/dev/urandom of=$md0 bs=1M || true
+ sum=`sha1sum < $md0`
+ check wait
+ mdadm $md0 --replace $dev1
+ check wait
+ mdadm $md0 --remove $dev1
+ mdadm $md0 --remove $dev5 && exit 1
+ mdadm -S $md0
+ dd if=/dev/zero of=$dev4 bs=1M || true
+ dd if=/dev/zero of=$dev5 bs=1M || true
+ mdadm -CR $md0 -l $level -n4 -x2 $devlist5
+ check wait
+ sum1=`sha1sum < $md0`
+ [ "$sum" == "$sum1" ]
+
+ mdadm $md0 --replace $dev1 --with $dev4
+ check wait
+ mdadm $md0 --remove $dev1
+ mdadm $md0 --remove $dev5
+ mdadm $md0 --remove $dev4 && exit 1
+
+ mdadm $md0 --add $dev1 $dev5
+ mdadm $md0 --replace $dev0
+ sleep 1
+ mdadm $md0 --fail $dev2
+ check wait
+ sum2=`sha1sum < $md0`
+ [ "$sum" == "$sum2" ]
+
+ mdadm $md0 --remove $dev0 $dev2
+ mdadm $md0 --add $dev0 $dev2
+ mdadm $md0 --replace $dev3
+ sleep 1
+ mdadm $md0 --fail $dev0 $dev2
+ check wait
+ sum3=`sha1sum < $md0`
+ [ "$sum" == "$sum3" ]
+
+ mdadm -S $md0
+done
diff --git a/tests/02lineargrow b/tests/02lineargrow
new file mode 100644
index 0000000..d17e232
--- /dev/null
+++ b/tests/02lineargrow
@@ -0,0 +1,30 @@
+
+# create a linear array, and add more drives to it.
+
+if [ "$LINEAR" != "yes" ]; then
+ echo -ne 'skipping... '
+ exit 0
+fi
+
+for e in 0.90 1 1.1 1.2
+do
+ case $e in
+ 0.90 ) sz=$mdsize0 ;;
+ 1 ) sz=$mdsize2_l ;;
+ 1.0 ) sz=$mdsize1 ;;
+ 1.1 ) sz=$mdsize1_l ;;
+ 1.2 ) sz=$mdsize2_l ;;
+ esac
+ mdadm -CRf $md0 --level linear -e $e --raid-disks=1 $dev1
+ testdev $md0 1 $sz 1
+
+ mdadm --grow $md0 --add $dev2
+ testdev $md0 2 $sz 1
+
+ mdadm --grow $md0 --add $dev3
+ testdev $md0 3 $sz 1
+
+ mdadm -S $md0
+ mdadm --zero /dev/loop2
+ mdadm --zero /dev/loop3
+done
diff --git a/tests/02r1add b/tests/02r1add
new file mode 100644
index 0000000..757f696
--- /dev/null
+++ b/tests/02r1add
@@ -0,0 +1,40 @@
+
+# Make a raid1, add a device, then remove it again.
+
+mdadm -CR $md0 -l1 -n2 -x1 $dev0 $dev1 $dev2
+check resync
+check wait
+check state UU
+
+mdadm --grow $md0 -n 3
+check recovery
+check wait
+check state UUU
+
+mdadm $md0 --fail $dev0
+check state _UU
+
+mdadm --grow $md0 -n 2
+check state UU
+
+mdadm -S $md0
+# same again for version-1
+
+
+mdadm -CR $md0 -l1 -n2 -e1.2 -x1 $dev0 $dev1 $dev2
+check resync
+check wait
+check state UU
+
+mdadm --grow $md0 -n 3
+check recovery
+check wait
+check state UUU
+
+mdadm $md0 --fail $dev0
+check state _UU
+
+mdadm --grow $md0 -n 2
+check state UU
+
+mdadm -S $md0
diff --git a/tests/02r1grow b/tests/02r1grow
new file mode 100644
index 0000000..5754c88
--- /dev/null
+++ b/tests/02r1grow
@@ -0,0 +1,36 @@
+
+
+# create a small raid1 array, make it larger. Then make it smaller
+
+mdadm -CR $md0 -e 0.90 --level raid1 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3
+check wait
+check state UUU
+testdev $md0 1 $[size/2] 1
+
+mdadm --grow $md0 --size max
+check resync
+check wait
+testdev $md0 1 $mdsize0 1
+
+mdadm --grow $md0 --size $[size/2]
+check nosync
+testdev $md0 1 $[size/2] 1
+
+mdadm -S $md0
+
+# same again with version 1.1 superblock
+mdadm -CR $md0 --level raid1 --metadata=1.1 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3
+check wait
+check state UUU
+testdev $md0 1 $[size/2] 1
+
+mdadm --grow $md0 --size max
+check resync
+check wait
+testdev $md0 1 $mdsize1_l 1
+
+mdadm --grow $md0 --size $[size/2]
+check nosync
+testdev $md0 1 $[size/2] 1
+
+mdadm -S $md0
diff --git a/tests/02r5grow b/tests/02r5grow
new file mode 100644
index 0000000..2da78ee
--- /dev/null
+++ b/tests/02r5grow
@@ -0,0 +1,53 @@
+
+
+# create a small raid5 array, make it larger. Then make it smaller
+
+mdadm -CR $md0 -e0.90 --level raid5 --chunk=64 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3
+check wait
+check state UUU
+testdev $md0 2 $[size/2] 32
+
+mdadm --grow $md0 --size max
+check resync
+check wait
+testdev $md0 2 $mdsize0 32
+
+mdadm --grow $md0 --size $[size/2]
+check nosync
+testdev $md0 2 $[size/2] 32
+
+mdadm -S $md0
+
+# same again with version 1.1 superblock
+mdadm -CR $md0 --level raid5 --metadata=1.1 --chunk=128 --raid-disks 4 --size $[size/2] $dev1 $dev2 $dev3 $dev4
+check wait
+check state UUUU
+testdev $md0 3 $[size/2] 128
+
+mdadm --grow $md0 --size max
+check resync
+check wait
+testdev $md0 3 $[mdsize1_l] 128
+
+mdadm --grow $md0 --size $[size/2]
+check nosync
+testdev $md0 3 $[size/2] 128
+
+mdadm -S $md0
+
+# create a raid5 array and change the chunk
+mdadm -CR $md0 --level raid5 --metadata=1.1 --chunk=32 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3
+check wait
+check state UUU
+check chunk 32
+
+mdadm $md0 --grow --chunk=64
+check reshape
+check wait
+check chunk 64
+
+mdadm -S $md0
+mdadm -A $md0 $dev1 $dev2 $dev3
+check state UUU
+check chunk 64
+mdadm -S $md0
diff --git a/tests/02r6grow b/tests/02r6grow
new file mode 100644
index 0000000..759e627
--- /dev/null
+++ b/tests/02r6grow
@@ -0,0 +1,36 @@
+
+
+# create a small raid6 array, make it larger. Then make it smaller
+
+mdadm -CR $md0 -e 0.90 --level raid6 --chunk=64 --raid-disks 4 --size $[size/2] $dev1 $dev2 $dev3 $dev4
+check wait
+check state UUUU
+testdev $md0 2 $[size/2] 32
+
+mdadm --grow $md0 --size max
+check resync
+check wait
+testdev $md0 2 $mdsize0 32
+
+mdadm --grow $md0 --size $[size/2]
+check nosync
+testdev $md0 2 $[size/2] 32
+
+mdadm -S $md0
+
+# same again with version 1.1 superblock
+mdadm -CR $md0 --level raid6 --metadata=1.1 --chunk=128 --raid-disks 4 --size $[size/2] $dev1 $dev2 $dev3 $dev4
+check wait
+check state UUUU
+testdev $md0 2 $[size/2] 128
+
+mdadm --grow $md0 --size max
+check resync
+check wait
+testdev $md0 2 $[mdsize1_l] 128
+
+mdadm --grow $md0 --size $[size/2]
+check nosync
+testdev $md0 2 $[size/2] 128
+
+mdadm -S $md0
diff --git a/tests/03assem-incr b/tests/03assem-incr
new file mode 100644
index 0000000..38880a7
--- /dev/null
+++ b/tests/03assem-incr
@@ -0,0 +1,23 @@
+set -x -e
+
+# Test interaction between -I and -A
+# there are locking issues too, but those are hard to test for.
+#
+# Here just test that a partly "-I" assembled array can
+# be completed with "-A"
+
+levels=(raid0 raid1 raid5)
+
+if [ "$LINEAR" == "yes" ]; then
+ levels+=( linear )
+fi
+
+for l in ${levels[@]}
+do
+ mdadm -CR $md0 -l $l -n5 $dev0 $dev1 $dev2 $dev3 $dev4 --assume-clean
+ mdadm -S md0
+ mdadm -I $dev1
+ mdadm -I $dev3
+ mdadm -A /dev/md0 $dev0 $dev1 $dev2 $dev3 $dev4
+ mdadm -S /dev/md0
+done
diff --git a/tests/03r0assem b/tests/03r0assem
new file mode 100644
index 0000000..f7c29e8
--- /dev/null
+++ b/tests/03r0assem
@@ -0,0 +1,141 @@
+
+# create a raid0 array from 3 devices, and assemble it in a multitude of ways.
+# explicitly list devices
+# uuid, md-minor on command line with wildcard devices
+# mdadm.conf file
+
+mdadm -CR $md2 -l0 -n3 $dev0 $dev1 $dev2
+check raid0
+tst="testdev $md2 3 $mdsize1_l 512"
+$tst
+uuid=`mdadm -Db $md2 | sed 's/.*UUID=//'`
+mdadm -S $md2
+
+mdadm -A $md2 $dev0 $dev1 $dev2
+$tst
+mdadm -S $md2
+
+mdadm -A $md2 -u $uuid $devlist
+$tst
+mdadm -S $md2
+
+mdadm --assemble $md2 --name=2 $devlist
+$tst
+mdadm -S $md2
+
+conf=$targetdir/mdadm.conf
+{
+ echo DEVICE $devlist
+ echo array $md2 UUID=$uuid
+} > $conf
+
+mdadm -As -c $conf $md2
+$tst
+mdadm -S $md2
+
+{
+ echo DEVICE $devlist
+ echo array $md2 name=2
+} > $conf
+
+mdadm -As -c $conf $md2
+$tst
+mdadm -S $md2
+
+
+{
+ echo DEVICE $devlist
+ echo array $md2 devices=$dev0,$dev1,$dev2
+} > $conf
+
+mdadm -As -c $conf $md2
+$tst
+
+echo "DEVICE $devlist" > $conf
+mdadm -Db $md2 >> $conf
+mdadm -S $md2
+
+mdadm --assemble --scan --config=$conf $md2
+$tst
+mdadm -S $md2
+
+echo " metadata=0.90 devices=$dev0,$dev1,$dev2" >> $conf
+mdadm --assemble --scan --config=$conf $md2
+$tst
+mdadm -S $md2
+
+if [ "$LINEAR" != "yes" ]; then
+ echo -ne 'skipping... '
+ exit 0
+fi
+
+### Now for version 0...
+
+mdadm --zero-superblock $dev0 $dev1 $dev2
+mdadm -CR $md2 -llinear --metadata=0.90 -n3 $dev0 $dev1 $dev2
+check linear
+tst="testdev $md2 3 $mdsize0 1"
+$tst
+
+uuid=`mdadm -Db $md2 | sed 's/.*UUID=//'`
+mdadm -S $md2
+
+mdadm -A $md2 $dev0 $dev1 $dev2
+$tst
+mdadm -S $md2
+
+mdadm -A $md2 -u $uuid $devlist
+$tst
+mdadm -S $md2
+
+mdadm --assemble $md2 --super-minor=2 $devlist #
+$tst
+mdadm -S $md2
+
+conf=$targetdir/mdadm.conf
+{
+ echo DEVICE $devlist
+ echo array $md2 UUID=$uuid
+} > $conf
+
+mdadm -As -c $conf $md2
+$tst
+mdadm -S $md2
+
+{
+ echo DEVICE $devlist
+ echo array $md2 super-minor=2
+} > $conf
+
+mdadm -As -c $conf $md2
+$tst
+mdadm -S $md2
+
+
+{
+ echo DEVICE $devlist
+ echo array $md2 devices=$dev0,$dev1,$dev2
+} > $conf
+
+mdadm -As -c $conf $md2
+$tst
+
+echo "DEVICE $devlist" > $conf
+mdadm -Db $md2 >> $conf
+mdadm -S $md2
+
+mdadm --assemble --scan --config=$conf $md2
+$tst
+mdadm -S $md2
+
+echo " metadata=1 devices=$dev0,$dev1,$dev2" >> $conf
+mdadm --assemble --scan --config=$conf $md2
+$tst
+mdadm -S $md2
+
+# Now use incremental assembly.
+mdadm -I --config=$conf $dev0
+mdadm -I --config=$conf $dev1
+mdadm -I --config=$conf $dev2
+$tst
+mdadm -S $md2
diff --git a/tests/03r5assem b/tests/03r5assem
new file mode 100644
index 0000000..0c7fb8c
--- /dev/null
+++ b/tests/03r5assem
@@ -0,0 +1,109 @@
+
+# create a raid5 array and assemble it in various ways,
+# including with missing devices.
+
+mdadm -CR -e 0.90 $md1 -l5 -n3 $dev0 $dev1 $dev2
+tst="check raid5 ;testdev $md1 2 $mdsize0 512 ; mdadm -S $md1"
+uuid=`mdadm -Db $md1 | sed 's/.*UUID=//'`
+check wait
+eval $tst
+
+mdadm -A $md1 $dev0 $dev1 $dev2
+eval $tst
+
+mdadm -A $md1 -u $uuid $devlist
+eval $tst
+
+mdadm -A $md1 -m 1 $devlist
+eval $tst
+
+
+conf=$targetdir/mdadm.conf
+{
+ echo DEVICE $devlist
+ echo array $md1 UUID=$uuid
+} > $conf
+
+mdadm -As -c $conf $md1
+eval $tst
+
+{
+ echo DEVICE $devlist
+ echo array $md1 super-minor=1
+} > $conf
+
+mdadm -As -c $conf
+eval $tst
+
+{
+ echo DEVICE $devlist
+ echo array $md1 devices=$dev0,$dev1,$dev2
+} > $conf
+
+mdadm -As -c $conf
+
+echo "DEVICE $devlist" > $conf
+mdadm -Db $md1 >> $conf
+eval $tst
+
+mdadm --assemble --scan --config=$conf $md1
+eval $tst
+
+echo " metadata=0.90 devices=$dev0,$dev1,$dev2" >> $conf
+mdadm --assemble --scan --config=$conf $md1
+eval $tst
+
+### Now with a missing device
+
+mdadm -AR $md1 $dev0 $dev2 #
+check state U_U
+eval $tst
+
+mdadm -A $md1 -u $uuid $devlist
+check state U_U
+eval $tst
+
+mdadm -A $md1 -m 1 $devlist
+check state U_U
+eval $tst
+
+
+conf=$targetdir/mdadm.conf
+{
+ echo DEVICE $devlist
+ echo array $md1 UUID=$uuid
+} > $conf
+
+mdadm -As -c $conf $md1
+check state U_U
+eval $tst
+
+{
+ echo DEVICE $devlist
+ echo array $md1 super-minor=1
+} > $conf
+
+mdadm -As -c $conf
+check state U_U
+eval $tst
+
+{
+ echo DEVICE $devlist
+ echo array $md1 devices=$dev0,$dev1,$dev2
+} > $conf
+
+mdadm -As -c $conf
+
+echo "DEVICE $devlist" > $conf
+mdadm -Db $md1 >> $conf
+check state U_U
+eval $tst
+
+mdadm --assemble --scan --config=$conf $md1
+check state U_U
+eval $tst
+
+echo " metadata=0.90 devices=$dev0,$dev1,$dev2" >> $conf
+mdadm --assemble --scan --config=$conf $md1
+check state U_U
+eval $tst
diff --git a/tests/03r5assem-failed b/tests/03r5assem-failed
new file mode 100644
index 0000000..d38241d
--- /dev/null
+++ b/tests/03r5assem-failed
@@ -0,0 +1,12 @@
+
+# Create an array, fail one device while array is active, stop array,
+# then re-assemble listing the failed device first.
+
+mdadm -CR $md1 -l5 -n4 $dev0 $dev1 $dev2 $dev3
+check wait
+
+echo 2000 > /sys/block/md1/md/safe_mode_delay
+mkfs $md1
+mdadm $md1 -f $dev0
+mdadm -S $md1
+mdadm -A $md1 $dev0 $dev1 $dev2 $dev3 || exit 1
diff --git a/tests/03r5assemV1 b/tests/03r5assemV1
new file mode 100644
index 0000000..bca0c58
--- /dev/null
+++ b/tests/03r5assemV1
@@ -0,0 +1,128 @@
+
+# create a v-1 raid5 array and assemble in various ways
+
+mdadm -CR -e1 --name one $md1 -l5 -n3 -x2 $dev0 $dev1 $dev2 $dev3 $dev4
+tst="check raid5 ;testdev $md1 2 $mdsize1 512 ; mdadm -S $md1"
+uuid=`mdadm -Db $md1 | sed 's/.*UUID=//'`
+check wait
+
+eval $tst
+
+mdadm -A $md1 $dev0 $dev1 $dev2
+mdadm $md1 --add $dev3 $dev4
+check spares 2
+eval $tst
+
+mdadm -A $md1 -u $uuid $devlist
+check spares 2
+eval $tst
+
+mdadm -A $md1 --name one $devlist
+check spares 2
+eval $tst
+
+
+conf=$targetdir/mdadm.conf
+{
+ echo DEVICE $devlist
+ echo array $md1 UUID=$uuid
+} > $conf
+
+mdadm -As -c $conf $md1
+eval $tst
+
+{
+ echo DEVICE $devlist
+ echo array $md1 name=one
+} > $conf
+
+mdadm -As -c $conf
+eval $tst
+
+{
+ echo DEVICE $devlist
+ echo array $md1 devices=$dev0,$dev1,$dev2,$dev3,$dev4
+} > $conf
+
+mdadm -As -c $conf
+
+echo "DEVICE $devlist" > $conf
+mdadm -Db $md1 >> $conf
+eval $tst
+mdadm --assemble --scan --config=$conf $md1
+eval $tst
+echo PING >&2
+
+echo " metadata=1.0 devices=$dev0,$dev1,$dev2,$dev3,$dev4" >> $conf
+mdadm --assemble --scan --config=$conf $md1
+eval $tst
+
+### Now with a missing device
+# We don't want the recovery to complete while we are
+# messing about here.
+echo 100 > /proc/sys/dev/raid/speed_limit_max
+echo 100 > /proc/sys/dev/raid/speed_limit_min
+
+mdadm -AR $md1 $dev0 $dev2 $dev3 $dev4 #
+check state U_U
+check spares 1
+eval $tst
+
+mdadm -A $md1 -u $uuid $devlist
+check state U_U
+eval $tst
+
+mdadm -A $md1 --name=one $devlist
+check state U_U
+check spares 1
+eval $tst
+
+
+conf=$targetdir/mdadm.conf
+{
+ echo DEVICE $devlist
+ echo array $md1 UUID=$uuid
+} > $conf
+
+mdadm -As -c $conf $md1
+check state U_U
+eval $tst
+
+{
+ echo DEVICE $devlist
+ echo array $md1 name=one
+} > $conf
+
+mdadm -As -c $conf
+check state U_U
+eval $tst
+
+{
+ echo DEVICE $devlist
+ echo array $md1 devices=$dev0,$dev1,$dev2
+} > $conf
+
+mdadm -As -c $conf
+
+echo "DEVICE $devlist" > $conf
+mdadm -Db $md1 >> $conf
+check state U_U
+eval $tst
+
+mdadm --assemble --scan --config=$conf $md1
+check state U_U
+eval $tst
+
+echo " metadata=1.0 devices=$dev0,$dev1,$dev2" >> $conf
+mdadm --assemble --scan --config=$conf $md1
+check state U_U
+eval $tst
+
+# And now assemble with -I
+mdadm -Ss
+mdadm -I -c $conf $dev0
+mdadm -I -c $conf $dev1
+mdadm -I -c $conf $dev2
+eval $tst
+echo 2000 > /proc/sys/dev/raid/speed_limit_max
+echo 1000 > /proc/sys/dev/raid/speed_limit_min
diff --git a/tests/04r0update b/tests/04r0update
new file mode 100644
index 0000000..c495f34
--- /dev/null
+++ b/tests/04r0update
@@ -0,0 +1,26 @@
+
+# create a linear array, re-assemble with a different super-minor
+
+if [ "$LINEAR" != "yes" ]; then
+ echo -ne 'skipping... '
+ exit 0
+fi
+
+mdadm -CR -e 0.90 $md0 -llinear -n3 $dev0 $dev1 $dev2
+testdev $md0 3 $mdsize0 1
+minor1=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'`
+mdadm -S /dev/md0
+
+mdadm -A $md1 $dev0 $dev1 $dev2
+minor2=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'`
+mdadm -S /dev/md1
+
+mdadm -A $md1 --update=super-minor $dev0 $dev1 $dev2
+minor3=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'`
+mdadm -S /dev/md1
+
+case "$minor1 $minor2 $minor3" in
+ "0 0 1" ) ;;
+ * ) echo >&2 "ERROR minors should be '0 0 1' but are '$minor1 $minor2 $minor3'"
+ exit 1
+esac
diff --git a/tests/04r1update b/tests/04r1update
new file mode 100644
index 0000000..e22965b
--- /dev/null
+++ b/tests/04r1update
@@ -0,0 +1,15 @@
+set -i
+
+# create a raid1 array, let it sync, then re-assemble with a force-sync
+
+mdadm -CR $md0 -l1 -n2 $dev0 $dev1
+check wait
+mdadm -S $md0
+
+mdadm -A $md0 $dev0 $dev1
+check nosync
+mdadm -S $md0
+
+mdadm -A $md0 -U resync $dev0 $dev1
+check resync
+mdadm -S $md0
diff --git a/tests/04r5swap b/tests/04r5swap
new file mode 100644
index 0000000..5373a60
--- /dev/null
+++ b/tests/04r5swap
@@ -0,0 +1,18 @@
+
+# make a raid5 array, byte swap the superblocks, then assemble...
+
+mdadm -CR $md0 -e 0.90 -l5 -n4 $dev0 $dev1 $dev2 $dev3
+sleep 4
+mdadm -S $md0
+
+mdadm -E --metadata=0 $dev1 > $targetdir/d1
+for d in $dev0 $dev1 $dev2 $dev3
+do $dir/swap_super $d
+done
+mdadm -E --metadata=0.swap $dev1 > $targetdir/d1s
+diff -u $targetdir/d1 $targetdir/d1s
+
+mdadm --assemble --update=byteorder $md0 $dev0 $dev1 $dev2 $dev3
+sleep 3
+check recovery
+mdadm -S $md0
diff --git a/tests/04r5swap.broken b/tests/04r5swap.broken
new file mode 100644
index 0000000..e38987d
--- /dev/null
+++ b/tests/04r5swap.broken
@@ -0,0 +1,7 @@
+always fails
+
+Fails with errors:
+
+ mdadm: /dev/loop0 has no superblock - assembly aborted
+
+ ERROR: no recovery happening
diff --git a/tests/04update-metadata b/tests/04update-metadata
new file mode 100644
index 0000000..2b72a30
--- /dev/null
+++ b/tests/04update-metadata
@@ -0,0 +1,52 @@
+set -xe
+
+# test converting v0.90 to v1.0
+# check for different levels
+# check it fails for non-v0.90
+# check it fails during reshape or recovery
+# check it fails when bitmap is present
+
+dlist="$dev0 $dev1 $dev2 $dev3"
+
+for ls in linear/4 raid1/1 raid5/3 raid6/2
+do
+ s=${ls#*/} l=${ls%/*}
+ if [[ $l == 'raid1' ]]; then
+ mdadm -CR --assume-clean -e 0.90 $md0 --level $l -n 4 $dlist
+ else
+ mdadm -CR --assume-clean -e 0.90 $md0 --level $l -n 4 -c 64 $dlist
+ fi
+ testdev $md0 $s 19904 64
+ mdadm -S $md0
+ mdadm -A $md0 --update=metadata $dlist
+ testdev $md0 $s 19904 64 check
+ mdadm -S $md0
+done
+
+if mdadm -A $md0 --update=metadata $dlist
+then echo >&2 should fail with v1.0 metadata
+ exit 1
+fi
+
+mdadm -CR -e 0.90 $md0 --level=6 -n4 -c32 $dlist
+mdadm -S $md0
+
+if mdadm -A $md0 --update=metadata $dlist
+then echo >&2 should fail during resync
+ exit 1
+fi
+mdadm -A $md0 $dlist
+mdadm --wait $md0 || true
+mdadm -S $md0
+
+# should succeed now
+mdadm -A $md0 --update=metadata $dlist
+
+mdadm -S /dev/md0
+mdadm -CR --assume-clean -e 0.90 $md0 --level=6 -n4 -c32 $dlist --bitmap=internal
+mdadm -S $md0
+
+if mdadm -A $md0 --update=metadata $dlist
+then echo >&2 should fail when bitmap present
+ exit 1
+fi
diff --git a/tests/04update-uuid b/tests/04update-uuid
new file mode 100644
index 0000000..a4409e7
--- /dev/null
+++ b/tests/04update-uuid
@@ -0,0 +1,82 @@
+set -x
+
+# create an array, then change the uuid.
+
+mdadm -CR --assume-clean $md0 -l5 -n3 $dev0 $dev1 $dev2
+mdadm -S /dev/md0
+mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
+no_errors
+mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
+ echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
+}
+mdadm -S /dev/md0
+
+# try v1 superblock
+
+mdadm -CR --assume-clean -e1 $md0 -l5 -n3 $dev0 $dev1 $dev2
+mdadm -S /dev/md0
+mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
+no_errors
+mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
+ echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
+}
+mdadm -S /dev/md0
+
+
+# now if we have a bitmap, that needs updating too.
+rm -f $targetdir/bitmap
+mdadm -CR --assume-clean -b $targetdir/bitmap $md0 -l5 -n3 $dev0 $dev1 $dev2
+mdadm -S /dev/md0
+mdadm -A /dev/md0 -b $targetdir/bitmap --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
+no_errors
+mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
+ echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
+}
+if mdadm -X $targetdir/bitmap | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 ||
+ mdadm -X $targetdir/bitmap | grep -s > /dev/null 67452301:efcdab89:98badcfe:10325476
+then : ; else
+ echo Wrong uuid; mdadm -X $targetdir/bitmap ; exit 2;
+fi
+mdadm -S /dev/md0
+
+# and bitmap for version1
+rm -f $targetdir/bitmap
+mdadm -CR --assume-clean -e1.1 -b $targetdir/bitmap $md0 -l5 -n3 $dev0 $dev1 $dev2
+mdadm -S /dev/md0
+mdadm -A /dev/md0 -b $targetdir/bitmap --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
+no_errors
+mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
+ echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
+}
+# -X cannot tell which byteorder to use for the UUID, so allow both.
+if mdadm -X $targetdir/bitmap | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 ||
+ mdadm -X $targetdir/bitmap | grep -s > /dev/null 67452301:efcdab89:98badcfe:10325476
+then : ; else
+ echo Wrong uuid; mdadm -X $targetdir/bitmap ; exit 2;
+fi
+mdadm -S /dev/md0
+
+# Internal bitmaps too.
+mdadm -CR --assume-clean -b internal --bitmap-chunk 4 $md0 -l5 -n3 $dev0 $dev1 $dev2
+mdadm -S /dev/md0
+mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
+no_errors
+mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
+ echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
+}
+mdadm -X $dev0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
+ echo Wrong uuid; mdadm -X $dev0; exit 2;
+}
+mdadm -S /dev/md0
+
+mdadm -CR --assume-clean -e1.2 -b internal --bitmap-chunk=4 $md0 -l5 -n3 $dev0 $dev1 $dev2
+mdadm -S /dev/md0
+mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2
+no_errors
+mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
+ echo Wrong uuid; mdadm -D /dev/md0 ; exit 2;
+}
+mdadm -X $dev0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || {
+ echo Wrong uuid; mdadm -X $dev0; exit 2;
+}
+mdadm -S /dev/md0
diff --git a/tests/05r1-add-internalbitmap b/tests/05r1-add-internalbitmap
new file mode 100644
index 0000000..4e20305
--- /dev/null
+++ b/tests/05r1-add-internalbitmap
@@ -0,0 +1,20 @@
+#
+# create a raid1 without any bitmap, add the bitmap and then write to
+# the device. This should catch the case where the bitmap is created
+# but not reloaded correctly, such as the case fixed by
+# 4474ca42e2577563a919fd3ed782e2ec55bf11a2
+#
+mdadm --create --run $md0 --metadata=0.9 --level=1 -n2 --delay=1 $dev1 $dev2
+check wait
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -Gb internal --bitmap-chunk=4 $md0
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
+
+# Re-assemble the array and verify the bitmap is still present
+mdadm --assemble $md0 $dev1 $dev2
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
diff --git a/tests/05r1-add-internalbitmap-v1a b/tests/05r1-add-internalbitmap-v1a
new file mode 100644
index 0000000..721a41c
--- /dev/null
+++ b/tests/05r1-add-internalbitmap-v1a
@@ -0,0 +1,20 @@
+#
+# create a raid1 without any bitmap, add the bitmap and then write to
+# the device. This should catch the case where the bitmap is created
+# but not reloaded correctly, such as the case fixed by
+# 4474ca42e2577563a919fd3ed782e2ec55bf11a2
+#
+mdadm --create --run $md0 --metadata=1.0 --level=1 -n2 --delay=1 $dev1 $dev2
+check wait
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -Gb internal --bitmap-chunk=4 $md0
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
+
+# Re-assemble the array and verify the bitmap is still present
+mdadm --assemble $md0 $dev1 $dev2
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
diff --git a/tests/05r1-add-internalbitmap-v1b b/tests/05r1-add-internalbitmap-v1b
new file mode 100644
index 0000000..da78fd6
--- /dev/null
+++ b/tests/05r1-add-internalbitmap-v1b
@@ -0,0 +1,20 @@
+#
+# create a raid1 without any bitmap, add the bitmap and then write to
+# the device. This should catch the case where the bitmap is created
+# but not reloaded correctly, such as the case fixed by
+# 4474ca42e2577563a919fd3ed782e2ec55bf11a2
+#
+mdadm --create --run $md0 --metadata=1.1 --level=1 -n2 --delay=1 $dev1 $dev2
+check wait
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -Gb internal --bitmap-chunk=4 $md0
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
+
+# Re-assemble the array and verify the bitmap is still present
+mdadm --assemble $md0 $dev1 $dev2
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
diff --git a/tests/05r1-add-internalbitmap-v1c b/tests/05r1-add-internalbitmap-v1c
new file mode 100644
index 0000000..9f2f128
--- /dev/null
+++ b/tests/05r1-add-internalbitmap-v1c
@@ -0,0 +1,20 @@
+#
+# create a raid1 without any bitmap, add the bitmap and then write to
+# the device. This should catch the case where the bitmap is created
+# but not reloaded correctly, such as the case fixed by
+# 4474ca42e2577563a919fd3ed782e2ec55bf11a2
+#
+mdadm --create --run $md0 --metadata=1.2 --level=1 -n2 --delay=1 $dev1 $dev2
+check wait
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -Gb internal --bitmap-chunk=4 $md0
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
+
+# Re-assemble the array and verify the bitmap is still present
+mdadm --assemble $md0 $dev1 $dev2
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
diff --git a/tests/05r1-bitmapfile b/tests/05r1-bitmapfile
new file mode 100644
index 0000000..f384f0e
--- /dev/null
+++ b/tests/05r1-bitmapfile
@@ -0,0 +1,49 @@
+
+#
+# create a raid1 with a bitmap file
+#
+bmf=$targetdir/bitmap
+rm -f $bmf
+mdadm --create --run $md0 --level=1 -n2 --delay=1 --bitmap $bmf $dev1 $dev2
+check wait
+testdev $md0 1 $mdsize1a 64
+mdadm -S $md0
+
+mdadm --assemble $md0 --bitmap=$bmf $dev1 $dev2
+testdev $md0 1 $mdsize1a 64
+dirty1=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
+ exit 1
+fi
+mdadm $md0 -f $dev1
+testdev $md0 1 $mdsize1a 64
+sleep 4
+dirty3=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+if [ $dirty3 -lt 400 ]
+then
+ echo >&2 "ERROR dirty count $dirty3 is too small"
+ exit 2
+fi
+
+mdadm -S $md0
+
+mdadm --assemble -R $md0 --bitmap=$bmf $dev2
+dirty4=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+mdadm --zero $dev1 # force --add, not --re-add
+mdadm $md0 --add $dev1
+#it is too fast# check recovery
+
+check wait
+sleep 4
+dirty5=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
+ exit 1
+fi
+
+mdadm -S $md0
diff --git a/tests/05r1-failfast b/tests/05r1-failfast
new file mode 100644
index 0000000..823dd6f
--- /dev/null
+++ b/tests/05r1-failfast
@@ -0,0 +1,74 @@
+
+# create a simple mirror and check failfast flag works
+mdadm -CR $md0 -e1.2 --level=raid1 --failfast -n2 $dev0 $dev1
+check raid1
+if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null
+then
+ die "failfast missing"
+fi
+
+# Removing works with the failfast flag
+mdadm $md0 -f $dev0
+mdadm $md0 -r $dev0
+if grep -v failfast /sys/block/md0/md/rd1/state > /dev/null
+then
+ die "failfast missing"
+fi
+
+# Adding works with the failfast flag
+mdadm $md0 -a --failfast $dev0
+check wait
+if grep -v failfast /sys/block/md0/md/rd0/state > /dev/null
+then
+ die "failfast missing"
+fi
+
+mdadm -S $md0
+
+# Assembling works with the failfast flag
+mdadm -A $md0 $dev0 $dev1
+check raid1
+if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null
+then
+ die "failfast missing"
+fi
+
+# Adding works with the nofailfast flag
+mdadm $md0 -f $dev0
+mdadm $md0 -r $dev0
+mdadm $md0 -a --nofailfast $dev0
+check wait
+if grep failfast /sys/block/md0/md/rd0/state > /dev/null
+then
+ die "failfast should be missing"
+fi
+
+# Assembling with one faulty slave works with the failfast flag
+mdadm $md0 -f $dev0
+mdadm $md0 -r $dev0
+mdadm -S $md0
+mdadm -A $md0 $dev0 $dev1
+check raid1
+mdadm -S $md0
+
+# Spare works with the failfast flag
+mdadm -CR $md0 -e1.2 --level=raid1 --failfast -n2 $dev0 $dev1
+check raid1
+mdadm $md0 -a --failfast $dev2
+check wait
+check spares 1
+if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null
+then
+ die "failfast missing"
+fi
+
+# Grow works with the failfast flag
+mdadm -G $md0 --raid-devices=3
+check wait
+if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null
+then
+ die "failfast missing"
+fi
+mdadm -S $md0
+
+exit 0
diff --git a/tests/05r1-grow-external b/tests/05r1-grow-external
new file mode 100644
index 0000000..69da3e9
--- /dev/null
+++ b/tests/05r1-grow-external
@@ -0,0 +1,33 @@
+
+#
+# create a raid1 array, add an external bitmap
+#
+mdadm --create --run $md0 -l 1 -n 2 $dev1 $dev2
+check wait
+testdev $md0 1 $mdsize1a 64
+
+bmf=$targetdir/bm
+rm -f $bmf
+#mdadm -E $dev1
+mdadm --grow $md0 --bitmap=$bmf --delay=1 || { mdadm -X $bmf ; exit 1; }
+dirty1=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+testdev $md0 1 $mdsize1a 64
+dirty3=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty4=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+#echo $dirty1 $dirty2 $dirty3 $dirty4
+if [ $dirty2 -ne 0 -o $dirty4 -ne 0 -o $dirty3 -lt 400 ]
+then
+ echo bad dirty counts
+ exit 1
+fi
+
+# now to remove the bitmap
+check bitmap
+mdadm --grow $md0 --bitmap=none
+check nobitmap
+mdadm -S $md0
diff --git a/tests/05r1-grow-internal b/tests/05r1-grow-internal
new file mode 100644
index 0000000..24b3aec
--- /dev/null
+++ b/tests/05r1-grow-internal
@@ -0,0 +1,31 @@
+
+#
+# create a raid1 array, add an internal bitmap
+#
+mdadm --create --run $md0 -l 1 -n 2 $dev1 $dev2
+check wait
+testdev $md0 1 $mdsize1a 64
+
+#mdadm -E $dev1
+mdadm --grow $md0 --bitmap=internal --bitmap-chunk=4 --delay=1 || { mdadm -X $dev2 ; exit 1; }
+dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+testdev $md0 1 $mdsize1a 64
+dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+#echo $dirty1 $dirty2 $dirty3 $dirty4
+if [ $dirty2 -ne 0 -o $dirty4 -ne 0 -o $dirty3 -lt 400 ]
+then
+ echo bad dirty counts
+ exit 1
+fi
+
+# now to remove the bitmap
+check bitmap
+mdadm --grow $md0 --bitmap=none
+check nobitmap
+mdadm -S $md0
diff --git a/tests/05r1-grow-internal-1 b/tests/05r1-grow-internal-1
new file mode 100644
index 0000000..2f0d823
--- /dev/null
+++ b/tests/05r1-grow-internal-1
@@ -0,0 +1,31 @@
+
+#
+# create a raid1 array, version 1 superblock, add an internal bitmap
+#
+mdadm --create --run $md0 -e1 -l 1 -n 2 $dev1 $dev2
+check wait
+testdev $md0 1 $mdsize1b 64
+
+#mdadm -E $dev1
+mdadm --grow $md0 --bitmap=internal --bitmap-chunk=4 --delay=1
+dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+testdev $md0 1 $mdsize1b 64
+dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+#echo $dirty1 $dirty2 $dirty3 $dirty4
+if [ $dirty2 -ne 0 -o $dirty4 -ne 0 -o $dirty3 -lt 400 ]
+then
+ echo bad dirty counts
+ exit 1
+fi
+
+# now to remove the bitmap
+check bitmap
+mdadm --grow $md0 --bitmap=none
+check nobitmap
+mdadm -S $md0
diff --git a/tests/05r1-internalbitmap b/tests/05r1-internalbitmap
new file mode 100644
index 0000000..dd7232a
--- /dev/null
+++ b/tests/05r1-internalbitmap
@@ -0,0 +1,47 @@
+
+#
+# create a raid1 with an internal bitmap
+#
+mdadm --create -e0.90 --run $md0 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2
+check wait
+testdev $md0 1 $mdsize0 64
+mdadm -S $md0
+
+mdadm --assemble $md0 $dev1 $dev2
+testdev $md0 1 $mdsize0 64
+dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
+ exit 1
+fi
+mdadm $md0 -f $dev1
+testdev $md0 1 $mdsize0 64
+sleep 4
+dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+if [ $dirty3 -lt 400 ]
+then
+ echo >&2 "ERROR dirty count $dirty3 is too small"
+ exit 2
+fi
+
+mdadm -S $md0
+
+mdadm --assemble -R $md0 $dev2
+mdadm --zero-superblock $dev1
+mdadm $md0 --add $dev1
+check recovery
+
+dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+check wait
+sleep 4
+dirty5=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
+ exit 1
+fi
+
+mdadm -S $md0
diff --git a/tests/05r1-internalbitmap-v1a b/tests/05r1-internalbitmap-v1a
new file mode 100644
index 0000000..3ddc082
--- /dev/null
+++ b/tests/05r1-internalbitmap-v1a
@@ -0,0 +1,48 @@
+
+#
+# create a raid1 with an internal bitmap
+#
+mdadm --create --run $md0 --metadata=1.0 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2
+check wait
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
+
+mdadm --assemble $md0 $dev1 $dev2
+testdev $md0 1 $mdsize1b 64
+dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
+ exit 1
+fi
+mdadm $md0 -f $dev1
+testdev $md0 1 $mdsize1b 64
+sleep 4
+dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+if [ $dirty3 -lt 400 ]
+then
+ echo >&2 "ERROR dirty count $dirty3 is too small"
+ exit 2
+fi
+
+mdadm -S $md0
+
+mdadm --zero-superblock $dev1
+mdadm --assemble -R $md0 $dev2
+mdadm $md0 --add $dev1
+check recovery
+
+dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+check wait
+sleep 4
+dirty5=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
+ exit 1
+fi
+
+mdadm -S $md0
diff --git a/tests/05r1-internalbitmap-v1b b/tests/05r1-internalbitmap-v1b
new file mode 100644
index 0000000..40f7abe
--- /dev/null
+++ b/tests/05r1-internalbitmap-v1b
@@ -0,0 +1,49 @@
+
+#
+# create a raid1 with an internal bitmap
+#
+mdadm --create --run $md0 --metadata=1.1 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2
+check wait
+check bitmap
+testdev $md0 1 $mdsize11 64
+mdadm -S $md0
+
+mdadm --assemble $md0 $dev1 $dev2
+check bitmap
+testdev $md0 1 $mdsize11 64
+dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
+ exit 1
+fi
+mdadm $md0 -f $dev1
+testdev $md0 1 $mdsize11 64
+sleep 4
+dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+if [ $dirty3 -lt 400 ]
+then
+ echo >&2 "ERROR dirty count $dirty3 is too small"
+ exit 2
+fi
+
+mdadm -S $md0
+
+mdadm --zero-superblock $dev1
+mdadm --assemble -R $md0 $dev2
+mdadm $md0 --add $dev1
+check recovery
+
+dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+check wait
+sleep 4
+dirty5=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
+ exit 1
+fi
+
+mdadm -S $md0
diff --git a/tests/05r1-internalbitmap-v1c b/tests/05r1-internalbitmap-v1c
new file mode 100644
index 0000000..2eaea59
--- /dev/null
+++ b/tests/05r1-internalbitmap-v1c
@@ -0,0 +1,48 @@
+
+#
+# create a raid1 with an internal bitmap
+#
+mdadm --create --run $md0 --metadata=1.2 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk 4 $dev1 $dev2
+check wait
+check bitmap
+testdev $md0 1 $mdsize12 64
+mdadm -S $md0
+
+mdadm --assemble $md0 $dev1 $dev2
+testdev $md0 1 $mdsize12 64
+dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
+ exit 1
+fi
+mdadm $md0 -f $dev1
+testdev $md0 1 $mdsize12 64
+sleep 4
+dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+if [ $dirty3 -lt 400 ]
+then
+ echo >&2 "ERROR dirty count $dirty3 is too small"
+ exit 2
+fi
+
+mdadm -S $md0
+
+mdadm --zero-superblock $dev1
+mdadm --assemble -R $md0 $dev2
+mdadm $md0 --add $dev1
+check recovery
+
+dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+check wait
+sleep 4
+dirty5=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
+ exit 1
+fi
+
+mdadm -S $md0
diff --git a/tests/05r1-n3-bitmapfile b/tests/05r1-n3-bitmapfile
new file mode 100644
index 0000000..f1c3f1e
--- /dev/null
+++ b/tests/05r1-n3-bitmapfile
@@ -0,0 +1,53 @@
+
+#
+# create a raid1 with 3 devices and a bitmap file
+# make sure resync does the right thing.
+#
+#
+bmf=$targetdir/bitmap
+rm -f $bmf
+mdadm --create -e0.90 --run $md0 --level=1 -n3 --delay=1 --bitmap $bmf $dev1 $dev2 $dev3
+check wait
+testdev $md0 1 $mdsize0 64
+mdadm -S $md0
+
+mdadm --assemble $md0 --bitmap=$bmf $dev1 $dev2 $dev3
+testdev $md0 1 $mdsize0 64
+dirty1=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
+ exit 1
+fi
+mdadm $md0 -f $dev2
+testdev $md0 1 $mdsize0 64
+sleep 4
+dirty3=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+if [ $dirty3 -lt 400 ]
+then
+ echo >&2 "ERROR dirty count $dirty3 is too small"
+ exit 2
+fi
+
+mdadm -S $md0
+
+mdadm --assemble -R $md0 --bitmap=$bmf $dev1 $dev3
+check nosync
+mdadm --zero-superblock $dev2
+mdadm $md0 --add $dev2
+check recovery
+
+dirty4=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+check wait
+sleep 4
+dirty5=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
+ exit 1
+fi
+
+mdadm -S $md0
+exit 0
diff --git a/tests/05r1-re-add b/tests/05r1-re-add
new file mode 100644
index 0000000..fa6bbcb
--- /dev/null
+++ b/tests/05r1-re-add
@@ -0,0 +1,39 @@
+
+#
+# create a raid1, remove a drive, and readd it.
+# resync should be instant.
+# Then do some IO before re-adding; the resync should still be very fast.
+#
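+# (The near-instant resync relies on the internal write-intent bitmap
+# created with -binternal: when the removed device is added back, only the
+# chunks dirtied while it was out need to be resynced.)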
+
+mdadm -CR $md0 -l1 -n2 -binternal --bitmap-chunk=4 -d1 $dev1 $dev2
+check resync
+check wait
+testdev $md0 1 $mdsize1a 64
+sleep 4
+
+mdadm $md0 -f $dev2
+sleep 1
+mdadm $md0 -r $dev2
+mdadm $md0 -a $dev2
+#cat /proc/mdstat
+check nosync
+
+mdadm $md0 -f $dev2
+sleep 1
+mdadm $md0 -r $dev2
+testdev $md0 1 $mdsize1a 64
+mdadm $md0 -a $dev2
+check wait
+blockdev --flushbufs $dev1 $dev2
+cmp --ignore-initial=$[64*512] --bytes=$[$mdsize0*1024] $dev1 $dev2
+
+mdadm $md0 -f $dev2; sleep 1
+mdadm $md0 -r $dev2
+if dd if=/dev/zero of=$md0 ; then : ; fi
+blockdev --flushbufs $md0 # ensure writes have been sent.
+mdadm $md0 -a $dev2
+check recovery
+check wait
+blockdev --flushbufs $dev1 $dev2
+cmp --ignore-initial=$[64*512] --bytes=$[$mdsize0*1024] $dev1 $dev2
+mdadm -S $md0
diff --git a/tests/05r1-re-add-nosuper b/tests/05r1-re-add-nosuper
new file mode 100644
index 0000000..058d602
--- /dev/null
+++ b/tests/05r1-re-add-nosuper
@@ -0,0 +1,38 @@
+
+#
+# create a raid1, remove a drive, and readd it.
+# resync should be instant.
+# Then do some IO before re-adding; the resync should still be very fast.
+#
+bmf=$targetdir/bitmap2
+rm -f $bmf
+mdadm -B $md0 -l1 -n2 -b$bmf -d1 $dev1 $dev2
+check resync
+check wait
+testdev $md0 1 $size 1
+sleep 4
+
+mdadm $md0 -f $dev2
+sleep 1
+mdadm $md0 -r $dev2
+mdadm $md0 --re-add $dev2
+check nosync
+
+mdadm $md0 -f $dev2
+sleep 1
+mdadm $md0 -r $dev2
+testdev $md0 1 $size 1
+mdadm $md0 --re-add $dev2
+check wait
+cmp --bytes=$[$mdsize0*1024] $dev1 $dev2
+
+mdadm $md0 -f $dev2; sleep 1
+mdadm $md0 -r $dev2
+if dd if=/dev/zero of=$md0 ; then : ; fi
+blockdev --flushbufs $md0 # make sure writes have been sent
+mdadm $md0 --re-add $dev2
+check recovery
+check wait
+# should BLKFLSBUF and then read $dev1/$dev2...
+cmp --bytes=$[$mdsize0*1024] $file1 $file2
+mdadm -S $md0
diff --git a/tests/05r1-remove-internalbitmap b/tests/05r1-remove-internalbitmap
new file mode 100644
index 0000000..712fd56
--- /dev/null
+++ b/tests/05r1-remove-internalbitmap
@@ -0,0 +1,18 @@
+#
+# create a raid1 with bitmap, remove the bitmap and verify it is still
+# gone when re-assembling the array
+#
+mdadm --create --run $md0 --metadata=0.9 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2
+check wait
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -Gb none $md0
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
+
+# Re-assemble the array and verify the bitmap is still gone
+mdadm --assemble $md0 $dev1 $dev2
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
diff --git a/tests/05r1-remove-internalbitmap-v1a b/tests/05r1-remove-internalbitmap-v1a
new file mode 100644
index 0000000..a4a9aaf
--- /dev/null
+++ b/tests/05r1-remove-internalbitmap-v1a
@@ -0,0 +1,18 @@
+#
+# create a raid1 with bitmap, remove the bitmap and verify it is still
+# gone when re-assembling the array
+#
+mdadm --create --run $md0 --metadata=1.0 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2
+check wait
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -Gb none $md0
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
+
+# Re-assemble the array and verify the bitmap is still gone
+mdadm --assemble $md0 $dev1 $dev2
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
diff --git a/tests/05r1-remove-internalbitmap-v1b b/tests/05r1-remove-internalbitmap-v1b
new file mode 100644
index 0000000..c0918eb
--- /dev/null
+++ b/tests/05r1-remove-internalbitmap-v1b
@@ -0,0 +1,18 @@
+#
+# create a raid1 with bitmap, remove the bitmap and verify it is still
+# gone when re-assembling the array
+#
+mdadm --create --run $md0 --metadata=1.1 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2
+check wait
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -Gb none $md0
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
+
+# Re-assemble the array and verify the bitmap is still gone
+mdadm --assemble $md0 $dev1 $dev2
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
diff --git a/tests/05r1-remove-internalbitmap-v1c b/tests/05r1-remove-internalbitmap-v1c
new file mode 100644
index 0000000..15f1fbb
--- /dev/null
+++ b/tests/05r1-remove-internalbitmap-v1c
@@ -0,0 +1,18 @@
+#
+# create a raid1 with bitmap, remove the bitmap and verify it is still
+# gone when re-assembling the array
+#
+mdadm --create --run $md0 --metadata=1.2 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2
+check wait
+check bitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -Gb none $md0
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
+
+# Re-assemble the array and verify the bitmap is still gone
+mdadm --assemble $md0 $dev1 $dev2
+check nobitmap
+testdev $md0 1 $mdsize1b 64
+mdadm -S $md0
diff --git a/tests/05r5-bitmapfile b/tests/05r5-bitmapfile
new file mode 100644
index 0000000..6d173d8
--- /dev/null
+++ b/tests/05r5-bitmapfile
@@ -0,0 +1,49 @@
+
+#
+# create a raid5 with a bitmap file
+#
+bmf=$targetdir/bitmap
+rm -f $bmf
+mdadm --create --run $md0 --level=5 -n3 --delay=1 --bitmap $bmf $dev1 $dev2 $dev3
+check wait
+testdev $md0 2 $mdsize1 512
+mdadm -S $md0
+
+mdadm --assemble $md0 --bitmap=$bmf $dev1 $dev2 $dev3
+testdev $md0 2 $mdsize1 512
+dirty1=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
+ exit 1
+fi
+mdadm $md0 -f $dev1
+testdev $md0 2 $mdsize1 512
+sleep 4
+dirty3=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+if [ $dirty3 -lt 400 ]
+then
+ echo >&2 "ERROR dirty count $dirty3 is too small"
+ exit 2
+fi
+
+mdadm -S $md0
+
+mdadm --assemble -R $md0 --bitmap=$bmf $dev2 $dev3
+mdadm --zero $dev1 # force add, not re-add
+mdadm $md0 --add $dev1
+check recovery
+
+dirty4=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+check wait
+sleep 4
+dirty5=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
+ exit 1
+fi
+
+mdadm -S $md0
diff --git a/tests/05r5-internalbitmap b/tests/05r5-internalbitmap
new file mode 100644
index 0000000..13dc592
--- /dev/null
+++ b/tests/05r5-internalbitmap
@@ -0,0 +1,47 @@
+
+#
+# create a raid5 with an internal bitmap
+#
+mdadm --create --run $md0 --level=5 -n3 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2 $dev3
+check wait
+testdev $md0 2 $mdsize1 512
+mdadm -S $md0
+
+mdadm --assemble $md0 $dev1 $dev2 $dev3
+testdev $md0 2 $mdsize1 512
+dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
+ exit 1
+fi
+mdadm $md0 -f $dev1
+testdev $md0 2 $mdsize1 512
+sleep 4
+dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+if [ $dirty3 -lt 400 ]
+then
+ echo >&2 "ERROR dirty count $dirty3 is too small"
+ exit 2
+fi
+
+mdadm -S $md0
+
+mdadm --assemble -R $md0 $dev2 $dev3
+mdadm --zero $dev1 # force --add, not --re-add
+mdadm $md0 --add $dev1
+check recovery
+
+dirty4=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+check wait
+sleep 4
+dirty5=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
+ exit 1
+fi
+
+mdadm -S $md0
diff --git a/tests/05r6-bitmapfile b/tests/05r6-bitmapfile
new file mode 100644
index 0000000..d11896d
--- /dev/null
+++ b/tests/05r6-bitmapfile
@@ -0,0 +1,49 @@
+
+#
+# create a raid6 with a bitmap file
+#
+bmf=$targetdir/bitmap
+rm -f $bmf
+mdadm --create --run $md0 --level=6 -n4 --delay=1 --bitmap $bmf $dev1 $dev2 $dev3 $dev4
+check wait
+testdev $md0 2 $mdsize1 512
+mdadm -S $md0
+
+mdadm --assemble $md0 --bitmap=$bmf $dev1 $dev2 $dev3 $dev4
+testdev $md0 2 $mdsize1 512
+dirty1=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+sleep 4
+dirty2=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty1 -lt 400 -o $dirty2 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts: $dirty1 and $dirty2"
+ exit 1
+fi
+mdadm $md0 -f $dev3
+testdev $md0 2 $mdsize1 512
+sleep 4
+dirty3=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+if [ $dirty3 -lt 400 ]
+then
+ echo >&2 "ERROR dirty count $dirty3 is too small"
+ exit 2
+fi
+
+mdadm -S $md0
+
+mdadm --assemble -R $md0 --bitmap=$bmf $dev1 $dev2 $dev4
+mdadm --zero $dev3 # force --add, not --re-add
+mdadm $md0 --add $dev3
+check recovery
+
+dirty4=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+check wait
+sleep 4
+dirty5=`mdadm -X $bmf | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'`
+
+if [ $dirty4 -lt 400 -o $dirty5 -ne 0 ]
+then echo >&2 "ERROR bad 'dirty' counts at end: $dirty4 $dirty5"
+ exit 1
+fi
+
+mdadm -S $md0
diff --git a/tests/05r6tor0 b/tests/05r6tor0
new file mode 100644
index 0000000..2fd51f2
--- /dev/null
+++ b/tests/05r6tor0
@@ -0,0 +1,27 @@
+set -x -e
+
+# reshape a RAID6 to RAID5 and then RAID0.
+# then reshape back up to RAID5 and RAID6
+
+mdadm -CR $md0 -l6 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
+check wait; sleep 1
+check raid6
+testdev $md0 3 19456 512
+mdadm -G $md0 -l5
+check wait; sleep 1
+check raid5
+testdev $md0 3 19456 512
+mdadm -G $md0 -l0
+check wait; sleep 1
+check raid0
+testdev $md0 3 19456 512
+mdadm -G $md0 -l5 --add $dev3 $dev4
+check wait; sleep 1
+check raid5
+check algorithm 2
+testdev $md0 3 19456 512
+mdadm -G $md0 -l 6
+check wait; sleep 1
+check raid6
+check algorithm 2
+testdev $md0 3 19456 512
diff --git a/tests/06name b/tests/06name
new file mode 100644
index 0000000..86eaab6
--- /dev/null
+++ b/tests/06name
@@ -0,0 +1,12 @@
+set -x
+
+# create an array with a name
+
+mdadm -CR $md0 -l0 -n2 --metadata=1 --name="Fred" $dev0 $dev1
+mdadm -E $dev0 | grep 'Name : Fred' > /dev/null || exit 1
+mdadm -D $md0 | grep 'Name : Fred' > /dev/null || exit 1
+mdadm -S $md0
+
+mdadm -A $md0 --name="Fred" $devlist
+#mdadm -Db $md0
+mdadm -S $md0
diff --git a/tests/06sysfs b/tests/06sysfs
new file mode 100644
index 0000000..af63ef4
--- /dev/null
+++ b/tests/06sysfs
@@ -0,0 +1,11 @@
+exit 0
+mdadm -CR $md0 -l1 -n3 $dev1 $dev2 $dev3
+
+ls -Rl /sys/block/md0
+
+cat /sys/block/md0/md/level
+cat /sys/block/md0/md/raid_disks
+
+mdadm -S $md0
+
+exit 1
diff --git a/tests/06wrmostly b/tests/06wrmostly
new file mode 100644
index 0000000..968c197
--- /dev/null
+++ b/tests/06wrmostly
@@ -0,0 +1,13 @@
+
+# create a raid1 array with a wrmostly device
+
+mdadm -CR $md0 -l1 -n3 $dev0 $dev1 --write-mostly $dev2
+testdev $md0 1 $mdsize1a 64
+
+# unfortunately, we cannot measure if any read requests are going to $dev2
+
+mdadm -S $md0
+
+mdadm -CR $md0 -l1 -n3 --write-behind --bitmap=internal --bitmap-chunk=4 $dev0 $dev1 --write-mostly $dev2
+testdev $md0 1 $mdsize1a 64
+mdadm -S $md0
diff --git a/tests/07autoassemble b/tests/07autoassemble
new file mode 100644
index 0000000..e689be7
--- /dev/null
+++ b/tests/07autoassemble
@@ -0,0 +1,24 @@
+
+# create two raid1s, build a raid0 on top, then
+# tear it down and get auto-assemble to rebuild it.
+
+mdadm -CR $md1 -l1 -n2 $dev0 $dev1 --homehost=testing
+mdadm -CR $md2 -l1 -n2 $dev2 $dev3 --homehost=testing
+mdadm -CR $md0 -l0 -n2 $md1 $md2 --homehost=testing
+
+mdadm -Ss
+mdadm -As -c /dev/null --homehost=testing -vvv
+testdev $md1 1 $mdsize1a 64
+testdev $md2 1 $mdsize1a 64
+testdev $md0 2 $mdsize11a 512
+mdadm -Ss
+
+mdadm --zero-superblock $dev0 $dev1 $dev2 $dev3
+## Now the raid0 uses one stacked and one not
+mdadm -CR $md1 -l1 -n2 $dev0 $dev1 --homehost=testing
+mdadm -CR $md0 -l0 -n2 $md1 $dev2 --homehost=testing
+mdadm -Ss
+mdadm -As -c /dev/null --homehost=testing -vvv
+testdev $md1 1 $mdsize1a 64
+testdev $md0 1 $[mdsize1a+mdsize11a] 512
+mdadm -Ss
diff --git a/tests/07autoassemble.broken b/tests/07autoassemble.broken
new file mode 100644
index 0000000..8be0940
--- /dev/null
+++ b/tests/07autoassemble.broken
@@ -0,0 +1,8 @@
+always fails
+
+Prints lots of messages, but the array doesn't assemble. Error
+possibly related to:
+
+ mdadm: /dev/md/1 is busy - skipping
+ mdadm: no recogniseable superblock on /dev/md/testing:0
+ mdadm: /dev/md/2 is busy - skipping
diff --git a/tests/07autodetect b/tests/07autodetect
new file mode 100644
index 0000000..917e0d6
--- /dev/null
+++ b/tests/07autodetect
@@ -0,0 +1,34 @@
+
+#
+# Test in-kernel autodetect.
+# Create a partitionable array on each of two devices,
+# put a partition on each, create an array, and see if we can
+# use autodetect to restart the array.
+
+if lsmod | grep md_mod > /dev/null 2>&1
+then
+ echo md is a module - cannot test autodetect
+ exit 0
+fi
+
+
+mdadm -CR -e 0 $mdp0 -l0 -f -n1 $dev0
+mdadm -CR -e 0 $mdp1 -l0 -f -n1 $dev1
+udevadm settle
+sfdisk $mdp0 >&2 << END
+,,FD
+END
+sfdisk $mdp1 >&2 << END
+,,FD
+END
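+# ',,FD' asks sfdisk for a single partition with default start and size
+# (the whole device) and partition type 0xFD, "Linux raid autodetect",
+# which is what the in-kernel autodetect code scans for at boot.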
+udevadm settle
+mdadm -CR -e 0 $md0 -l1 -n2 ${mdp0}p1 ${mdp1}p1
+check resync
+check raid1
+check wait
+mdadm -S $md0
+mdadm --auto-detect
+check raid1
+
+mdadm -Ss
+exit 0
diff --git a/tests/07autodetect.broken b/tests/07autodetect.broken
new file mode 100644
index 0000000..294954a
--- /dev/null
+++ b/tests/07autodetect.broken
@@ -0,0 +1,5 @@
+always fails
+
+Fails with error:
+
+ ERROR: no resync happening
diff --git a/tests/07changelevelintr b/tests/07changelevelintr
new file mode 100644
index 0000000..18c6309
--- /dev/null
+++ b/tests/07changelevelintr
@@ -0,0 +1,61 @@
+
+#
+# test that we can stop and restart a level change.
+# just test a few in-place changes, and a few
+# size-reducing changes.
+
+
+checkgeo() {
+ # check the geometry of an array
+ # level raid_disks chunk_size layout
+ dev=$1
+ shift
+ sleep 0.5
+ check wait
+ sleep 1
+ for attr in level raid_disks chunk_size layout
+ do
+ if [ $# -gt 0 ] ; then
+ val=$1
+ shift
+ if [ " `cat /sys/block/$dev/md/$attr`" != " $val" ]
+ then echo "$attr doesn't match for $dev"
+ exit 1
+ fi
+ fi
+ done
+}
+
+restart() {
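+  # the reshape must still be in progress when we stop the array;
+  # reassembling with the same --backup-file lets it continue where it
+  # left off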
+ sleep 0.5
+ check reshape
+ mdadm -S $md0
+ mdadm -A $md0 $devs --backup-file=$bu
+ sleep 0.5
+ check reshape
+}
+
+bu=/tmp/md-backup
+rm -f $bu
+devs="$dev0 $dev1 $dev2 $dev3 $dev4"
+mdadm -CR $md0 -l5 -n5 -c 256 $devs
+checkgeo md0 raid5 5 $[256*1024] 2
+
+mdadm -G $md0 -c 128 --backup-file=$bu
+restart
+checkgeo md0 raid5 5 $[128*1024] 2
+
+mdadm -G $md0 --layout rs --backup-file=$bu
+restart
+checkgeo md0 raid5 5 $[128*1024] 3
+
+mdadm -G $md0 --array-size 58368
+mdadm -G $md0 --raid-disks 4 -c 64 --backup-file=$bu
+restart
+checkgeo md0 raid5 4 $[64*1024] 3
+
+devs="$dev0 $dev1 $dev2 $dev3"
+mdadm -G $md0 --array-size 19456
+mdadm -G $md0 -n 2 -c 256 --backup-file=$bu
+restart
+checkgeo md0 raid5 2 $[256*1024] 3
diff --git a/tests/07changelevelintr.broken b/tests/07changelevelintr.broken
new file mode 100644
index 0000000..284b490
--- /dev/null
+++ b/tests/07changelevelintr.broken
@@ -0,0 +1,9 @@
+always fails
+
+Fails with errors:
+
+ mdadm: this change will reduce the size of the array.
+ use --grow --array-size first to truncate array.
+ e.g. mdadm --grow /dev/md0 --array-size 56832
+
+ ERROR: no reshape happening
diff --git a/tests/07changelevels b/tests/07changelevels
new file mode 100644
index 0000000..a328874
--- /dev/null
+++ b/tests/07changelevels
@@ -0,0 +1,114 @@
+
+# Test changing of level, chunksize etc.
+# Create a RAID1, convert to RAID5, add a disk, add another disk,
+# convert to RAID6, back to RAID5 and ultimately to RAID1
+
+testK=$[64*3*6]
+dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$testK
+export MDADM_GROW_VERIFY=1
+
+dotest() {
+ sleep 2
+ check wait
+ testdev $md0 $1 19968 64 nd
+ blockdev --flushbufs $md0
+ cmp -s -n $[testK*1024] $md0 /tmp/RandFile || { echo cmp failed; exit 2; }
+ # write something new - shift chars 4 space
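+ # (tr maps the printable range ' '..'~' onto '$'..'~' followed by ' '..'#',
+ # i.e. it rotates every printable character forward by 4, so each pass
+ # writes different data without changing the file size)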
+ tr ' -~' '$-~ -#' < /tmp/RandFile > /tmp/RandFile2
+ mv /tmp/RandFile2 /tmp/RandFile
+ dd if=/tmp/RandFile of=$md0
+}
+
+checkgeo() {
+ # check the geometry of an array
+ # level raid_disks chunk_size layout
+ dev=$1
+ shift
+ sleep 0.5
+ check wait
+ sleep 1
+ for attr in level raid_disks chunk_size layout
+ do
+ if [ $# -gt 0 ] ; then
+ val=$1
+ shift
+ if [ " `cat /sys/block/$dev/md/$attr`" != " $val" ]
+ then echo "$attr doesn't match for $dev"
+ exit 1
+ fi
+ fi
+ done
+}
+
+
+bu=/tmp/md-test-backup
+rm -f $bu
+mdadm -CR $md0 -l1 -n2 -x1 $dev0 $dev1 $dev2 -z 19968
+testdev $md0 1 $mdsize1a 64
+dd if=/tmp/RandFile of=$md0
+dotest 1
+
+mdadm --grow $md0 -l5 -n3 --chunk 64
+dotest 2
+
+mdadm $md0 --add $dev3 $dev4
+mdadm --grow $md0 -n4 --chunk 32
+dotest 3
+
+mdadm -G $md0 -l6 --backup-file $bu
+dotest 3
+
+mdadm -G /dev/md0 --array-size 39936
+mdadm -G $md0 -n4 --backup-file $bu
+checkgeo md0 raid6 4 $[32*1024]
+dotest 2
+
+mdadm -G $md0 -l5 --backup-file $bu
+checkgeo md0 raid5 3 $[32*1024]
+dotest 2
+
+mdadm -G /dev/md0 --array-size 19968
+mdadm -G $md0 -n2 --backup-file $bu
+checkgeo md0 raid5 2 $[32*1024]
+dotest 1
+
+mdadm -G --level=1 $md0
+dotest 1
+
+# now repeat that last few steps only with a degraded array.
+mdadm -S $md0
+mdadm -CR $md0 -l6 -n5 $dev0 $dev1 $dev2 $dev3 $dev4
+dd if=/tmp/RandFile of=$md0
+dotest 3
+
+mdadm $md0 --fail $dev0
+
+mdadm -G /dev/md0 --array-size 37888
+mdadm -G $md0 -n4 --backup-file $bu
+dotest 2
+checkgeo md0 raid6 4 $[512*1024]
+mdadm $md0 --fail $dev4
+
+mdadm $md0 --fail $dev3
+# now double-degraded.
+# switch layout to a DDF layout and back to make sure that works.
+
+mdadm -G /dev/md0 --layout=ddf-N-continue --backup-file $bu
+checkgeo md0 raid6 4 $[512*1024] 10
+dotest 2
+mdadm -G /dev/md0 --layout=ra --backup-file $bu
+checkgeo md0 raid6 4 $[512*1024] 1
+dotest 2
+
+mdadm -G $md0 -l5 --backup-file $bu
+dotest 2
+
+mdadm -G /dev/md0 --array-size 18944
+mdadm -G $md0 -n2 --backup-file $bu
+dotest 1
+checkgeo md0 raid5 2 $[512*1024]
+mdadm $md0 --fail $dev2
+
+mdadm -G --level=1 $md0
+dotest 1
+checkgeo md0 raid1 2
diff --git a/tests/07changelevels.broken b/tests/07changelevels.broken
new file mode 100644
index 0000000..9b930d9
--- /dev/null
+++ b/tests/07changelevels.broken
@@ -0,0 +1,9 @@
+always fails
+
+Fails with errors:
+
+ mdadm: /dev/loop0 is smaller than given size. 18976K < 19968K + metadata
+ mdadm: /dev/loop1 is smaller than given size. 18976K < 19968K + metadata
+ mdadm: /dev/loop2 is smaller than given size. 18976K < 19968K + metadata
+
+ ERROR: /dev/md0 isn't a block device.
diff --git a/tests/07layouts b/tests/07layouts
new file mode 100644
index 0000000..acd1a80
--- /dev/null
+++ b/tests/07layouts
@@ -0,0 +1,91 @@
+
+# check that the kernel and restripe interpret all the different layouts
+# the same
+# This involves changing the layout to each different possibility
+# while MDADM_GROW_VERIFY is set.
+
+testK=$[64*3*6]
+dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$testK
+export MDADM_GROW_VERIFY=1
+
+
+dotest() {
+ sleep 0.5
+ check wait
+ testdev $md0 $1 $mdsize1 512 nd
+ blockdev --flushbufs $md0
+ cmp -s -n $[testK*1024] $md0 /tmp/RandFile || { echo cmp failed; exit 2; }
+ # write something new - shift chars 4 space
+ tr ' -~' '$-~ -#' < /tmp/RandFile > /tmp/RandFile2
+ mv /tmp/RandFile2 /tmp/RandFile
+ dd if=/tmp/RandFile of=$md0
+}
+
+checkgeo() {
+ # check the geometry of an array
+ # level raid_disks chunk_size layout
+ dev=$1
+ shift
+ sleep 0.5
+ check wait
+ for attr in level raid_disks chunk_size layout
+ do
+ if [ $# -gt 0 ] ; then
+ val=$1
+ shift
+ if [ " `sed 's/ .*//' /sys/block/$dev/md/$attr`" != " $val" ]
+ then echo "$attr doesn't match for $dev"
+ exit 1
+ fi
+ fi
+ done
+}
+
+
+bu=/tmp/md-test-backup
+rm -f $bu
+
+# first a degraded 5 device raid5
+mdadm -CR $md0 -l5 -n5 $dev0 $dev1 missing $dev2 $dev3
+dd if=/tmp/RandFile of=$md0
+dotest 4
+
+l5[0]=la
+l5[1]=ra
+l5[2]=ls
+l5[3]=rs
+l5[4]=parity-first
+l5[5]=parity-last
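+# the array index is the kernel's numeric layout value for each layout name,
+# so checkgeo can compare the sysfs 'layout' attribute against $layout
+# directly (the raid6 table below uses the same convention)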
+for layout in 0 1 2 3 4 5 0
+do
+ mdadm -G $md0 --layout=${l5[$layout]} --backup-file $bu
+ checkgeo md0 raid5 5 $[512*1024] $layout
+ dotest 4
+done
+
+mdadm -S $md0
+# now a doubly degraded raid6
+mdadm -CR $md0 -l6 -n5 $dev0 missing $dev2 missing $dev4
+dd if=/tmp/RandFile of=$md0
+dotest 3
+
+l6[0]=la
+l6[1]=ra
+l6[2]=ls
+l6[3]=rs
+l6[4]=parity-first
+l6[5]=parity-last
+l6[8]=ddf-zero-restart
+l6[9]=ddf-N-restart
+l6[10]=ddf-N-continue
+l6[16]=left-asymmetric-6
+l6[17]=right-asymmetric-6
+l6[18]=left-symmetric-6
+l6[19]=right-symmetric-6
+l6[20]=parity-first-6
+for layout in 0 1 2 3 4 5 8 9 10 16 17 18 19 20 0
+do
+ mdadm -G $md0 --layout=${l6[$layout]} --backup-file $bu
+ checkgeo md0 raid6 5 $[512*1024] $layout
+ dotest 3
+done
diff --git a/tests/07reshape5intr b/tests/07reshape5intr
new file mode 100644
index 0000000..0f4803a
--- /dev/null
+++ b/tests/07reshape5intr
@@ -0,0 +1,41 @@
+
+#
+# test interrupting and restarting raid5 reshape.
+set -x
+devs="$dev1"
+st=UU
+for disks in 2 3 4 5
+do
+ eval devs=\"$devs \$dev$disks\"
+ st=U$st
+ for d in $devs
+ do dd if=/dev/urandom of=$d bs=1024 || true
+ done
+
+ case $disks in
+ 2 | 3) chunk=1024;;
+ 4 ) chunk=512;;
+ 5 ) chunk=256;;
+ esac
+
+ mdadm -CR $md0 -amd -l5 -c $chunk -n$disks --assume-clean $devs
+ mdadm $md0 --add $dev6
+ echo 20 > /proc/sys/dev/raid/speed_limit_min
+ echo 20 > /proc/sys/dev/raid/speed_limit_max
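+ # throttle resync to ~20KB/s so the reshape is guaranteed to still be in
+ # progress when we stop and reassemble the array below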
+ mdadm --grow $md0 -n $[disks+1]
+ check reshape
+ check state $st
+ mdadm --stop $md0
+ mdadm --assemble $md0 $devs $dev6
+ check reshape
+ echo 1000 > /proc/sys/dev/raid/speed_limit_min
+ echo 2000 > /proc/sys/dev/raid/speed_limit_max
+ check wait
+ while ! echo check > /sys/block/md0/md/sync_action; do sleep 0.1; done
+ check wait
+ mm=`cat /sys/block/md0/md/mismatch_cnt`
+ if [ $mm -gt 0 ]
+ then echo >&2 "ERROR mismatch_cnt non-zero : $mm" ; exit 1
+ fi
+ mdadm -S $md0
+done
diff --git a/tests/07reshape5intr.broken b/tests/07reshape5intr.broken
new file mode 100644
index 0000000..efe52a6
--- /dev/null
+++ b/tests/07reshape5intr.broken
@@ -0,0 +1,45 @@
+always fails
+
+This patch, recently added to md-next, causes the test to always fail:
+
+7e6ba434cc60 ("md: don't unregister sync_thread with reconfig_mutex
+held")
+
+The new error is simply:
+
+ ERROR: no reshape happening
+
+Before the patch, the error seen is below.
+
+--
+
+fails infrequently
+
+Fails roughly 1 in 4 runs with errors:
+
+ mdadm: Merging with already-assembled /dev/md/0
+ mdadm: cannot re-read metadata from /dev/loop6 - aborting
+
+ ERROR: no reshape happening
+
+Also have seen a random deadlock:
+
+ INFO: task mdadm:109702 blocked for more than 30 seconds.
+ Not tainted 5.18.0-rc3-eid-vmlocalyes-dbg-00095-g3c2b5427979d #2040
+ "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+ task:mdadm state:D stack: 0 pid:109702 ppid: 1 flags:0x00004000
+ Call Trace:
+ <TASK>
+ __schedule+0x67e/0x13b0
+ schedule+0x82/0x110
+ mddev_suspend+0x2e1/0x330
+ suspend_lo_store+0xbd/0x140
+ md_attr_store+0xcb/0x130
+ sysfs_kf_write+0x89/0xb0
+ kernfs_fop_write_iter+0x202/0x2c0
+ new_sync_write+0x222/0x330
+ vfs_write+0x3bc/0x4d0
+ ksys_write+0xd9/0x180
+ __x64_sys_write+0x43/0x50
+ do_syscall_64+0x3b/0x90
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
diff --git a/tests/07revert-grow b/tests/07revert-grow
new file mode 100644
index 0000000..c8c4e85
--- /dev/null
+++ b/tests/07revert-grow
@@ -0,0 +1,52 @@
+set -e -x
+
+# revert a reshape that is increasing the number of devices,
+# raid5, raid6, and raid10
+
+# metadata 0.90 cannot handle RAID10 growth
+# metadata 1.0 doesn't get a default headspace, so don't try it either.
+
+for metadata in 0.90 1.1 1.2
+do
+# RAID5
+mdadm -CR --assume-clean $md0 -l5 -n4 -x1 $devlist4 --metadata=$metadata
+check raid5
+testdev $md0 3 $mdsize1 512
+mdadm -G $md0 -n 5
+sleep 3
+mdadm -S $md0
+mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup
+check wait
+check raid5
+testdev $md0 3 $mdsize1 512
+mdadm -S $md0
+
+# RAID6
+mdadm -CR --assume-clean $md0 -l6 -n4 -x1 $devlist4 --metadata=$metadata
+check raid6
+testdev $md0 2 $mdsize1 512
+mdadm -G $md0 -n 5
+sleep 3
+mdadm -S $md0
+mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup
+check wait
+check raid6
+testdev $md0 2 $mdsize1 512
+mdadm -S $md0
+
+if [ $metadata = 0.90 ]; then continue; fi
+
+# RAID10
+mdadm -CR --assume-clean $md0 -l10 -n4 -x1 $devlist4 --metadata=$metadata
+check raid10
+testdev $md0 2 $mdsize1 512
+mdadm -G $md0 -n 5
+sleep 3
+mdadm -S $md0
+strace -o /tmp/str ./mdadm -A $md0 --update=revert-reshape $devlist4
+check wait
+check raid10
+testdev $md0 2 $mdsize1 512
+mdadm -S $md0
+
+done
diff --git a/tests/07revert-grow.broken b/tests/07revert-grow.broken
new file mode 100644
index 0000000..9b6db86
--- /dev/null
+++ b/tests/07revert-grow.broken
@@ -0,0 +1,31 @@
+always fails
+
+This patch, recently added to md-next, causes the test to always fail:
+
+7e6ba434cc60 ("md: don't unregister sync_thread with reconfig_mutex held")
+
+The errors are:
+
+ mdadm: No active reshape to revert on /dev/loop0
+ ERROR: active raid5 not found
+
+Before the patch, the error seen is below.
+
+--
+
+fails rarely
+
+Fails about 1 in every 30 runs with errors:
+
+ mdadm: Merging with already-assembled /dev/md/0
+ mdadm: backup file /tmp/md-backup inaccessible: No such file or directory
+ mdadm: failed to add /dev/loop1 to /dev/md/0: Invalid argument
+ mdadm: failed to add /dev/loop2 to /dev/md/0: Invalid argument
+ mdadm: failed to add /dev/loop3 to /dev/md/0: Invalid argument
+ mdadm: failed to add /dev/loop0 to /dev/md/0: Invalid argument
+ mdadm: /dev/md/0 assembled from 1 drive - need all 5 to start it
+ (use --run to insist).
+
+ grep: /sys/block/md*/md/sync_action: No such file or directory
+
+ ERROR: active raid5 not found
diff --git a/tests/07revert-inplace b/tests/07revert-inplace
new file mode 100644
index 0000000..a73eb97
--- /dev/null
+++ b/tests/07revert-inplace
@@ -0,0 +1,44 @@
+set -e -x
+
+# revert a reshape that is not changing the number of data devices,
+# raid5, raid6, and raid10
+
+# RAID5 -> RAID6
+mdadm -CR --assume-clean $md0 -l5 -n4 -x1 $devlist4
+check raid5
+testdev $md0 3 $mdsize1 512
+mdadm -G $md0 -l 6
+sleep 2
+mdadm -S $md0
+mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup
+check wait
+check raid6
+check algorithm 18
+testdev $md0 3 $mdsize1 512
+mdadm -S $md0
+
+# RAID6 -> RAID5
+mdadm -CR --assume-clean $md0 -l6 -n5 $devlist4
+check raid6
+testdev $md0 3 $mdsize1 512
+mdadm -G $md0 -l 5
+sleep 2
+mdadm -S $md0
+mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup
+check wait
+check raid6
+testdev $md0 3 $mdsize1 512
+mdadm -S $md0
+
+# RAID10 - decrease chunk size
+mdadm -CR --assume-clean $md0 -l10 -n6 -c 64 $devlist5
+check raid10
+testdev $md0 3 $mdsize1 64
+mdadm -G $md0 -c 32
+sleep 2
+mdadm -S $md0
+strace -o /tmp/str ./mdadm -A $md0 --update=revert-reshape $devlist5
+check wait
+check raid10
+testdev $md0 3 $mdsize1 64
+mdadm -S $md0
diff --git a/tests/07revert-shrink b/tests/07revert-shrink
new file mode 100644
index 0000000..62b5ae0
--- /dev/null
+++ b/tests/07revert-shrink
@@ -0,0 +1,56 @@
+set -e -x
+
+# revert a reshape that is decreasing the number of devices,
+# raid5, raid6, and raid10
+
+bu=$targetdir/md-backup
+rm -f $bu
+# RAID5
+mdadm -CR --assume-clean $md0 -l5 -n5 $devlist4
+check raid5
+testdev $md0 4 $mdsize1 512
+mdadm --grow $md0 --array-size 56832
+testdev $md0 3 $mdsize1 512
+mdadm -G $md0 -n 4 --backup=$bu
+sleep 3
+mdadm -S $md0
+mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=$bu
+check wait
+check raid5
+fsck -f -n $md0
+testdev $md0 4 $mdsize1 512
+mdadm -S $md0
+
+#FIXME
+rm -f $bu
+# RAID6
+mdadm -CR --assume-clean $md0 -l6 -n5 $devlist4
+check raid6
+testdev $md0 3 $mdsize1 512
+mdadm --grow $md0 --array-size 37888
+testdev $md0 2 $mdsize1 512
+mdadm -G $md0 -n 4 --backup=$bu
+sleep 2
+mdadm -S $md0
+mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=$bu
+check wait
+check raid6
+fsck -f -n $md0
+testdev $md0 3 $mdsize1 512
+mdadm -S $md0
+
+# RAID10
+mdadm -CR --assume-clean $md0 -l10 -n6 $devlist5
+check raid10
+testdev $md0 3 $mdsize1 512
+mdadm --grow $md0 --array-size 36864
+testdev $md0 2 $mdsize1 512
+mdadm -G $md0 -n 4
+sleep 3
+mdadm -S $md0
+mdadm -A $md0 --update=revert-reshape $devlist5
+check wait
+check raid10
+fsck -f -n $md0
+testdev $md0 3 $mdsize1 512
+mdadm -S $md0
diff --git a/tests/07revert-shrink.broken b/tests/07revert-shrink.broken
new file mode 100644
index 0000000..c33c39e
--- /dev/null
+++ b/tests/07revert-shrink.broken
@@ -0,0 +1,9 @@
+always fails
+
+Fails with errors:
+
+ mdadm: this change will reduce the size of the array.
+ use --grow --array-size first to truncate array.
+ e.g. mdadm --grow /dev/md0 --array-size 53760
+
+ ERROR: active raid5 not found
diff --git a/tests/07testreshape5 b/tests/07testreshape5
new file mode 100644
index 0000000..0e1f25f
--- /dev/null
+++ b/tests/07testreshape5
@@ -0,0 +1,45 @@
+
+#
+# test the reshape code by using test_stripe and the
+# kernel md code to move data into and out of variously
+# shaped md arrays.
+set -x
+layouts=(la ra ls rs)
+for level in 5 6
+do
+for chunk in 4 8 16 32 64 128
+do
+ devs="$dev1"
+ for disks in 2 3 4 5 6
+ do
+ eval devs=\"$devs \$dev$disks\"
+ if [ " $level $disks" = " 6 3" -o " $level $disks" = " 6 2" ]
+ then continue
+ fi
+ for nlayout in 0 1 2 3
+ do
+ layout=${layouts[$nlayout]}
+
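+ # disks-(level-4) is the number of data disks (disks-1 for raid5,
+ # disks-2 for raid6), so size is roughly $disks full stripes of data, in KiB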
+ size=$[chunk*(disks-(level-4))*disks]
+
+ # test restore: make a raid5 from a file, then do a compare
+ dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$size
+ $dir/test_stripe restore /tmp/RandFile $disks $[chunk*1024] $level $nlayout 0 $[size*1024] $devs
+ mdadm -CR -e 1.0 $md0 -amd -l$level -n$disks --assume-clean -c $chunk -p $layout $devs
+ cmp -s -n $[size*1024] $md0 /tmp/RandFile || { echo cmp failed ; exit 2; }
+
+ # FIXME check parity
+
+ # test save
+ dd if=/dev/urandom of=$md0 bs=1024 count=$size
+ blockdev --flushbufs $md0 $devs; sync
+ > /tmp/NewRand
+ $dir/test_stripe save /tmp/NewRand $disks $[chunk*1024] $level $nlayout 0 $[size*1024] $devs
+ cmp -s -n $[size*1024] $md0 /tmp/NewRand || { echo cmp failed ; exit 2; }
+ mdadm -S $md0
+ udevadm settle
+ done
+ done
+done
+done
+exit 0
diff --git a/tests/07testreshape5.broken b/tests/07testreshape5.broken
new file mode 100644
index 0000000..a8ce03e
--- /dev/null
+++ b/tests/07testreshape5.broken
@@ -0,0 +1,12 @@
+always fails
+
+The test runs 'test_stripe' from the $dir directory, but $dir is never
+set. If $dir is adjusted to $PWD, the test still fails with:
+
+ mdadm: /dev/loop2 is not suitable for this array.
+ mdadm: create aborted
+ ++ return 1
+ ++ cmp -s -n 8192 /dev/md0 /tmp/RandFile
+ ++ echo cmp failed
+ cmp failed
+ ++ exit 2
diff --git a/tests/09imsm-assemble b/tests/09imsm-assemble
new file mode 100644
index 0000000..d7028c6
--- /dev/null
+++ b/tests/09imsm-assemble
@@ -0,0 +1,73 @@
+# validate the prodigal member disk scenario i.e. a former container
+# member is returned after having been rebuilt on another system
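+# (Presumably each partial re-assembly below bumps the generation/family
+# counters in the IMSM metadata, so when all four disks come back together
+# the two halves carry conflicting metadata and mdadm keeps only one family;
+# which one wins is not deterministic, hence the if/else further down.)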
+
+
+imsm_check_hold() {
+ if mdadm --remove $1 $2; then
+ echo "$2 removal from $1 should have been blocked" >&2
+ cat /proc/mdstat >&2
+ mdadm -E $2
+ exit 1
+ fi
+}
+
+imsm_check_removal() {
+ if ! mdadm --remove $1 $2 ; then
+ echo "$2 removal from $1 should have succeeded" >&2
+ cat /proc/mdstat >&2
+ mdadm -E $2
+ exit 1
+ fi
+}
+
+export IMSM_DEVNAME_AS_SERIAL=1
+export IMSM_TEST_OROM=1
+export IMSM_NO_PLATFORM=1
+container=/dev/md/container
+member=/dev/md/vol0
+
+
+num_disks=4
+size=$((10*1024))
+mdadm -CR $container -e imsm -n $num_disks $dev0 $dev1 $dev2 $dev3
+mdadm -CR $member $dev0 $dev2 -n 2 -l 1 -z $size
+mdadm --wait $member || true
+mdadm -Ss
+
+# make dev0 and dev1 a new rebuild family
+mdadm -A $container $dev0 $dev1
+mdadm -IR $container
+mdadm --wait ${member}_0 || true
+mdadm -Ss
+
+# make dev2 and dev3 a new rebuild family
+mdadm -A $container $dev2 $dev3
+mdadm -IR $container
+mdadm --wait ${member}_0 || true
+mdadm -Ss
+
+# reassemble and make sure one of the families falls out
+mdadm -A $container $dev0 $dev1 $dev2 $dev3
+mdadm -IR $container
+testdev ${member}_0 1 $size 64
+if mdadm --remove $container $dev0 ; then
+ # the dev[23] family won
+ imsm_check_removal $container $dev1
+ imsm_check_hold $container $dev2
+ imsm_check_hold $container $dev3
+else
+ # the dev[01] family won
+ imsm_check_hold $container $dev1
+ imsm_check_removal $container $dev2
+ imsm_check_removal $container $dev3
+fi
+mdadm -Ss
+
+# reassemble with a new id for the dev[23] family
+mdadm -A $container $dev0 $dev1
+mdadm -IR $container
+mdadm -A ${container}2 $dev2 $dev3 --update=uuid
+mdadm -IR ${container}2
+
+testdev ${member}_0 1 $size 64
+testdev ${member}_1 1 $size 64
diff --git a/tests/09imsm-assemble.broken b/tests/09imsm-assemble.broken
new file mode 100644
index 0000000..a6d4d5c
--- /dev/null
+++ b/tests/09imsm-assemble.broken
@@ -0,0 +1,6 @@
+fails infrequently
+
+Fails roughly 1 in 10 runs with errors:
+
+ mdadm: /dev/loop2 is still in use, cannot remove.
+ /dev/loop2 removal from /dev/md/container should have succeeded
diff --git a/tests/09imsm-create-fail-rebuild b/tests/09imsm-create-fail-rebuild
new file mode 100644
index 0000000..f09b437
--- /dev/null
+++ b/tests/09imsm-create-fail-rebuild
@@ -0,0 +1,78 @@
+# sanity check array creation
+
+imsm_check_hold() {
+ if mdadm --remove $1 $2; then
+ echo "$2 removal from $1 should have been blocked" >&2
+ cat /proc/mdstat >&2
+ mdadm -E $2
+ exit 1
+ fi
+}
+
+imsm_check_removal() {
+ if ! mdadm --remove $1 $2 ; then
+ echo "$2 removal from $1 should have succeeded" >&2
+ cat /proc/mdstat >&2
+ mdadm -E $2
+ exit 1
+ fi
+}
+
+. tests/env-imsm-template
+
+# IMSM rounds to multiples of one mebibyte - 1024K
+DEV_ROUND_K=1024
+
+num_disks=2
+mdadm -CR $container -e imsm -n $num_disks $dev0 $dev1
+imsm_check container $num_disks
+
+# RAID0 + RAID1
+size=9000
+level=0
+chunk=64
+offset=0
+mdadm -CR $member0 $dev0 $dev1 -n $num_disks -l $level -z $size -c $chunk
+imsm_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk
+testdev $member0 $num_disks $size $chunk
+
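+# expected per-disk start of the second member: $size rounded down to a
+# 1024K boundary (matching DEV_ROUND_K above) plus a further 4096K,
+# presumably reserved space between the two volumes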
+offset=$(((size & ~(1024 - 1)) + 4096))
+size=4000
+level=1
+chunk=0
+mdadm -CR $member1 $dev0 $dev1 -n $num_disks -l $level -z $size
+imsm_check member $member1 $num_disks $level $size $size $offset $chunk
+testdev $member1 1 $size 64
+check wait
+
+mdadm -Ss
+
+# RAID10 + RAID5
+num_disks=4
+mdadm -CR $container -e imsm -n $num_disks $dev0 $dev1 $dev2 $dev3
+imsm_check container $num_disks
+
+size=9000
+level=10
+chunk=64
+offset=0
+mdadm -CR $member0 $dev0 $dev1 $dev2 $dev3 -n $num_disks -l $level -z $size -c $chunk
+imsm_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk
+testdev $member0 $((num_disks-2)) $size $chunk
+
+offset=$(((size & ~(1024 - 1)) + 4096))
+size=4000
+level=5
+mdadm -CR $member1 $dev0 $dev1 $dev2 $dev3 -n $num_disks -l $level -z $size -c $chunk
+imsm_check member $member1 $num_disks $level $size $((size*3)) $offset $chunk
+testdev $member1 $((num_disks-1)) $size $chunk
+check wait
+
+# FAIL / REBUILD
+imsm_check_hold $container $dev0
+mdadm --fail $member0 $dev0
+mdadm --wait-clean --scan || true
+imsm_check_removal $container $dev0
+mdadm --add $container $dev4
+check wait
+imsm_check_hold $container $dev4
diff --git a/tests/09imsm-create-fail-rebuild.broken b/tests/09imsm-create-fail-rebuild.broken
new file mode 100644
index 0000000..40c4b29
--- /dev/null
+++ b/tests/09imsm-create-fail-rebuild.broken
@@ -0,0 +1,5 @@
+always fails
+
+Fails with error:
+
+ **Error**: Array size mismatch - expected 3072, actual 16384
diff --git a/tests/09imsm-overlap.broken b/tests/09imsm-overlap.broken
new file mode 100644
index 0000000..e7ccab7
--- /dev/null
+++ b/tests/09imsm-overlap.broken
@@ -0,0 +1,7 @@
+always fails
+
+Fails with errors:
+
+ **Error**: Offset mismatch - expected 15360, actual 0
+ **Error**: Offset mismatch - expected 15360, actual 0
+ /dev/md/vol3 failed check
diff --git a/tests/10ddf-assemble-missing b/tests/10ddf-assemble-missing
new file mode 100644
index 0000000..4bf21b2
--- /dev/null
+++ b/tests/10ddf-assemble-missing
@@ -0,0 +1,61 @@
+# An array is assembled incompletely.
+# The missing disks get marked as missing and are not allowed back in.
+
+. tests/env-ddf-template
+tmp=$(mktemp /tmp/mdtest-XXXXXX)
+rm -f $tmp /var/tmp/mdmon.log
+ret=0
+
+mdadm -CR $container -e ddf -n 4 $dev8 $dev9 $dev10 $dev11
+ddf_check container 4
+
+mdadm -CR $member1 -n 4 -l 10 $dev8 $dev10 $dev9 $dev11 -z 10000
+mdadm -CR $member0 -n 2 -l 1 $dev8 $dev9 -z 10000
+
+mdadm --wait $member0 || true
+mdadm --wait $member1 || true
+
+mdadm -Ss
+sleep 1
+
+# Add all devices except those for $member0
+mdadm -I $dev10
+mdadm -I $dev11
+
+# Start runnable members
+mdadm -IRs || true
+mdadm -Ss
+
+#[ -f /var/tmp/mdmon.log ] && cat /var/tmp/mdmon.log
+
+# Now reassemble
+# This should work because BVDs weren't written to
+for d in $dev8 $dev9 $dev10 $dev11; do
+ mdadm -I $d
+done
+mdadm -Ss
+
+# Expect consistent state
+for d in $dev10 $dev11; do
+ mdadm -E $d>$tmp
+ egrep 'state\[0\] : Degraded, Consistent' $tmp || {
+ ret=1
+ echo ERROR: $member0 has unexpected state on $d
+ }
+ egrep 'state\[1\] : Optimal, Consistent' $tmp || {
+ ret=1
+ echo ERROR: $member1 has unexpected state on $d
+ }
+
+ if [ x$(egrep -c 'active/Online$' $tmp) != x2 ]; then
+ ret=1
+ echo ERROR: unexpected number of online disks on $d
+ fi
+done
+
+if [ $ret -ne 0 ]; then
+ mdadm -E $dev10
+ mdadm -E $dev8
+fi
+rm -f $tmp /var/tmp/mdmon.log
+[ $ret -eq 0 ]
diff --git a/tests/10ddf-assemble-missing.broken b/tests/10ddf-assemble-missing.broken
new file mode 100644
index 0000000..bfd8d10
--- /dev/null
+++ b/tests/10ddf-assemble-missing.broken
@@ -0,0 +1,6 @@
+always fails
+
+Fails with errors:
+
+ ERROR: /dev/md/vol0 has unexpected state on /dev/loop10
+ ERROR: unexpected number of online disks on /dev/loop10
diff --git a/tests/10ddf-create b/tests/10ddf-create
new file mode 100644
index 0000000..44e9544
--- /dev/null
+++ b/tests/10ddf-create
@@ -0,0 +1,89 @@
+#
+# Test basic DDF functionality.
+#
+# Create a container with 5 drives
+# create a small raid5 across them all,
+# then a small raid10 using 4 drives, then a 2-disk raid1
+# and a 3-disk raid0 using the remaining space
+#
+# add some data, tear down the array, reassemble
+# and make sure it is still there.
+set -e
+. tests/env-ddf-template
+sda=$(get_rootdev) || exit 1
+
+mdadm -CR /dev/md/ddf0 -e ddf -n 5 $dev8 $dev9 $dev10 $dev11 $dev12
+mdadm -CR r5 -l5 -n5 /dev/md/ddf0 -z 5000
+if mdadm -CR r5 -l1 -n2 /dev/md/ddf0 -z 5000
+then echo >&2 create with same name should fail ; exit 1
+fi
+mdadm -CR r10 -l10 -n4 -pn2 /dev/md/ddf0 -z 5000
+mdadm -CR r1 -l1 -n2 /dev/md/ddf0
+mdadm -CR r0 -l0 -n3 /dev/md/ddf0
+testdev /dev/md/r5 4 5000 512
+testdev /dev/md/r10 2 5000 512
+# r0/r10 will use 4608 due to chunk size, so that leaves 23552 for the rest
+testdev /dev/md/r1 1 23552 64
+testdev /dev/md/r0 3 23552 512
+dd if=$sda of=/dev/md/r0 || true
+dd if=$sda of=/dev/md/r10 || true
+dd if=$sda of=/dev/md/r1 || true
+dd if=$sda of=/dev/md/r5 || true
+
+s0=`sha1sum /dev/md/r0`
+s10=`sha1sum /dev/md/r10`
+s1=`sha1sum /dev/md/r1`
+s5=`sha1sum /dev/md/r5`
+
+
+mdadm -Ss
+mdadm -A /dev/md/ddf0 $dev8 $dev9 $dev10 $dev11 $dev12
+mdadm -I /dev/md/ddf0
+
+udevadm settle
+s0a=`sha1sum /dev/md/r0`
+s10a=`sha1sum /dev/md/r10`
+s1a=`sha1sum /dev/md/r1`
+s5a=`sha1sum /dev/md/r5`
+
+if [ "$s0" != "$s0a" ]; then
+ echo r0 did not match ; exit 1;
+fi
+if [ "$s10" != "$s10a" ]; then
+ echo r10 did not match ; exit 1;
+fi
+if [ "$s1" != "$s1a" ]; then
+ echo r1 did not match ; exit 1;
+fi
+if [ "$s5" != "$s5a" ]; then
+ echo r5 did not match ; exit 1;
+fi
+
+# failure status just means it has completed already, so ignore it.
+mdadm --wait /dev/md/r1 || true
+mdadm --wait /dev/md/r10 || true
+mdadm --wait /dev/md/r5 || true
+
+mdadm -Dbs > /var/tmp/mdadm.conf
+
+mdadm -Ss
+
+# Now try to assemble using mdadm.conf
+mdadm -Asc /var/tmp/mdadm.conf
+check nosync # This failed once. The raid5 was resyncing.
+udevadm settle
+mdadm -Dbs | sort > /tmp/mdadm.conf
+sort /var/tmp/mdadm.conf | diff /tmp/mdadm.conf -
+mdadm -Ss
+
+# and now assemble fully incrementally.
+for i in $dev8 $dev9 $dev10 $dev11 $dev12
+do
+ mdadm -I $i -c /var/tmp/mdadm.conf
+done
+check nosync
+udevadm settle
+mdadm -Dbs | sort > /tmp/mdadm.conf
+sort /var/tmp/mdadm.conf | diff /tmp/mdadm.conf -
+mdadm -Ss
+rm /tmp/mdadm.conf /var/tmp/mdadm.conf
diff --git a/tests/10ddf-create-fail-rebuild b/tests/10ddf-create-fail-rebuild
new file mode 100644
index 0000000..a8e8ced
--- /dev/null
+++ b/tests/10ddf-create-fail-rebuild
@@ -0,0 +1,77 @@
+# sanity check array creation
+
+ddf_check_hold() {
+ if mdadm --remove $1 $2; then
+ echo "$2 removal from $1 should have been blocked" >&2
+ cat /proc/mdstat >&2
+ mdadm -E $2
+ exit 1
+ fi
+}
+
+ddf_check_removal() {
+ if ! mdadm --remove $1 $2 ; then
+ echo "$2 removal from $1 should have succeeded" >&2
+ cat /proc/mdstat >&2
+ mdadm -E $2
+ exit 1
+ fi
+}
+
+. tests/env-ddf-template
+
+num_disks=2
+mdadm -CR $container -e ddf -n $num_disks $dev8 $dev9
+ddf_check container $num_disks
+
+# RAID0 + RAID1
+size=9000
+level=0
+chunk=64
+offset=0
+layout=0
+mdadm -CR $member0 $dev8 $dev9 -n $num_disks -l $level -z $size -c $chunk
+ddf_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk $layout
+testdev $member0 $num_disks $size $chunk
+
+offset=$(((size & ~(chunk - 1))))
+size=4000
+level=1
+chunk=0
+mdadm -CR $member1 $dev8 $dev9 -n $num_disks -l $level -z $size
+ddf_check member $member1 $num_disks $level $size $size $offset $chunk $layout
+testdev $member1 1 $size 1
+check wait
+
+mdadm -Ss
+
+# RAID10 + RAID5
+num_disks=4
+mdadm -CR $container -e ddf -n $num_disks $dev8 $dev9 $dev10 $dev11
+ddf_check container $num_disks
+
+size=9000
+level=10
+chunk=64
+offset=0
+layout=2
+mdadm -CR $member0 $dev8 $dev9 $dev10 $dev11 -n $num_disks -l $level -z $size -c $chunk
+ddf_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk $layout
+testdev $member0 $((num_disks-2)) $size $chunk
+
+offset=$(((size & ~(chunk - 1))))
+size=4000
+level=5
+mdadm -CR $member1 $dev8 $dev9 $dev10 $dev11 -n $num_disks -l $level -z $size -c $chunk
+ddf_check member $member1 $num_disks $level $size $((size*3)) $offset $chunk $layout
+testdev $member1 $((num_disks-1)) $size $chunk
+check wait
+
+# FAIL / REBUILD
+ddf_check_hold $container $dev8
+mdadm --fail $member0 $dev8
+mdadm --wait-clean --scan || true
+ddf_check_removal $container $dev8
+mdadm --add $container $dev12
+check wait
+ddf_check_hold $container $dev12
diff --git a/tests/10ddf-fail-create-race b/tests/10ddf-fail-create-race
new file mode 100644
index 0000000..bd5dfb5
--- /dev/null
+++ b/tests/10ddf-fail-create-race
@@ -0,0 +1,66 @@
+# This test creates a RAID1, fails a disk, and immediately
+# (simultaneously) creates a new array. This tests for a possible
+# race where the meta data reflecting the disk failure may not
+# be written when the 2nd array is created.
+. tests/env-ddf-template
+
+mdadm --zero-superblock $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
+
+mdadm -CR $container -e ddf -l container -n 2 $dev11 $dev12
+#$dir/mdadm -CR $member0 -l raid1 -n 2 $container -z 10000 >/tmp/mdmon.txt 2>&1
+mdadm -CR $member0 -l raid1 -n 2 $container -z 10000
+check wait
+fail0=$dev11
+mdadm --fail $member0 $fail0 &
+
+# The test can succeed two ways:
+# 1) mdadm -C member1 fails - in this case the meta data
+# was already on disk when the create attempt was made
+# 2) mdadm -C succeeds in the first place (meta data not on disk yet),
+# but mdmon detects the problem and sets the disk faulty.
+
+if mdadm -CR $member1 -l raid1 -n 2 $container; then
+
+ echo create should have failed / race condition?
+
+ check wait
+ set -- $(get_raiddisks $member0)
+ d0=$1
+ ret=0
+ if [ $1 = $fail0 -o $2 = $fail0 ]; then
+ ret=1
+ else
+ set -- $(get_raiddisks $member1)
+ if [ $1 = $fail0 -o $2 = $fail0 ]; then
+ ret=1
+ fi
+ fi
+ if [ $ret -eq 1 ]; then
+ echo ERROR: failed disk $fail0 is still a RAID member
+ echo $member0: $(get_raiddisks $member0)
+ echo $member1: $(get_raiddisks $member1)
+ fi
+ tmp=$(mktemp /tmp/mdest-XXXXXX)
+ mdadm -E $d0 >$tmp
+ if [ x$(grep -c 'state\[[01]\] : Degraded' $tmp) != x2 ]; then
+ echo ERROR: non-degraded array found
+ mdadm -E $d0
+ ret=1
+ fi
+ if ! grep -q '^ *0 *[0-9a-f]\{8\} .*Offline, Failed' $tmp; then
+ echo ERROR: disk 0 not marked as failed in meta data
+ mdadm -E $d0
+ ret=1
+ fi
+ rm -f $tmp
+else
+ ret=0
+fi
+
+[ -f /tmp/mdmon.txt ] && {
+ cat /tmp/mdmon.txt
+ rm -f /tmp/mdmon.txt
+}
+
+[ $ret -eq 0 ]
+
diff --git a/tests/10ddf-fail-create-race.broken b/tests/10ddf-fail-create-race.broken
new file mode 100644
index 0000000..6c0df02
--- /dev/null
+++ b/tests/10ddf-fail-create-race.broken
@@ -0,0 +1,7 @@
+usually fails
+
+Fails about 9 out of 10 times with many errors:
+
+ mdadm: cannot open MISSING: No such file or directory
+ ERROR: non-degraded array found
+ ERROR: disk 0 not marked as failed in meta data
diff --git a/tests/10ddf-fail-readd b/tests/10ddf-fail-readd
new file mode 100644
index 0000000..9cd7893
--- /dev/null
+++ b/tests/10ddf-fail-readd
@@ -0,0 +1,55 @@
+# Simple fail / re-add test
+. tests/env-ddf-template
+
+tmp=$(mktemp /tmp/mdtest-XXXXXX)
+rm -f $tmp
+
+mdadm --zero-superblock $dev8 $dev9
+mdadm -CR $container -e ddf -l container -n 2 $dev8 $dev9
+
+mdadm -CR $member0 -l raid1 -n 2 $container
+#$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1
+
+mke2fs -F $member0
+check wait
+
+set -- $(get_raiddisks $member0)
+fail0=$1
+mdadm $member0 --fail $fail0
+
+sleep 1
+mdadm $container --remove $fail0
+
+set -- $(get_raiddisks $member0)
+case $1 in MISSING) shift;; esac
+good0=$1
+
+# We re-add the disk now
+mdadm $container --add $fail0
+
+sleep 1
+mdadm --wait $member0 || true
+
+ret=0
+set -- $(get_raiddisks $member0)
+case $1:$2 in
+ $dev8:$dev9|$dev9:$dev8);;
+ *) echo ERROR: bad raid disks "$@"; ret=1;;
+esac
+
+mdadm -Ss
+for x in $@; do
+ mdadm -E $x >$tmp
+ if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then
+ echo ERROR: member 0 should be optimal in meta data on $x
+ ret=1
+ fi
+done
+
+rm -f $tmp
+if [ $ret -ne 0 ]; then
+ mdadm -E $dev8
+ mdadm -E $dev9
+fi
+
+[ $ret -eq 0 ]
diff --git a/tests/10ddf-fail-readd-readonly b/tests/10ddf-fail-readd-readonly
new file mode 100644
index 0000000..6a74d9c
--- /dev/null
+++ b/tests/10ddf-fail-readd-readonly
@@ -0,0 +1,71 @@
+# Simple fail / re-add test
+. tests/env-ddf-template
+
+tmp=$(mktemp /tmp/mdtest-XXXXXX)
+rm -f $tmp
+
+mdadm --zero-superblock $dev8 $dev9
+mdadm -CR $container -e ddf -l container -n 2 $dev8 $dev9
+
+mdadm -CR $member0 -l raid1 -n 2 $container
+#$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1
+
+check wait
+
+set -- $(get_raiddisks $member0)
+fail0=$1
+mdadm $member0 --fail $fail0
+
+sleep 1
+set -- $(get_raiddisks $member0)
+case $1 in MISSING) shift;; esac
+good0=$1
+
+# Check that the meta data now show one disk as failed
+ret=0
+for x in $@; do
+ mdadm -E $x >$tmp
+ if ! grep -q 'state\[0\] : Degraded, Consistent' $tmp; then
+ echo ERROR: member 0 should be degraded in meta data on $x
+ ret=1
+ fi
+ phys=$(grep $x $tmp)
+ case $x:$phys in
+ $fail0:*active/Offline,\ Failed);;
+ $good0:*active/Online);;
+ *) echo ERROR: wrong phys disk state for $x
+ ret=1
+ ;;
+ esac
+done
+
+mdadm $container --remove $fail0
+
+# We re-add the disk now
+mdadm $container --add $fail0
+
+sleep 1
+mdadm --wait $member0 || true
+
+set -- $(get_raiddisks $member0)
+case $1:$2 in
+ $dev8:$dev9|$dev9:$dev8);;
+ *) echo ERROR: bad raid disks "$@"; ret=1;;
+esac
+
+mdadm -Ss
+for x in $@; do
+ mdadm -E $x >$tmp
+ if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then
+ echo ERROR: member 0 should be optimal in meta data on $x
+ ret=1
+ fi
+done
+
+rm -f $tmp
+if [ $ret -ne 0 ]; then
+ mdadm -E $dev8
+ mdadm -E $dev9
+fi
+
+[ $ret -eq 0 ]
diff --git a/tests/10ddf-fail-spare b/tests/10ddf-fail-spare
new file mode 100644
index 0000000..ab737ca
--- /dev/null
+++ b/tests/10ddf-fail-spare
@@ -0,0 +1,86 @@
+# Test suggested by Albert Pauw: Create, fail one disk, have mdmon
+# activate the spare,
+# then run create again. The new create shouldn't use the failed disk.
+. tests/env-ddf-template
+
+tmp=$(mktemp /tmp/mdtest-XXXXXX)
+rm -f $tmp
+
+mdadm --zero-superblock $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
+mdadm -CR $container -e ddf -l container -n 5 $dev8 $dev9 $dev10 $dev11 $dev12
+
+mdadm -CR $member0 -l raid1 -n 2 $container
+#$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1
+
+check wait
+
+set -- $(get_raiddisks $member0)
+fail0=$1
+mdadm --fail $member0 $fail0
+
+# To make sure the spare is activated, we may have to sleep;
+# 2s has always been enough for me.
+sleep 2
+check wait
+
+# This test can succeed both ways: if the spare was activated before the
+# new array was created, we see only member 0; otherwise we see both, and
+# member0 is degraded because the new array grabbed the spare.
+# Which case occurs depends on the sleep time above.
+ret=0
+if mdadm -CR $member1 -l raid5 -n 3 $container; then
+ # Creation successful - must have been quicker than spare activation
+
+ check wait
+ set -- $(get_raiddisks $member1)
+ if [ $1 = $fail0 -o $2 = $fail0 -o $3 = $fail0 ]; then
+ echo ERROR: $member1 must not contain $fail0: $@
+ ret=1
+ fi
+ d1=$1
+ mdadm -E $d1 >$tmp
+ if ! grep -q 'state\[1\] : Optimal, Consistent' $tmp; then
+ echo ERROR: member 1 should be optimal in meta data
+ ret=1
+ fi
+ state0=Degraded
+else
+ # Creation unsuccessful - spare was used for member 0
+ state0=Optimal
+fi
+
+# need to delay a little bit, sometimes the meta data aren't
+# up-to-date yet
+sleep 0.5
+set -- $(get_raiddisks $member0)
+if [ $1 = $fail0 -o $2 = $fail0 ]; then
+ echo ERROR: $member0 must not contain $fail0: $@
+ ret=1
+fi
+d0=$1
+
+[ -f $tmp ] || mdadm -E $d0 >$tmp
+
+if ! grep -q 'state\[0\] : '$state0', Consistent' $tmp; then
+ echo ERROR: member 0 should be $state0 in meta data
+ ret=1
+fi
+if ! grep -q 'Offline, Failed' $tmp; then
+ echo ERROR: Failed disk expected in meta data
+ ret=1
+fi
+if [ $ret -eq 1 ]; then
+ cat /proc/mdstat
+ mdadm -E $d0
+ mdadm -E $d1
+ mdadm -E $fail0
+fi
+
+[ -f /tmp/mdmon.txt ] && {
+ cat /tmp/mdmon.txt
+ rm -f /tmp/mdmon.txt
+}
+
+rm -f $tmp
+[ $ret -eq 0 ]
diff --git a/tests/10ddf-fail-stop-readd b/tests/10ddf-fail-stop-readd
new file mode 100644
index 0000000..f8ebe17
--- /dev/null
+++ b/tests/10ddf-fail-stop-readd
@@ -0,0 +1,66 @@
+# Simple fail / re-add test
+. tests/env-ddf-template
+
+tmp=$(mktemp /tmp/mdtest-XXXXXX)
+rm -f $tmp
+
+mdadm --zero-superblock $dev8 $dev9
+mdadm -CR $container -e ddf -l container -n 2 $dev8 $dev9
+
+mdadm -CR $member0 -l raid1 -n 2 $container
+#$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1
+
+# Write to the array
+mke2fs -F $member0
+check wait
+
+set -- $(get_raiddisks $member0)
+fail0=$1
+mdadm $member0 --fail $fail0
+
+sleep 1
+mdadm $container --remove $fail0
+
+set -- $(get_raiddisks $member0)
+case $1 in MISSING) shift;; esac
+good0=$1
+
+mdadm -Ss
+
+sleep 1
+# Now simulate incremental assembly
+mdadm -I $good0
+mdadm -IRs || true
+
+# Write to the array
+mke2fs -F $member0
+
+# We re-add the disk now
+mdadm $container --add $fail0
+
+sleep 1
+mdadm --wait $member0 || true
+
+ret=0
+set -- $(get_raiddisks $member0)
+case $1:$2 in
+ $dev8:$dev9|$dev9:$dev8);;
+ *) echo ERROR: bad raid disks "$@"; ret=1;;
+esac
+
+mdadm -Ss
+for x in $@; do
+ mdadm -E $x >$tmp
+ if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then
+ echo ERROR: member 0 should be optimal in meta data on $x
+ ret=1
+ fi
+done
+
+rm -f $tmp
+if [ $ret -ne 0 ]; then
+ mdadm -E $dev8
+ mdadm -E $dev9
+fi
+
+[ $ret -eq 0 ]
diff --git a/tests/10ddf-fail-twice b/tests/10ddf-fail-twice
new file mode 100644
index 0000000..6af1943
--- /dev/null
+++ b/tests/10ddf-fail-twice
@@ -0,0 +1,59 @@
+. tests/env-ddf-template
+
+num_disks=5
+mdadm -CR $container -e ddf -n $num_disks $dev8 $dev9 $dev10 $dev11 $dev12
+ddf_check container $num_disks
+
+mdadm -CR $member0 -n 2 -l 1 $container
+mdadm -CR $member1 -n 3 -l 5 $container
+
+mdadm --wait $member1 $member0 || mdadm --wait $member1 $member0 || true
+
+set -- $(get_raiddisks $member0)
+fail0=$1
+mdadm $member0 --fail $fail0
+set -- $(get_raiddisks $member1)
+fail1=$1
+mdadm $member1 --fail $fail1
+
+mdadm $container --add $dev13
+
+mdadm --wait $member1 $member0 || mdadm --wait $member1 $member0 || true
+
+
+devs0="$(get_raiddisks $member0)"
+devs1="$(get_raiddisks $member1)"
+
+present=$(($(get_present $member0) + $(get_present $member1)))
+[ $present -eq 4 ] || {
+ echo expected 4 present disks, got $present
+ echo devices for $member0: $devs0
+ echo devices for $member1: $devs1
+ exit 1
+}
+
+if echo "$devs0" | grep -q MISSING; then
+ good=1
+ bad=0
+else
+ good=0
+ bad=1
+fi
+
+# find a good device
+eval "set -- \$devs$good"
+check=$1
+
+tmp=$(mktemp /tmp/mdtest-XXXXXX)
+mdadm -E $check >$tmp
+
+{ grep -q 'state\['$bad'\] : Degraded, Consistent' $tmp &&
+ grep -q 'state\['$good'\] : Optimal, Consistent' $tmp; } || {
+ echo unexpected meta data state on $check
+ mdadm -E $check
+ rm -f $tmp
+ exit 1
+}
+
+rm -f $tmp
+exit 0
diff --git a/tests/10ddf-fail-two-spares b/tests/10ddf-fail-two-spares
new file mode 100644
index 0000000..e00810d
--- /dev/null
+++ b/tests/10ddf-fail-two-spares
@@ -0,0 +1,86 @@
+# Simulate two disks failing shortly after each other
+. tests/env-ddf-template
+sda=$(get_rootdev) || exit 1
+tmp=$(mktemp /tmp/mdtest-XXXXXX)
+
+mdadm --zero-superblock $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
+mdadm -CR $container -e ddf -l container -n 6 \
+ $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
+
+#fast_sync
+
+mdadm -CR $member0 -l raid6 -n 4 $dev10 $dev11 $dev12 $dev13 -z 16384
+#$dir/mdadm -CR $member0 -l raid6 -n 4 $dev10 $dev11 $dev12 $dev13 -z 16384 \
+# >/tmp/mdmon.txt 2>&1
+mdadm -CR $member1 -l raid10 -n 4 $dev10 $dev11 $dev12 $dev13 -z 16384
+
+dd if=$sda of=$member0 bs=1M count=32
+dd if=$sda of=$member1 bs=1M skip=16 count=16
+
+check wait
+
+sum0=$(sha1sum $member0)
+sum1=$(sha1sum $member1)
+
+mdadm --fail $member1 $dev11
+sleep 1
+mdadm --fail $member1 $dev12
+
+# We will have 4 resync procedures, 2 spares for 2 arrays.
+mdadm --wait $member1 $member0 || true
+mdadm --wait $member1 $member0 || true
+
+devs0="$(get_raiddisks $member0)"
+devs1="$(get_raiddisks $member1)"
+expected="$dev10
+$dev13
+$dev8
+$dev9"
+
+ret=0
+if [ "$(echo "$devs0" | sort)" != "$expected" \
+ -o "$(echo "$devs1" | sort)" != "$expected" ]; then
+ echo ERROR: unexpected members
+ echo $member0: $devs0
+ echo $member1: $devs1
+ ret=1
+fi
+
+mdadm -E $dev10 >$tmp
+if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then
+ echo ERROR: $member0 should be optimal in meta data
+ ret=1
+fi
+if ! grep -q 'state\[1\] : Optimal, Consistent' $tmp; then
+ echo ERROR: $member1 should be optimal in meta data
+ ret=1
+fi
+if [ x"$(grep -c active/Online $tmp)" != x4 ]; then
+ echo ERROR: expected 4 online disks
+ ret=1
+fi
+if [ x"$(grep -c "Offline, Failed" $tmp)" != x2 ]; then
+ echo ERROR: expected 2 failed disks
+ ret=1
+fi
+
+sum0a=$(sha1sum $member0)
+sum1a=$(sha1sum $member1)
+
+if [ "$sum0" != "$sum0a" -o "$sum1" != "$sum1a" ]; then
+ echo ERROR: checksum mismatch
+ ret=1
+fi
+
+if [ $ret -eq 1 ]; then
+ cat /proc/mdstat
+ cat $tmp
+fi
+
+[ -f /tmp/mdmon.txt ] && {
+ cat /tmp/mdmon.txt
+ rm -f /tmp/mdmon.txt
+}
+rm -f $tmp
+
+[ $ret -eq 0 ]
diff --git a/tests/10ddf-fail-two-spares.broken b/tests/10ddf-fail-two-spares.broken
new file mode 100644
index 0000000..eeea56d
--- /dev/null
+++ b/tests/10ddf-fail-two-spares.broken
@@ -0,0 +1,5 @@
+fails intermittently
+
+Fails roughly 1 in 3 with error:
+
+ ERROR: /dev/md/vol1 should be optimal in meta data
diff --git a/tests/10ddf-geometry b/tests/10ddf-geometry
new file mode 100644
index 0000000..b0cce2f
--- /dev/null
+++ b/tests/10ddf-geometry
@@ -0,0 +1,82 @@
+#
+# Test various RAID geometries, creation and deletion of subarrays
+#
+
+assert_fail() {
+ if mdadm "$@"; then
+ echo mdadm "$@" must fail
+ return 1
+ else
+ return 0
+ fi
+}
+
+assert_kill() {
+ local dev=$1 n=$2
+ mdadm -S $dev
+ mdadm --kill-subarray=$n /dev/md/ddf0
+ if mdadm -Dbs | grep -q $dev; then
+ echo >&2 $dev should be deleted
+ return 1
+ fi
+ return 0
+}
+
+set -e
+mdadm -CR /dev/md/ddf0 -e ddf -n 6 $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
+
+# RAID1 geometries
+# Use different sizes to make offset calculation harder
+mdadm -CR l1s -l1 -n2 /dev/md/ddf0 -z 8000
+mdadm -CR l1m -l1 -n3 $dev8 $dev9 $dev10 -z 10000
+assert_fail -CR badl1 -l1 -n4 /dev/md/ddf0
+
+# RAID10 geometries
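+# (-pnX is shorthand for --layout=nX, i.e. X "near" copies of each data block;
+# the -n4 -pn3 combination below is one the DDF code is expected to reject)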
+mdadm -CR l10_0 -l10 -n3 /dev/md/ddf0 -z 1000
+mdadm -CR l10_1 -l10 -n5 /dev/md/ddf0 -z 1000
+assert_fail -CR badl10 -l10 -n4 -pn3 /dev/md/ddf0
+mdadm -CR l10_2 -l10 -n6 -pn2 /dev/md/ddf0 -z 4000
+mdadm -CR l10_3 -l10 -n6 -pn3 /dev/md/ddf0 -z 4000
+
+assert_fail -CR l10_2 -l10 -n6 -pn2 /dev/md/ddf0 -z 5000
+assert_kill /dev/md/l10_2 4
+# gone now, must be able to create it again
+mdadm -CR l10_2 -l10 -n6 -pn2 /dev/md/ddf0 -z 5000
+
+# Now stop and reassemble
+mdadm -Ss
+mdadm -A /dev/md/ddf0 $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
+
+# Same as above, on inactive container
+assert_fail -CR l10_3 -l10 -n6 -pn2 /dev/md/ddf0 -z 5000
+# Kill subarray without having started anything (no mdmon)
+mdadm --kill-subarray=5 /dev/md/ddf0
+mdadm -I /dev/md/ddf0
+mdadm -CR l10_3 -l10 -n6 -pn3 /dev/md/ddf0 -z 5000
+
+assert_kill /dev/md/l10_2 4
+assert_kill /dev/md/l10_3 5
+
+# RAID5 geometries
+mdadm -CR l5la -l5 -n3 --layout=ddf-N-restart /dev/md/ddf0 -z 5000
+mdadm -CR l5ra -l5 -n3 --layout=ddf-zero-restart /dev/md/ddf0 -z 5000
+mdadm -CR l5ls -l5 -n3 --layout=ddf-N-continue /dev/md/ddf0 -z 5000
+assert_fail -CR l5rs -l5 -n3 -prs /dev/md/ddf0 -z 5000
+
+# Stop and reassemble
+mdadm -Ss
+mdadm -A /dev/md/ddf0 $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
+mdadm -I /dev/md/ddf0
+
+assert_kill /dev/md/l5la 4
+assert_kill /dev/md/l5ls 6
+assert_kill /dev/md/l5ra 5
+
+# RAID6 geometries
+assert_fail -CR l6la -l6 -n3 -pla /dev/md/ddf0 -z 5000
+assert_fail -CR l6rs -l5 -n4 -prs /dev/md/ddf0 -z 5000
+mdadm -CR l6la -l6 -n4 --layout=ddf-N-restart /dev/md/ddf0 -z 5000
+mdadm -CR l6ra -l6 -n4 --layout=ddf-zero-restart $dev8 $dev9 $dev10 $dev11 -z 5000
+mdadm -CR l6ls -l6 -n4 --layout=ddf-N-continue $dev13 $dev8 $dev9 $dev12 -z 5000
+
+mdadm -Ss
diff --git a/tests/10ddf-incremental-wrong-order b/tests/10ddf-incremental-wrong-order
new file mode 100644
index 0000000..9ecf6bc
--- /dev/null
+++ b/tests/10ddf-incremental-wrong-order
@@ -0,0 +1,131 @@
+# An array is assembled incompletely. Some disks will
+# have later metadata than others.
+# The array is then reassembled in the "wrong" order -
+# older meta data first.
+# This FAILS with mdadm 3.3
+. tests/env-ddf-template
+tmp=$(mktemp /tmp/mdtest-XXXXXX)
+rm -f $tmp /var/tmp/mdmon.log
+ret=0
+
+mdadm -CR $container -e ddf -n 4 $dev8 $dev9 $dev10 $dev11
+ddf_check container 4
+
+mdadm -CR $member1 -n 4 -l 10 $dev8 $dev10 $dev9 $dev11 -z 10000
+mdadm -CR $member0 -n 2 -l 1 $dev8 $dev9 -z 10000
+
+mdadm --wait $member0 || true
+mdadm --wait $member1 || true
+
+mke2fs -F $member0
+mke2fs -F $member1
+sha_0a=$(sha1_sum $member0)
+sha_1a=$(sha1_sum $member1)
+
+mdadm -Ss
+sleep 1
+
+# Add all devices except those for $member0
+mdadm -I $dev10
+mdadm -I $dev11
+
+# Start runnable members ($member1) and write
+mdadm -IRs || true
+e2fsck -fy $member1
+sha_1b=$(sha1_sum $member1)
+
+mdadm -Ss
+sleep 1
+
+# Seq number should be different now
+seq8a=$(mdadm -E $dev8 | sed -n 's/^ *Seq : //p')
+seq10a=$(mdadm -E $dev10 | sed -n 's/^ *Seq : //p')
+
+if [ $seq8a -ge $seq10a ]; then
+ ret=1
+ echo ERROR: sequential number of $dev10 is not bigger than that of $dev8
+fi
+if [ x$sha_1a = x$sha_1b ]; then
+ ret=1
+ echo ERROR: sha1sums equal after write
+fi
+
+#[ -f /var/tmp/mdmon.log ] && cat /var/tmp/mdmon.log
+
+# Now reassemble
+# Note that we add the previously missing disks first.
+# $dev10 should have a higher seq number than $dev8
+for d in $dev8 $dev9 $dev10 $dev11; do
+ mdadm -I $d
+done
+
+mdadm -IRs || true
+sha_0c=$(sha1_sum $member0)
+sha_1c=$(sha1_sum $member1)
+
+mdadm -Ss
+sleep 1
+
+seq8c=$(mdadm -E $dev8 | sed -n 's/^ *Seq : //p')
+seq10c=$(mdadm -E $dev10 | sed -n 's/^ *Seq : //p')
+
+if [ x$sha_0a != x$sha_0c ]; then
+ ret=1
+ echo ERROR: sha1sum of $member0 has changed
+fi
+if [ x$sha_1b != x$sha_1c ]; then
+ ret=1
+ echo ERROR: sha1sum of $member1 has changed
+fi
+if [ \( $seq10a -ge $seq10c \) -o \( $seq8c -ne $seq10c \) ]; then
+ ret=1
+ echo ERROR: sequential numbers are wrong
+fi
+
+# Expect consistent state
+for d in $dev10 $dev8; do
+ mdadm -E $d>$tmp
+ for x in 0 1; do
+ egrep 'state\['$x'\] : Optimal, Consistent' $tmp || {
+ ret=1
+ echo ERROR: $member0 has unexpected state on $d
+ }
+ done
+ if [ x$(egrep -c 'active/Online$' $tmp) != x4 ]; then
+ ret=1
+ echo ERROR: unexpected number of online disks on $d
+ fi
+done
+
+# Now try assembly
+if mdadm -A $container $dev8 $dev9 $dev10 $dev11; then
+ mdadm -IR $container
+ sha_0d=$(sha1_sum $member0)
+ sha_1d=$(sha1_sum $member1)
+ mdadm -Ss
+ sleep 1
+ seq8d=$(mdadm -E $dev8 | sed -n 's/^ *Seq : //p')
+ seq10d=$(mdadm -E $dev10 | sed -n 's/^ *Seq : //p')
+ if [ x$sha_0a != x$sha_0d ]; then
+ ret=1
+ echo ERROR: sha1sum of $member0 has changed
+ fi
+ if [ x$sha_1b != x$sha_1d ]; then
+ ret=1
+ echo ERROR: sha1sum of $member1 has changed
+ fi
+ if [ \( $seq10a -ge $seq10d \) -o \( $seq8d -ne $seq10d \) ]; then
+ ret=1
+ echo ERROR: sequential numbers are wrong
+ fi
+else
+ ret=1
+ echo ERROR: assembly failed
+fi
+
+if [ $ret -ne 0 ]; then
+ mdadm -E $dev10
+ mdadm -E $dev8
+fi
+rm -f $tmp /var/tmp/mdmon.log
+[ $ret -eq 0 ]
diff --git a/tests/10ddf-incremental-wrong-order.broken b/tests/10ddf-incremental-wrong-order.broken
new file mode 100644
index 0000000..a5af3ba
--- /dev/null
+++ b/tests/10ddf-incremental-wrong-order.broken
@@ -0,0 +1,9 @@
+always fails
+
+Fails with errors:
+ ERROR: sha1sum of /dev/md/vol0 has changed
+ ERROR: /dev/md/vol0 has unexpected state on /dev/loop10
+ ERROR: unexpected number of online disks on /dev/loop10
+ ERROR: /dev/md/vol0 has unexpected state on /dev/loop8
+ ERROR: unexpected number of online disks on /dev/loop8
+ ERROR: sha1sum of /dev/md/vol0 has changed
diff --git a/tests/10ddf-sudden-degraded b/tests/10ddf-sudden-degraded
new file mode 100644
index 0000000..dc692ae
--- /dev/null
+++ b/tests/10ddf-sudden-degraded
@@ -0,0 +1,18 @@
+#
+# An array is assembled with one device missing.
+# The missing device must be recorded as failed in the metadata of the remaining disk.
+
+. tests/env-ddf-template
+
+mdadm -CR $container -e ddf -n 2 $dev8 $dev9
+ddf_check container 2
+
+mdadm -CR $member1 -n 2 -l1 $dev8 $dev9
+mdadm --wait $member1 || true
+mdadm -Ss
+
+mdadm -I $dev8
+mdadm -R $container
+mkfs $member1
+# There must be a missing device recorded
+mdadm --examine $dev8 | grep 'Raid Devices.*--' || exit 1
diff --git a/tests/11spare-migration b/tests/11spare-migration
new file mode 100644
index 0000000..24b6ec6
--- /dev/null
+++ b/tests/11spare-migration
@@ -0,0 +1,454 @@
+# Set of tests for the autorebuild functionality using mdadm -F
+# To be able to test ddf, all loop devices must be of a bigger size, with the
+# ones above number 7 bigger again by any amount (this is not changed for now
+# as it could affect other tests)
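+#
+# Spare migration is driven by POLICY lines in the mdadm.conf that createconfig
+# writes: a spare may only move to an array whose devices share a domain whose
+# action is spare or stronger. As a rough illustration only (the exact lines
+# createconfig emits may differ), a common-domain policy looks like:
+#
+#   POLICY domain=domain-imsm metadata=imsm path=loop0 action=spare
+#   POLICY domain=domain-imsm metadata=imsm path=loop1 action=spare
+#   POLICY domain=domain-imsm metadata=imsm path=loop2 action=spare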
+
+export IMSM_DEVNAME_AS_SERIAL=1
+export IMSM_TEST_OROM=1
+export IMSM_NO_PLATFORM=1
+
+. tests/utils
+set -ex
+verbose="yes"
+sleeptime=10
+
+# If listfailed=yes, don't exit when a test fails due to wrong spare
+# migration; just print a list of the failed tests at the end. Other errors
+# still stop the run.
+# If listfailed=no, exit on the first failure.
+listfailed="yes"
+
+# Start Monitor and set monitorpid.
+# Uses the global scan variable.
+# All parameters are numbers of the devices to monitor; they are only used when $scan="no".
+# E.g. "monitor 0 1" starts monitoring containers c0, c1 and subarrays v0, v1.
+monitor(){
+ [ -z $monitorpid ] || return
+ if [ "$scan" == "yes" ]; then
+ $mdadm -F -d 1 --scan --mail root@localhost -c $config &
+ monitorpid=$!
+ return
+ fi
+ unset mddevs
+ while [ -n "$1" ]
+ do
+ eval container=\$c$1
+ eval volumes=\$v$1
+ mddevs="$mddevs /dev/$container"
+ if [ "$container" != "$volumes" ]; then
+ for vol in $volumes; do
+ mddevs="$mddevs /dev/$vol"
+ done
+ fi
+ shift
+ done
+ if [ -n "$mddevs" ]; then
+ if [ "$verbose" != "yes" ]; then
+ $mdadm -F -d 1 $mddevs -c $config >&2 &
+ monitorpid=$!
+ else
+ $mdadm -F -t -d 1 $mddevs -c $config &
+ monitorpid=$!
+ fi
+ fi
+ [ "$verbose" != "yes" ] || echo $mddevs $monitorpid
+}
+
+test0()
+{
+dsc "Test 0: No config file, no spare should be moved"
+> $config
+setupdevs 0 0 1 $platform
+setupdevs 1 3 4 $platform
+monitor 0 1
+mdadm -a /dev/$c1 $dev2
+mdadm --fail /dev/$v0 $dev0
+# check that spare loop2 was not moved from container c1 to container c0
+chksparemoved $c1 $c0 $dev2 n
+tidyup
+}
+
+test0a()
+{
+dsc "Test 0a: No domains in config file, no spare should be moved"
+setupdevs 0 0 1 $platform
+setupdevs 1 3 4 $platform
+createconfig a
+monitor 0 1
+mdadm -a /dev/$c1 $dev2
+mdadm --fail /dev/$v0 $dev0
+# check that spare loop2 was not moved from container c1 to container c0
+chksparemoved $c1 $c0 $dev2 n
+tidyup
+}
+
+test1()
+{
+dsc "Test 1: Common domain, add disk to one container and fail first one in another container, spare should be moved"
+setupdevs 0 0 1 $platform
+setupdevs 1 3 4 $platform
+# create config file with arrays and common domain
+createconfig a
+createconfig domain-$platform $platform spare 0 1 2 3 4
+monitor 0 1
+mdadm -a /dev/$c1 $dev2
+mdadm --fail /dev/$v0 $dev0
+# check that spare loop2 was moved from container c1 to container c0
+chksparemoved $c1 $c0 $dev2
+tidyup
+}
+
+test1a()
+{
+dsc "Test 1a: Common domain, add disk to one container and fail second one in another container, spare should be moved"
+setupdevs 0 0 1 $platform
+setupdevs 1 3 4 $platform
+createconfig a
+createconfig domain-$platform $platform spare 0 1 2 3 4
+monitor 0 1
+mdadm -a /dev/$c1 $dev2
+mdadm --fail /dev/$v0 $dev1
+# check that spare loop2 was moved from container c1 to container c0
+chksparemoved $c1 $c0 $dev2
+tidyup
+}
+
+test2()
+{
+dsc "Test 2: Common domain, fail disk in one container and add one to another container, spare should be moved"
+setupdevs 0 0 1 $platform
+setupdevs 1 3 4 $platform
+createconfig a
+createconfig domain-$platform $platform spare 0 1 2 3 4
+monitor 0 1
+mdadm --fail /dev/$v0 $dev1
+mdadm -a /dev/$c1 $dev2
+chksparemoved $c1 $c0 $dev2
+tidyup
+}
+
+test3()
+{
+dsc "Test 3: Two domains, fail a disk in one domain, add a disk to another domain, the spare should not be moved"
+setupdevs 0 0 1 $platform
+setupdevs 1 3 4 $platform
+# create config file with 2 domains
+createconfig a
+createconfig domain-$platform"1" $platform spare 0 1 2
+createconfig domain-$platform"2" $platform spare 3 4 5
+monitor 0 1
+mdadm --fail /dev/$v0 $dev1
+mdadm -a /dev/$c1 $dev5
+chksparemoved $c1 $c0 $dev5 n
+tidyup
+}
+
+test4()
+{
+dsc "Test 4: One domain holds one container, fail a disk in domain, and add disk to a container not described by domain, move if metadata allows"
+setupdevs 0 0 1 $platform
+setupdevs 1 3 4 $platform
+createconfig a
+createconfig domain-$platform $platform spare 0 1 2
+monitor 0 1
+mdadm --fail /dev/$v0 $dev1
+mdadm -a /dev/$c1 $dev5
+unset shouldmove
+[ "$platform" == "imsm" ] || shouldmove="n"
+chksparemoved $c1 $c0 $dev5 $shouldmove
+tidyup
+}
+
+test5()
+{
+dsc "Test 5: Two domains, two containers in each domain"
+setupdevs 0 0 1 $platform
+setupdevs 1 3 4 $platform
+setupdevs 2 5 6 $platform
+setupdevs 3 8 10 $platform
+# 2 and 9 for spares
+createconfig a
+createconfig domain-$platform"1" $platform spare 0 1 2 3 4
+createconfig domain-$platform"2" $platform spare 5 6 8 9 10
+monitor 0 1 2 3
+test5a
+test5b
+test5c
+tidyup
+}
+
+test5a()
+{
+dsc "Test 5a: Two containers in each domain, add spare loop2 to domain1 and fail disk in the other domain, the spare should not be moved"
+mdadm -a /dev/$c0 $dev2
+mdadm --fail /dev/$v2 $dev5
+chksparemoved $c0 $c2 $dev2 n
+}
+
+test5b()
+{
+dsc "Test 5b: Fail disk in the same domain but different container, spare loop2 should be moved"
+mdadm --fail /dev/$v1 $dev3
+chksparemoved $c0 $c1 $dev2
+}
+
+test5c()
+{
+dsc "Test 5c: Add spare loop9 to different container in domain with degraded array, spare should be moved"
+mdadm -a /dev/$c3 $dev9
+chksparemoved $c3 $c2 $dev9
+}
+
+test6()
+{
+dsc "Test 6: One domain has two containers, fail a disk in one container, there is a spare in other container too small to use for rebuild"
+setupdevs 0 0 1 $platform
+setupdevs 1 8 9 $platform
+# all devices in one domain
+createconfig a
+createconfig domain-$platform $platform spare 0 1 2 8 9
+monitor 0 1
+mdadm -a /dev/$c0 $dev2
+mdadm --fail /dev/$v1 $dev8
+chksparemoved $c0 $c1 $dev2 n
+tidyup
+}
+
+test7()
+{
+dsc "Test 7: One domain, add small spare to container, fail disk in array, spare not used, add suitable spare to other container, spare should be moved"
+setupdevs 0 0 1 $platform
+setupdevs 1 8 9 $platform
+createconfig a
+createconfig domain-$platform $platform spare 0 1 2 8 9 10
+monitor 0 1
+mdadm -a /dev/$c1 $dev2
+mdadm --fail /dev/$v1 $dev8
+mdadm -a /dev/$c0 $dev10
+chksparemoved $c0 $c1 $dev10
+tidyup
+}
+
+
+test7a()
+{
+dsc "Test 7a: Small spare in parent, suitable one in other container, $dev2 in $c1 is not in common domain"
+setupdevs 0 0 1 $platform
+setupdevs 1 8 9 $platform
+#all $platform devices in one domain
+createconfig a
+createconfig domain-$platform"1" $platform spare 0 1 8 9 10
+createconfig domain-$platform"2" $platform spare 2
+monitor 0 1
+mdadm -a /dev/$c1 $dev2
+chkspare $c1 $dev2
+mdadm --fail /dev/$v1 $dev8
+mdadm -a /dev/$c0 $dev10
+chksparemoved $c0 $c1 $dev10
+tidyup
+}
+
+test8()
+{
+# ddf does not have getinfo_super_disks implemented so skip this test
+return
+dsc "Test 8: imsm and ddf - spare should not be migrated"
+setupdevs 0 10 11 imsm
+setupdevs 1 8 9 ddf
+createconfig a
+createconfig domain0 noplatform spare 8 9 10 11 12
+monitor 0 1
+mdadm -a /dev/$c1 $dev12
+mdadm --fail /dev/$v0 $dev10
+chksparemoved $c1 $c0 $dev12 n
+tidyup
+}
+
+test9()
+{
+dsc "Test 9: imsm and native 1.2 - one domain, no metadata specified, spare should be moved"
+setupdevs 0 10 11 imsm
+setupdevs 1 8 9 1.2
+createconfig a
+createconfig domain0 noplatform spare 8 9 10 11 12
+monitor 0 1
+mdadm -a /dev/$c1 $dev12
+mdadm --fail /dev/$v0 $dev10
+chksparemoved $c1 $c0 $dev12
+tidyup
+}
+
+test9a()
+{
+dsc "Test 9a: imsm and native 1.2 - spare in global domain, should be moved"
+setupdevs 0 10 11 imsm
+setupdevs 1 8 9 1.2
+createconfig a
+createconfig domain-global noplatform spare 8 9 10 11 12
+createconfig domain-1.2 1.2 spare 8 9
+createconfig domain-imsm imsm spare 10 11
+monitor 0 1
+mdadm -a /dev/$c1 $dev12
+mdadm --fail /dev/$v0 $dev10
+chksparemoved $c1 $c0 $dev12
+tidyup
+}
+
+test10()
+{
+dsc "Test 10: Two arrays on the same devices in container"
+setupdevs 0 0 1 $platform 10000
+setupdevs 1 3 4 $platform
+createconfig a
+createconfig domain-$platform $platform spare 0 1 2 3 4 5
+monitor 0 1
+mdadm -a /dev/$c1 $dev2
+mdadm --fail /dev/md/sub0_ $dev0
+chksparemoved $c1 $c0 $dev2
+if [ $failed -eq 0 ]; then
+# now fail the spare and see if we get another one
+ mdadm --fail /dev/md/sub0_ $dev2
+ mdadm -a /dev/$c1 $dev5
+ chksparemoved $c1 $c0 $dev5
+fi
+tidyup
+}
+
+test11()
+{
+dsc "Test 11: Failed spare from other container should not be used"
+setupdevs 0 0 1 $platform
+setupdevs 1 3 4 $platform
+createconfig a
+createconfig domain-$platform $platform spare 0 1 2 3 4
+monitor 0 1
+mdadm -a /dev/$c1 $dev2
+mdadm --fail /dev/$v1 $dev3
+#wait until recovery finishes so no degraded array in c1
+check wait
+mdadm --fail /dev/$v0 $dev0
+chksparemoved $c1 $c0 $dev3 n
+tidyup
+}
+
+test12()
+{
+dsc "Test 12: Only one spare should be taken for rebuild, second not needed"
+setupdevs 0 0 1 $platform
+setupdevs 1 3 4 $platform
+createconfig a
+createconfig domain-$platform $platform spare 0 1 2 3 4 5
+monitor 0 1
+mdadm -a /dev/$c1 $dev2
+mdadm -a /dev/$c1 $dev5
+mdadm --fail /dev/$v0 $dev0
+sleep $sleeptime
+chkarray $dev2 n
+sc1=$c
+chkarray $dev5 n
+sc2=$c
+[ "$sc1" != "$sc2" ] || err "both spares in the same container $sc1"
+tidyup
+}
+
+test13()
+{
+dsc "Test 13: Common domain, two containers, fail a disk in a container, action is below spare, the spare should be moved regardless of action"
+setupdevs 0 0 1 $platform
+setupdevs 1 4 5 $platform
+# same domain but different action on 4 5 6
+createconfig a
+createconfig domain-$platform $platform spare 0 1
+createconfig domain-$platform $platform include 4 5 6
+monitor 0 1
+mdadm -a /dev/$c1 $dev6
+mdadm --fail /dev/$v0 $dev0
+chksparemoved $c1 $c0 $dev6
+tidyup
+}
+
+test14()
+{
+dsc "Test 14: One domain, small array on big disks, check if small spare is accepted"
+setupdevs 0 8 9 $platform 10000 1
+setupdevs 1 0 1 $platform
+createconfig a
+createconfig domain-$platform $platform spare 0 1 2 8 9
+monitor 0 1
+mdadm -a /dev/$c1 $dev2
+mdadm --fail /dev/$v0 $dev9
+chksparemoved $c1 $c0 $dev2
+tidyup
+}
+
+test15()
+{
+dsc "Test 15: spare in global domain for $platform metadata, should be moved"
+# this is like 9a but only one metadata used
+setupdevs 0 10 11 $platform
+setupdevs 1 8 9 $platform
+createconfig a
+createconfig domain-global $platform spare 8 9 10 11 12
+createconfig domain-1 $platform spare 8 9
+createconfig domain-2 $platform spare 10 11
+monitor 0 1
+mdadm -a /dev/$c1 $dev12
+mdadm --fail /dev/$v0 $dev10
+chksparemoved $c1 $c0 $dev12
+tidyup
+}
+
+try()
+{
+test0
+test0a
+test1
+test1a
+test2
+test3
+test4
+test5
+test6
+if [ "$platform" != "1.2" ]; then
+# this is because we can't have a small spare added to native array
+ test7
+ test7a
+fi
+test8
+test9
+test9a
+if [ "$platform" != "1.2" ]; then
+# we can't create two subarrays on the same devices for native (without
+# partitions)
+ test10
+fi
+test11
+test12
+test13
+test14
+test15
+}
+
+try_failed()
+{
+platform="1.2"
+scan="no"
+test5
+test9
+test13
+scan="yes"
+test9
+}
+
+#try_failed
+
+for scan in no yes; do
+ for platform in 1.2 imsm; do
+ try
+ done
+done
+
+[ "$listfailed" == "no" ] || [ -z "$flist" ] || echo -e "\n FAILED TESTS: $flist"
+
+#cat $targetdir/log
+rm -f /dev/disk/by-path/loop*
diff --git a/tests/12imsm-r0_2d-grow-r0_3d b/tests/12imsm-r0_2d-grow-r0_3d
new file mode 100644
index 0000000..3c6cf74
--- /dev/null
+++ b/tests/12imsm-r0_2d-grow-r0_3d
@@ -0,0 +1,20 @@
+. tests/env-imsm-template
+
+# RAID 0 volume, 2 disks grow to RAID 0 volume, 3 disks
+# POSITIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+spare_list="$dev2"
+
+# Before: RAID 0 volume, 2 disks, 64k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+# After: RAID 0 volume, 3 disks, 64k chunk size
+vol0_new_num_comps=$((num_disks + 1))
+
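+# imsm-grow-template performs the reshape and the checks; judging from how it
+# is invoked across the 12/13/14/15/16imsm tests, the first argument selects a
+# negative (1) or positive (0) test and the second marks a migration (level or
+# chunk change, 1) rather than a pure disk-count grow (0).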
+. tests/imsm-grow-template 0 0
diff --git a/tests/12imsm-r0_2d-grow-r0_4d b/tests/12imsm-r0_2d-grow-r0_4d
new file mode 100644
index 0000000..e4fccda
--- /dev/null
+++ b/tests/12imsm-r0_2d-grow-r0_4d
@@ -0,0 +1,20 @@
+. tests/env-imsm-template
+
+# RAID 0 volume, 2 disks grow to RAID 0 volume, 4 disks
+# POSITIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+spare_list="$dev2 $dev3"
+
+# Before: RAID 0 volume, 2 disks, 64k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+# After: RAID 0 volume, 4 disks, 64k chunk size
+vol0_new_num_comps=$((num_disks + 2))
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/12imsm-r0_2d-grow-r0_5d b/tests/12imsm-r0_2d-grow-r0_5d
new file mode 100644
index 0000000..388a5bb
--- /dev/null
+++ b/tests/12imsm-r0_2d-grow-r0_5d
@@ -0,0 +1,20 @@
+. tests/env-imsm-template
+
+# RAID 0 volume, 2 disks grow to RAID 0 volume, 5 disks
+# POSITIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+spare_list="$dev2 $dev3 $dev4"
+
+# Before: RAID 0 volume, 2 disks, 64k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+# After: RAID 0 volume, 5 disks, 64k chunk size
+vol0_new_num_comps=$((num_disks + 3))
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/12imsm-r0_3d-grow-r0_4d b/tests/12imsm-r0_3d-grow-r0_4d
new file mode 100644
index 0000000..7065f07
--- /dev/null
+++ b/tests/12imsm-r0_3d-grow-r0_4d
@@ -0,0 +1,20 @@
+. tests/env-imsm-template
+
+# RAID 0 volume, 3 disks grow to RAID 0 volume, 4 disks
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+spare_list="$dev3"
+
+# Before: RAID 0 volume, 3 disks, 64k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+# After: RAID 0 volume, 4 disks, 64k chunk size
+vol0_new_num_comps=$((num_disks + 1))
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/12imsm-r5_3d-grow-r5_4d b/tests/12imsm-r5_3d-grow-r5_4d
new file mode 100644
index 0000000..097da0a
--- /dev/null
+++ b/tests/12imsm-r5_3d-grow-r5_4d
@@ -0,0 +1,20 @@
+. tests/env-imsm-template
+
+# RAID 5 volume, 3 disks grow to RAID 5 volume, 4 disks
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+spare_list="$dev3"
+
+# Before: RAID 5 volume, 3 disks, 64k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# After: RAID 5 volume, 4 disks, 64k chunk size
+vol0_new_num_comps=$num_disks
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/12imsm-r5_3d-grow-r5_5d b/tests/12imsm-r5_3d-grow-r5_5d
new file mode 100644
index 0000000..2e5c7d2
--- /dev/null
+++ b/tests/12imsm-r5_3d-grow-r5_5d
@@ -0,0 +1,20 @@
+. tests/env-imsm-template
+
+# RAID 5 volume, 3 disks grow to RAID 5 volume, 5 disks
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+spare_list="$dev3 $dev4"
+
+# Before: RAID 5 volume, 3 disks, 64k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# After: RAID 5 volume, 5 disks, 64k chunk size
+vol0_new_num_comps=$((num_disks + 1))
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/13imsm-r0_r0_2d-grow-r0_r0_4d b/tests/13imsm-r0_r0_2d-grow-r0_r0_4d
new file mode 100644
index 0000000..66ceeb3
--- /dev/null
+++ b/tests/13imsm-r0_r0_2d-grow-r0_r0_4d
@@ -0,0 +1,29 @@
+. tests/env-imsm-template
+
+# Grow the container (arrays inside) from 2 disks to 4 disks
+# POSITIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+spare_list="$dev2 $dev3"
+
+# Before: RAID 0 volume in slot #0, 2 disks, 128k chunk size
+# RAID 0 volume in slot #1, 2 disks, 64k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=128
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+vol1_level=0
+vol1_comp_size=$((5 * 1024))
+vol1_chunk=64
+vol1_num_comps=$num_disks
+vol1_offset=$((vol0_comp_size + 4096))
+
+# After: RAID 0 volume in slot #0, 4 disks, 128k chunk size
+# RAID 0 volume in slot #1, 4 disks, 64k chunk size
+vol0_new_num_comps=$((num_disks + 2))
+vol1_new_num_comps=$vol0_new_num_comps
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/13imsm-r0_r0_2d-grow-r0_r0_5d b/tests/13imsm-r0_r0_2d-grow-r0_r0_5d
new file mode 100644
index 0000000..0da9ef3
--- /dev/null
+++ b/tests/13imsm-r0_r0_2d-grow-r0_r0_5d
@@ -0,0 +1,29 @@
+. tests/env-imsm-template
+
+# Grow both members from 2 disks to 5 disks
+# POSITIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+spare_list="$dev2 $dev3 $dev4"
+
+# Before: RAID 0 volume in slot #0, 2 disks, 64k chunk size
+# RAID 0 volume in slot #1, 2 disks, 256k chunk size
+vol0_level=0
+vol0_comp_size=$((4 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+vol1_level=0
+vol1_comp_size=$((6 * 1024))
+vol1_chunk=256
+vol1_num_comps=$num_disks
+vol1_offset=$((vol0_comp_size + 4096))
+
+# After: RAID 0 volume in slot #0, 5 disks, 64k chunk size
+# RAID 0 volume in slot #1, 5 disks, 256k chunk size
+vol0_new_num_comps=$((num_disks + 3))
+vol1_new_num_comps=$vol0_new_num_comps
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/13imsm-r0_r0_3d-grow-r0_r0_4d b/tests/13imsm-r0_r0_3d-grow-r0_r0_4d
new file mode 100644
index 0000000..1ff6025
--- /dev/null
+++ b/tests/13imsm-r0_r0_3d-grow-r0_r0_4d
@@ -0,0 +1,29 @@
+. tests/env-imsm-template
+
+# Grow a container (arrays inside) from 3 disks to 4 disks
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+spare_list="$dev3"
+
+# Before: RAID 0 volume in slot #0, 3 disks, 128k chunk size
+#         RAID 0 volume in slot #1, 3 disks, 128k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=128
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+vol1_level=0
+vol1_comp_size=$((5 * 1024))
+vol1_chunk=128
+vol1_num_comps=$num_disks
+vol1_offset=$((vol0_comp_size + 4096))
+
+# After: RAID 0 volume in slot #0, 4 disks, 128k chunk size
+#        RAID 0 volume in slot #1, 4 disks, 128k chunk size
+vol0_new_num_comps=$((num_disks + 1))
+vol1_new_num_comps=$vol0_new_num_comps
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/13imsm-r0_r5_3d-grow-r0_r5_4d b/tests/13imsm-r0_r5_3d-grow-r0_r5_4d
new file mode 100644
index 0000000..2977f36
--- /dev/null
+++ b/tests/13imsm-r0_r5_3d-grow-r0_r5_4d
@@ -0,0 +1,29 @@
+. tests/env-imsm-template
+
+# Grow the container (arrays inside) from 3 disks to 4 disks
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+spare_list="$dev3"
+
+# Before: RAID 0 volume in slot #0, 3 disks, 64k chunk size
+# RAID 5 volume in slot #1, 3 disks, 128k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+vol1_level=5
+vol1_comp_size=$((5 * 1024))
+vol1_chunk=128
+vol1_num_comps=$((num_disks - 1))
+vol1_offset=$((vol0_comp_size + 4096))
+
+# After: RAID 0 volume in slot #0, 4 disks, 64k chunk size
+# RAID 5 volume in slot #1, 4 disks, 128k chunk size
+vol1_new_num_comps=$num_disks
+vol0_new_num_comps=$((num_disks + 1))
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/13imsm-r0_r5_3d-grow-r0_r5_5d b/tests/13imsm-r0_r5_3d-grow-r0_r5_5d
new file mode 100644
index 0000000..ff15ad0
--- /dev/null
+++ b/tests/13imsm-r0_r5_3d-grow-r0_r5_5d
@@ -0,0 +1,29 @@
+. tests/env-imsm-template
+
+# Grow the container (arrays inside) from 3 disks to 5 disks
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+spare_list="$dev3 $dev4"
+
+# Before: RAID 0 volume in slot #0, 3 disks, 128k chunk size
+#         RAID 5 volume in slot #1, 3 disks, 128k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=128
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+vol1_level=5
+vol1_comp_size=$((5 * 1024))
+vol1_chunk=128
+vol1_num_comps=$((num_disks - 1))
+vol1_offset=$((vol0_comp_size + 4096))
+
+# After: RAID 0 volume in slot #0, 5 disks, 128k chunk size
+#        RAID 5 volume in slot #1, 5 disks, 128k chunk size
+vol0_new_num_comps=$((num_disks + 2))
+vol1_new_num_comps=$((num_disks + 1))
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/13imsm-r5_r0_3d-grow-r5_r0_4d b/tests/13imsm-r5_r0_3d-grow-r5_r0_4d
new file mode 100644
index 0000000..9fed88a
--- /dev/null
+++ b/tests/13imsm-r5_r0_3d-grow-r5_r0_4d
@@ -0,0 +1,29 @@
+. tests/env-imsm-template
+
+# Grow the container (arrays inside) from 3 disks to 4 disks
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+spare_list="$dev3"
+
+# Before: RAID 5 volume in slot #0, 3 disks, 64k chunk size
+# RAID 0 volume in slot #1, 3 disks, 64k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+vol1_level=0
+vol1_comp_size=$((5 * 1024))
+vol1_chunk=64
+vol1_offset=$((vol0_comp_size + 4096))
+vol1_num_comps=$num_disks
+
+# After: RAID 5 volume in slot #0, 4 disks, 64k chunk size
+# RAID 0 volume in slot #1, 4 disks, 64k chunk size
+vol0_new_num_comps=$num_disks
+vol1_new_num_comps=$((num_disks + 1))
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/13imsm-r5_r0_3d-grow-r5_r0_5d b/tests/13imsm-r5_r0_3d-grow-r5_r0_5d
new file mode 100644
index 0000000..e8beddc
--- /dev/null
+++ b/tests/13imsm-r5_r0_3d-grow-r5_r0_5d
@@ -0,0 +1,29 @@
+. tests/env-imsm-template
+
+# Grow the container (arrays inside) from 3 disks to 5 disks
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+spare_list="$dev3 $dev4"
+
+# Before: RAID 5 volume in slot #0, 3 disks, 64k chunk size
+#         RAID 0 volume in slot #1, 3 disks, 64k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+vol1_level=0
+vol1_comp_size=$((5 * 1024))
+vol1_chunk=64
+vol1_offset=$((vol0_comp_size + 4096))
+vol1_num_comps=$num_disks
+
+# After: RAID 5 volume in slot #0, 5 disks, 64k chunk size
+#        RAID 0 volume in slot #1, 5 disks, 64k chunk size
+vol0_new_num_comps=$((num_disks + 1))
+vol1_new_num_comps=$((num_disks + 2))
+
+. tests/imsm-grow-template 0 0
diff --git a/tests/14imsm-r0_3d-r5_3d-migrate-r5_4d-r5_4d b/tests/14imsm-r0_3d-r5_3d-migrate-r5_4d-r5_4d
new file mode 100644
index 0000000..cb7328a
--- /dev/null
+++ b/tests/14imsm-r0_3d-r5_3d-migrate-r5_4d-r5_4d
@@ -0,0 +1,29 @@
+. tests/env-imsm-template
+
+# RAID 0 and RAID 5 volumes (3 disks) migrate to RAID 5 and RAID 5 volumes (4 disks)
+# NEGATIVE test - migration is not allowed if there is more than one array in a container
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+spare_list="$dev3"
+
+# Before: RAID 0 volume, 3 disks, 64k chunk size, as member #0
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+# Extra: RAID 5 volume, 3 disks, 64k chunk size, as member #1
+vol1_level=5
+vol1_comp_size=$((5 * 1024))
+vol1_chunk=64
+vol1_num_comps=$((num_disks - 1))
+vol1_offset=$((vol0_comp_size + 4096))
+
+# After: RAID 5 volume, 4 disks, 64k chunk size (only member #0)
+vol0_new_level=5
+vol0_new_num_comps=$num_disks
+vol0_new_chunk=64
+
+. tests/imsm-grow-template 1 1
diff --git a/tests/14imsm-r0_3d_no_spares-migrate-r5_3d b/tests/14imsm-r0_3d_no_spares-migrate-r5_3d
new file mode 100644
index 0000000..10bbab6
--- /dev/null
+++ b/tests/14imsm-r0_3d_no_spares-migrate-r5_3d
@@ -0,0 +1,21 @@
+. tests/env-imsm-template
+
+# RAID 0 volume (3 disks, no spares) migrate to RAID 5 volume (3 disks)
+# NEGATIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+
+# Before: RAID 0 volume, 3 disks, 64k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+# After: RAID 5, 3 disks, 64k chunk size
+vol0_new_level=5
+vol0_new_num_comps=$((num_disks - 1))
+vol0_new_chunk=64
+
+. tests/imsm-grow-template 1
diff --git a/tests/14imsm-r0_r0_2d-takeover-r10_4d b/tests/14imsm-r0_r0_2d-takeover-r10_4d
new file mode 100644
index 0000000..d068abb
--- /dev/null
+++ b/tests/14imsm-r0_r0_2d-takeover-r10_4d
@@ -0,0 +1,30 @@
+. tests/env-imsm-template
+
+
+# Two RAID 0 volumes (2 disks) migrate to RAID 10 volume (4 disks)
+# NEGATIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+
+# Before: RAID 0 volume in slot #0, 2 disks, 64k chunk size
+# RAID 0 volume in slot #1, 2 disks, 64k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+#         RAID 0 volume in slot #1, 2 disks, 64k chunk size
+vol1_level=0
+vol1_comp_size=$((5 * 1024))
+vol1_chunk=64
+vol1_num_comps=$num_disks
+vol1_offset=$(( $vol0_comp_size + 4096 ))
+
+# After: RAID 10, 4 disks, 64k chunk size
+vol0_new_level=10
+vol0_new_num_comps=$((num_disks - 1))
+vol0_new_chunk=64
+
+. tests/imsm-grow-template 1 1
diff --git a/tests/14imsm-r10_4d-grow-r10_5d b/tests/14imsm-r10_4d-grow-r10_5d
new file mode 100644
index 0000000..bcbe147
--- /dev/null
+++ b/tests/14imsm-r10_4d-grow-r10_5d
@@ -0,0 +1,20 @@
+. tests/env-imsm-template
+
+# RAID 10 volume, 4 disks grow to RAID 10 volume, 5 disks
+# NEGATIVE test
+
+num_disks=4
+device_list="$dev0 $dev1 $dev2 $dev3"
+spare_list="$dev4"
+
+# Before: RAID 10 volume, 4 disks, 128k chunk size
+vol0_level=10
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=128
+vol0_num_comps=$((num_disks - 2))
+vol0_offset=0
+
+# After: RAID 10 volume, 5 disks, 128k chunk size (test should fail)
+vol0_new_num_comps=$((num_disks + 1))
+
+. tests/imsm-grow-template 1 0
diff --git a/tests/14imsm-r10_r5_4d-takeover-r0_2d b/tests/14imsm-r10_r5_4d-takeover-r0_2d
new file mode 100644
index 0000000..720e575
--- /dev/null
+++ b/tests/14imsm-r10_r5_4d-takeover-r0_2d
@@ -0,0 +1,30 @@
+. tests/env-imsm-template
+
+
+# Two RAID volumes: RAID10 and RAID5 (4 disks) migrate to RAID 0 volume (2 disks)
+# NEGATIVE test
+
+num_disks=4
+device_list="$dev0 $dev1 $dev2 $dev3"
+
+# Before: RAID 10 volume in slot #0, 4 disks, 64k chunk size
+# RAID 5 volume in slot #1, 4 disks, 64k chunk size
+vol0_level=10
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$(( $num_disks - 2 ))
+vol0_offset=0
+
+#         RAID 5 volume in slot #1, 4 disks, 64k chunk size
+vol1_level=5
+vol1_comp_size=$((5 * 1024))
+vol1_chunk=64
+vol1_num_comps=$(( $num_disks - 1 ))
+vol1_offset=$(( $vol0_comp_size + 4096 ))
+
+# After: RAID 0, 2 disks, 64k chunk size
+vol0_new_level=0
+vol0_new_num_comps=2
+vol0_new_chunk=64
+
+. tests/imsm-grow-template 1 1
diff --git a/tests/14imsm-r1_2d-grow-r1_3d b/tests/14imsm-r1_2d-grow-r1_3d
new file mode 100644
index 0000000..be20ab8
--- /dev/null
+++ b/tests/14imsm-r1_2d-grow-r1_3d
@@ -0,0 +1,19 @@
+. tests/env-imsm-template
+
+# RAID 1 volume, 2 disks grow to RAID 1 volume, 3 disks
+# NEGATIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+spare_list="$dev4"
+
+# Before: RAID 1 volume, 2 disks, 64k chunk size
+vol0_level=1
+vol0_comp_size=$((5 * 1024))
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# After: RAID 1 volume, 3 disks, 64k chunk size (test should fail)
+vol0_new_num_comps=$num_disks
+
+. tests/imsm-grow-template 1 0
diff --git a/tests/14imsm-r1_2d-grow-r1_3d.broken b/tests/14imsm-r1_2d-grow-r1_3d.broken
new file mode 100644
index 0000000..4ef1d40
--- /dev/null
+++ b/tests/14imsm-r1_2d-grow-r1_3d.broken
@@ -0,0 +1,5 @@
+always fails
+
+Fails with error:
+
+ mdadm/tests/func.sh: line 325: dvsize/chunk: division by 0 (error token is "chunk")
diff --git a/tests/14imsm-r1_2d-takeover-r0_2d b/tests/14imsm-r1_2d-takeover-r0_2d
new file mode 100644
index 0000000..27002e1
--- /dev/null
+++ b/tests/14imsm-r1_2d-takeover-r0_2d
@@ -0,0 +1,21 @@
+. tests/env-imsm-template
+
+# RAID 1 volume, 2 disks change to RAID 0 volume, 2 disks
+#
+# NEGATIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+
+# Before: RAID 1 volume, 2 disks, 64k chunk size
+vol0_level=1
+vol0_comp_size=$((5 * 1024))
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# After: RAID 0 volume, 2 disks, 64k chunk size
+vol0_new_level=0
+vol0_new_num_comps=$num_disks
+vol0_new_chunk=64
+
+. tests/imsm-grow-template 1
diff --git a/tests/14imsm-r1_2d-takeover-r0_2d.broken b/tests/14imsm-r1_2d-takeover-r0_2d.broken
new file mode 100644
index 0000000..89cd4e5
--- /dev/null
+++ b/tests/14imsm-r1_2d-takeover-r0_2d.broken
@@ -0,0 +1,6 @@
+always fails
+
+Fails with error:
+
+ tests/func.sh: line 325: dvsize/chunk: division by 0 (error token
+ is "chunk")
diff --git a/tests/14imsm-r5_3d-grow-r5_5d-no-spares b/tests/14imsm-r5_3d-grow-r5_5d-no-spares
new file mode 100644
index 0000000..ed18e72
--- /dev/null
+++ b/tests/14imsm-r5_3d-grow-r5_5d-no-spares
@@ -0,0 +1,20 @@
+. tests/env-imsm-template
+
+# RAID 5 volume, 3 disks grow to RAID 5 volume, 5 disks (no spares available)
+# NEGATIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+
+# Before: RAID 5 volume, 3 disks, 64k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# After: RAID 5 volume, 5 disks, 64k chunk size
+add_to_num_disks=2
+vol0_new_num_comps=$((num_disks + 2))
+
+. tests/imsm-grow-template 1 0
diff --git a/tests/14imsm-r5_3d-migrate-r4_3d b/tests/14imsm-r5_3d-migrate-r4_3d
new file mode 100644
index 0000000..e3b971c
--- /dev/null
+++ b/tests/14imsm-r5_3d-migrate-r4_3d
@@ -0,0 +1,21 @@
+. tests/env-imsm-template
+
+# RAID 5 volume (3 disks) migrate to RAID 4 volume (3 disks)
+# NEGATIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+
+# Before: RAID 5 volume, 3 disks, 64k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# After: RAID 4, 3 disks, 64k chunk size
+vol0_new_level=4
+vol0_new_num_comps=$((num_disks - 1))
+vol0_new_chunk=64
+
+. tests/imsm-grow-template 1
diff --git a/tests/15imsm-r0_3d_64k-migrate-r0_3d_256k b/tests/15imsm-r0_3d_64k-migrate-r0_3d_256k
new file mode 100644
index 0000000..4fe3807
--- /dev/null
+++ b/tests/15imsm-r0_3d_64k-migrate-r0_3d_256k
@@ -0,0 +1,21 @@
+. tests/env-imsm-template
+
+# RAID 0 volume, Migration from 64k to 256k chunk size.
+# POSITIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+
+# RAID 0, 2 disks, 64k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+# RAID 0, 2 disks, 256k chunk size
+vol0_new_level=0
+vol0_new_num_comps=$vol0_num_comps
+vol0_new_chunk=256
+
+. tests/imsm-grow-template 0 1
diff --git a/tests/15imsm-r5_3d_4k-migrate-r5_3d_256k b/tests/15imsm-r5_3d_4k-migrate-r5_3d_256k
new file mode 100644
index 0000000..025e9ef
--- /dev/null
+++ b/tests/15imsm-r5_3d_4k-migrate-r5_3d_256k
@@ -0,0 +1,21 @@
+. tests/env-imsm-template
+
+# RAID 5 volume, Migration from 4k to 256k chunk size.
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+
+# RAID 5, 3 disks, 4k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=4
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# RAID 5, 3 disks, 256k chunk size
+vol0_new_level=5
+vol0_new_num_comps=$vol0_num_comps
+vol0_new_chunk=256
+
+. tests/imsm-grow-template 0 1
diff --git a/tests/15imsm-r5_3d_64k-migrate-r5_3d_256k b/tests/15imsm-r5_3d_64k-migrate-r5_3d_256k
new file mode 100644
index 0000000..37547b7
--- /dev/null
+++ b/tests/15imsm-r5_3d_64k-migrate-r5_3d_256k
@@ -0,0 +1,21 @@
+. tests/env-imsm-template
+
+# RAID 5 volume, Migration from 64k to 256k chunk size.
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+
+# RAID 5, 3 disks, 64k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# RAID 5, 3 disks, 256k chunk size
+vol0_new_level=5
+vol0_new_num_comps=$vol0_num_comps
+vol0_new_chunk=256
+
+. tests/imsm-grow-template 0 1
diff --git a/tests/15imsm-r5_6d_4k-migrate-r5_6d_256k b/tests/15imsm-r5_6d_4k-migrate-r5_6d_256k
new file mode 100644
index 0000000..d2f6c70
--- /dev/null
+++ b/tests/15imsm-r5_6d_4k-migrate-r5_6d_256k
@@ -0,0 +1,21 @@
+. tests/env-imsm-template
+
+# RAID 5 volume, Migration from 4k to 256k chunk size.
+# POSITIVE test
+
+num_disks=6
+device_list="$dev0 $dev1 $dev2 $dev3 $dev4 $dev5"
+
+# RAID 5, 6 disks, 4k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=4
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# RAID 5, 6 disks, 256k chunk size
+vol0_new_level=5
+vol0_new_num_comps=$vol0_num_comps
+vol0_new_chunk=256
+
+. tests/imsm-grow-template 0 1
diff --git a/tests/15imsm-r5_r0_3d_64k-migrate-r5_r0_3d_256k b/tests/15imsm-r5_r0_3d_64k-migrate-r5_r0_3d_256k
new file mode 100644
index 0000000..f9369d5
--- /dev/null
+++ b/tests/15imsm-r5_r0_3d_64k-migrate-r5_r0_3d_256k
@@ -0,0 +1,34 @@
+. tests/env-imsm-template
+
+# Member 0: RAID 5 volume, Member 1: RAID 0 volume
+# Migration from 64k to 256k chunk size (both members)
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+
+# RAID 5, 3 disks, 64k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# After migration parameters
+vol0_new_level=5
+vol0_new_num_comps=$vol0_num_comps
+vol0_new_chunk=256
+
+# RAID 0, 3 disks, 64k chunk size
+vol1_level=0
+vol1_comp_size=$((5 * 1024))
+vol1_chunk=64
+vol1_num_comps=$num_disks
+vol1_offset=$((vol0_comp_size + 4096))
+
+# After migration parameters
+vol1_new_level=0
+vol1_new_num_comps=$vol1_num_comps
+vol1_new_chunk=256
+
+. tests/imsm-grow-template 0 1
diff --git a/tests/16imsm-r0_3d-migrate-r5_4d b/tests/16imsm-r0_3d-migrate-r5_4d
new file mode 100644
index 0000000..265adf9
--- /dev/null
+++ b/tests/16imsm-r0_3d-migrate-r5_4d
@@ -0,0 +1,22 @@
+. tests/env-imsm-template
+
+# RAID 0 volume (3 disks) migrate to RAID 5 volume (4 disks)
+# POSITIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+
+# Before: RAID 0, 3 disks, 64k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+# After: RAID 5, 4 disks, 64k chunk size
+vol0_new_level=5
+new_num_disks=4
+vol0_new_num_comps=$num_disks
+vol0_new_chunk=64
+
+. tests/imsm-grow-template 0 1
diff --git a/tests/16imsm-r0_5d-migrate-r5_6d b/tests/16imsm-r0_5d-migrate-r5_6d
new file mode 100644
index 0000000..535b609
--- /dev/null
+++ b/tests/16imsm-r0_5d-migrate-r5_6d
@@ -0,0 +1,22 @@
+. tests/env-imsm-template
+
+# RAID 0 volume (5 disks) migrate to RAID 5 volume (6 disks)
+# POSITIVE test
+
+num_disks=5
+device_list="$dev0 $dev1 $dev2 $dev3 $dev4"
+
+# Before: RAID 0, 5 disks, 64k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+# After: RAID 5, 6 disks, 64k chunk size
+vol0_new_level=5
+vol0_new_num_comps=$num_disks
+vol0_new_chunk=64
+new_num_disks=6
+
+. tests/imsm-grow-template 0 1
diff --git a/tests/16imsm-r5_3d-migrate-r0_3d b/tests/16imsm-r5_3d-migrate-r0_3d
new file mode 100644
index 0000000..bcb5709
--- /dev/null
+++ b/tests/16imsm-r5_3d-migrate-r0_3d
@@ -0,0 +1,21 @@
+. tests/env-imsm-template
+
+# RAID 5 volume (3 disks) migrate to RAID 0 volume (2 disks)
+# NEGATIVE test
+
+num_disks=3
+device_list="$dev0 $dev1 $dev2"
+
+# Before: RAID 5, 3 disks, 64k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# After: RAID 0, 3 disks, 64k chunk size
+vol0_new_level=0
+vol0_new_num_comps=$((num_disks-1))
+vol0_new_chunk=64
+
+. tests/imsm-grow-template 1 1
diff --git a/tests/16imsm-r5_5d-migrate-r0_5d b/tests/16imsm-r5_5d-migrate-r0_5d
new file mode 100644
index 0000000..ca77435
--- /dev/null
+++ b/tests/16imsm-r5_5d-migrate-r0_5d
@@ -0,0 +1,21 @@
+. tests/env-imsm-template
+
+# RAID 5 volume (5 disks) migration to RAID 0 volume (4 disks)
+# NEGATIVE test
+
+num_disks=5
+device_list="$dev0 $dev1 $dev2 $dev3 $dev4"
+
+# Before: RAID 5 volume, 5 disks, 64k chunk size
+vol0_level=5
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=64
+vol0_num_comps=$((num_disks - 1))
+vol0_offset=0
+
+# After: RAID 0 volume, 5 disks, 64k chunk size
+vol0_new_level=0
+vol0_new_num_comps=$((num_disks - 1))
+vol0_new_chunk=64
+
+. tests/imsm-grow-template 1 1
diff --git a/tests/18imsm-1d-takeover-r0_1d b/tests/18imsm-1d-takeover-r0_1d
new file mode 100644
index 0000000..6f5cf5a
--- /dev/null
+++ b/tests/18imsm-1d-takeover-r0_1d
@@ -0,0 +1,22 @@
+. tests/env-imsm-template
+
+# Create RAID 0 from a single disk.
+# POSITIVE test
+
+vol0_num_comps=1
+vol0_comp_size=$((10 * 1024))
+
+# Create container
+mdadm --create --run $container --auto=md --metadata=imsm --force --raid-disks=$vol0_num_comps $dev0
+check wait
+imsm_check container $vol0_num_comps
+
+# Create RAID 0 volume
+mdadm --create --run $member0 --auto=md --level=0 --size=$vol0_comp_size --chunk=64 --force --raid-disks=$vol0_num_comps $dev0
+check wait
+
+# Test the member
+imsm_check member $member0 $vol0_num_comps 0 $vol0_comp_size $((vol0_num_comps * vol0_comp_size)) 0 64
+testdev $member0 $vol0_num_comps $vol0_comp_size 64
+
+exit 0
diff --git a/tests/18imsm-1d-takeover-r1_2d b/tests/18imsm-1d-takeover-r1_2d
new file mode 100644
index 0000000..e38ed89
--- /dev/null
+++ b/tests/18imsm-1d-takeover-r1_2d
@@ -0,0 +1,20 @@
+. tests/env-imsm-template
+
+# Create RAID 1 from a single disk
+# POSITIVE test
+
+vol0_num_comps=1
+vol0_comp_size=$((10 * 1024))
+
+# Create container
+mdadm --create --run $container --auto=md --metadata=imsm --force --raid-disks=$vol0_num_comps $dev0
+check wait
+imsm_check container $vol0_num_comps
+
+# Create RAID 1 volume
+mdadm --create --run $member0 --auto=md --level=1 --size=$vol0_comp_size --raid-disks=$((vol0_num_comps + 1)) $dev0 missing
+check wait
+
+# Test the member0
+imsm_check member $member0 $((vol0_num_comps + 1)) 1 $vol0_comp_size $((vol0_num_comps * vol0_comp_size)) 0 64
+testdev $member0 $vol0_num_comps $vol0_comp_size 64
diff --git a/tests/18imsm-r0_2d-takeover-r10_4d b/tests/18imsm-r0_2d-takeover-r10_4d
new file mode 100644
index 0000000..0e77e5d
--- /dev/null
+++ b/tests/18imsm-r0_2d-takeover-r10_4d
@@ -0,0 +1,22 @@
+. tests/env-imsm-template
+
+# RAID 0 volume, 2 disks change to RAID 10 volume, 4 disks
+# POSITIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+spare_list="$dev2 $dev3"
+
+# Before: RAID 0 volume, 2 disks, 128k chunk size
+vol0_level=0
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=128
+vol0_num_comps=$num_disks
+vol0_offset=0
+
+# After: RAID 10 volume, 4 disks, 128k chunk size
+vol0_new_level=10
+vol0_new_num_comps=$vol0_num_comps
+vol0_new_chunk=128
+
+. tests/imsm-grow-template 0 1
diff --git a/tests/18imsm-r10_4d-takeover-r0_2d b/tests/18imsm-r10_4d-takeover-r0_2d
new file mode 100644
index 0000000..8a9606b
--- /dev/null
+++ b/tests/18imsm-r10_4d-takeover-r0_2d
@@ -0,0 +1,22 @@
+. tests/env-imsm-template
+
+# RAID 10 volume, 4 disks change to RAID 0 volume, 2 disks
+# POSITIVE test
+
+num_disks=4
+device_list="$dev0 $dev1 $dev2 $dev3"
+
+# Before: RAID 10 volume, 4 disks, 128k chunk size
+vol0_level=10
+vol0_comp_size=$((5 * 1024))
+vol0_chunk=128
+vol0_num_comps=$((num_disks - 2))
+vol0_offset=0
+
+# After: RAID 0 volume, 2 disks, 128k chunk size
+vol0_new_level=0
+vol0_new_num_comps=2
+vol0_new_chunk=128
+new_num_disks=2
+
+. tests/imsm-grow-template 0 1
diff --git a/tests/18imsm-r10_4d-takeover-r0_2d.broken b/tests/18imsm-r10_4d-takeover-r0_2d.broken
new file mode 100644
index 0000000..a27399f
--- /dev/null
+++ b/tests/18imsm-r10_4d-takeover-r0_2d.broken
@@ -0,0 +1,5 @@
+fails rarely
+
+Fails about 1 run in 100 with message:
+
+ ERROR: size is wrong for /dev/md/vol0: 2 * 5120 (chunk=128) = 20480, not 0
diff --git a/tests/18imsm-r1_2d-takeover-r0_1d b/tests/18imsm-r1_2d-takeover-r0_1d
new file mode 100644
index 0000000..049f19c
--- /dev/null
+++ b/tests/18imsm-r1_2d-takeover-r0_1d
@@ -0,0 +1,21 @@
+. tests/env-imsm-template
+
+# RAID 1 volume, 2 disks change to RAID 0 volume, 1 disk
+# POSITIVE test
+
+num_disks=2
+device_list="$dev0 $dev1"
+
+# Before: RAID 1 volume, 2 disks
+vol0_level=1
+vol0_comp_size=$((5 * 1024))
+vol0_num_comps=$(( $num_disks - 1 ))
+vol0_offset=0
+
+# After: RAID 0 volume, 1 disk, 64k chunk size
+vol0_new_level=0
+vol0_new_num_comps=1
+vol0_new_chunk=64
+new_num_disks=1
+
+. tests/imsm-grow-template 0 1
diff --git a/tests/18imsm-r1_2d-takeover-r0_1d.broken b/tests/18imsm-r1_2d-takeover-r0_1d.broken
new file mode 100644
index 0000000..aa1982e
--- /dev/null
+++ b/tests/18imsm-r1_2d-takeover-r0_1d.broken
@@ -0,0 +1,6 @@
+always fails
+
+Fails with error:
+
+ tests/func.sh: line 325: dvsize/chunk: division by 0 (error token
+ is "chunk")
diff --git a/tests/19raid6auto-repair b/tests/19raid6auto-repair
new file mode 100644
index 0000000..ce4a7c0
--- /dev/null
+++ b/tests/19raid6auto-repair
@@ -0,0 +1,49 @@
+number_of_disks=5
+chunksize_in_kib=512
+chunksize_in_b=$[chunksize_in_kib*1024]
+array_data_size_in_kib=$[chunksize_in_kib*(number_of_disks-2)*number_of_disks]
+array_data_size_in_b=$[array_data_size_in_kib*1024]
+devs="$dev0 $dev1 $dev2 $dev3 $dev4"
+
+# default data offset: 2048 sectors = 1024 KiB
+data_offset_in_kib=$[2048/2]
+
+# create a random data file to fill and later verify the raid6 arrays
+dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$array_data_size_in_kib
+
+# perform test for every layout
+layouts="ls rs la ra parity-first ddf-zero-restart ddf-N-restart ddf-N-continue \
+ left-asymmetric-6 right-asymmetric-6 left-symmetric-6 \
+ right-symmetric-6 parity-first-6"
+
+for layout in $layouts
+do
+ mdadm -CR $md0 -l6 --layout=$layout -n$number_of_disks -c $chunksize_in_kib $devs
+ dd if=/tmp/RandFile of=$md0 bs=1024 count=$array_data_size_in_kib
+ blockdev --flushbufs $md0; sync
+ check wait
+ blockdev --flushbufs $devs; sync
+ echo 3 > /proc/sys/vm/drop_caches
+ cmp -s -n $array_data_size_in_b $md0 /tmp/RandFile || { echo sanity cmp failed ; exit 2; }
+
+ # wipe out 5 chunks on each device
+ dd if=/dev/urandom of=$dev0 bs=1024 count=$[5*chunksize_in_kib] seek=$[data_offset_in_kib+chunksize_in_kib*0]
+ dd if=/dev/urandom of=$dev1 bs=1024 count=$[5*chunksize_in_kib] seek=$[data_offset_in_kib+chunksize_in_kib*5]
+ dd if=/dev/urandom of=$dev2 bs=1024 count=$[5*chunksize_in_kib] seek=$[data_offset_in_kib+chunksize_in_kib*10]
+ dd if=/dev/urandom of=$dev3 bs=1024 count=$[5*chunksize_in_kib] seek=$[data_offset_in_kib+chunksize_in_kib*15]
+ dd if=/dev/urandom of=$dev4 bs=1024 count=$[5*chunksize_in_kib] seek=$[data_offset_in_kib+chunksize_in_kib*20]
+
+ blockdev --flushbufs $devs; sync
+ echo 3 > /proc/sys/vm/drop_caches
+
+ $dir/raid6check $md0 0 0 2>&1 | grep -qs "Error" || { echo should detect errors; exit 2; }
+
+ $dir/raid6check $md0 0 0 autorepair > /dev/null || { echo repair failed; exit 2; }
+ blockdev --flushbufs $md0 $devs; sync
+ echo 3 > /proc/sys/vm/drop_caches
+
+ $dir/raid6check $md0 0 0 2>&1 | grep -qs "Error" && { echo errors detected; exit 2; }
+ cmp -s -n $array_data_size_in_b $md0 /tmp/RandFile || { echo cmp failed ; exit 2; }
+
+ mdadm -S $md0
+done
diff --git a/tests/19raid6auto-repair.broken b/tests/19raid6auto-repair.broken
new file mode 100644
index 0000000..e91a142
--- /dev/null
+++ b/tests/19raid6auto-repair.broken
@@ -0,0 +1,5 @@
+always fails
+
+Fails with:
+
+ "should detect errors"
diff --git a/tests/19raid6check b/tests/19raid6check
new file mode 100644
index 0000000..67958c6
--- /dev/null
+++ b/tests/19raid6check
@@ -0,0 +1,27 @@
+#
+# Confirm that raid6check handles all RAID6 layouts.
+# Try both 4 and 5 devices.
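+#
+# raid6check is invoked as "raid6check <md-device> <start-stripe> <length>";
+# the "0 0" used below appears to start at stripe 0 and scan the whole array,
+# reporting any stripe whose P/Q parity does not match the data.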
+
+layouts='ls rs la ra'
+lv=`uname -r`
+if expr $lv '>=' 2.6.30 > /dev/null
+then
+ layouts="$layouts parity-first ddf-zero-restart ddf-N-restart ddf-N-continue \
+ left-asymmetric-6 right-asymmetric-6 left-symmetric-6 right-symmetric-6 parity-first-6"
+fi
+
+for layout in $layouts
+do
+ for devs in 4 5
+ do
+ dl="$dev0 $dev1 $dev2 $dev3"
+ if [ $devs = 5 ]; then dl="$dl $dev4"; fi
+
+ mdadm -CR $md0 -l6 --layout $layout -n$devs $dl
+ check wait
+ tar cf - /etc > $md0
+ ./raid6check $md0 0 0 | grep 'Error detected' && exit 1
+ mdadm -S $md0
+ done
+done
+
diff --git a/tests/19raid6repair b/tests/19raid6repair
new file mode 100644
index 0000000..26846cc
--- /dev/null
+++ b/tests/19raid6repair
@@ -0,0 +1,56 @@
+number_of_disks=4
+chunksize_in_kib=512
+chunksize_in_b=$[chunksize_in_kib*1024]
+array_data_size_in_kib=$[chunksize_in_kib*(number_of_disks-2)*number_of_disks]
+array_data_size_in_b=$[array_data_size_in_kib*1024]
+devs="$dev1 $dev2 $dev3 $dev4"
+
+# default data offset: 2048 sectors = 1024 KiB
+data_offset_in_kib=$[2048/2]
+
+layouts="ls rs la ra parity-first ddf-zero-restart ddf-N-restart ddf-N-continue \
+ left-asymmetric-6 right-asymmetric-6 left-symmetric-6 \
+ right-symmetric-6 parity-first-6"
+
+for layout in $layouts
+do
+ for failure in "$dev3 3 3 2" "$dev3 3 2 3" "$dev3 3 2 1" "$dev3 3 2 0" \
+ "$dev4 3 3 0" "$dev4 3 3 1" "$dev4 3 3 2" \
+ "$dev1 3 0 1" "$dev1 3 0 2" "$dev1 3 0 3" \
+ "$dev2 3 1 0" "$dev2 3 1 2" "$dev2 3 1 3" ; do
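+ # each entry is "<device to corrupt> <stripe> <slot-a> <slot-b>": a chunk of
+ # random data is written into <stripe> on that device, and the stripe plus
+ # the two slot numbers are then handed to "raid6check ... repair"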
+ failure_split=( $failure )
+ device_with_error=${failure_split[0]}
+ stripe_with_error=${failure_split[1]}
+ repair_params="$stripe_with_error ${failure_split[2]} ${failure_split[3]}"
+ start_of_errors_in_kib=$[data_offset_in_kib+chunksize_in_kib*stripe_with_error]
+
+ # create a random file, build a raid6, and fill it from the file
+ dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$array_data_size_in_kib
+ mdadm -CR $md0 -l6 --layout=$layout -n$number_of_disks -c $chunksize_in_kib $devs
+ dd if=/tmp/RandFile of=$md0 bs=1024 count=$array_data_size_in_kib
+ blockdev --flushbufs $md0; sync
+
+ check wait
+ blockdev --flushbufs $devs; sync
+ echo 3 > /proc/sys/vm/drop_caches
+ cmp -s -n $array_data_size_in_b $md0 /tmp/RandFile || { echo sanity cmp failed ; exit 2; }
+
+ dd if=/dev/urandom of=$device_with_error bs=1024 count=$chunksize_in_kib seek=$start_of_errors_in_kib
+ blockdev --flushbufs $device_with_error; sync
+ echo 3 > /proc/sys/vm/drop_caches
+
+ $dir/raid6check $md0 0 0 2>&1 | grep -qs "Error" || { echo should detect errors; exit 2; }
+
+ $dir/raid6check $md0 repair $repair_params > /dev/null || { echo repair failed; exit 2; }
+ blockdev --flushbufs $md0 $devs; sync
+ echo 3 > /proc/sys/vm/drop_caches
+
+ $dir/raid6check $md0 0 0 2>&1 | grep -qs "Error" && { echo errors detected; exit 2; }
+ cmp -s -n $array_data_size_in_b $md0 /tmp/RandFile || { echo cmp failed ; exit 2; }
+
+ mdadm -S $md0
+ udevadm settle
+ sync
+ echo 3 > /proc/sys/vm/drop_caches
+ done
+done
diff --git a/tests/19raid6repair.broken b/tests/19raid6repair.broken
new file mode 100644
index 0000000..e91a142
--- /dev/null
+++ b/tests/19raid6repair.broken
@@ -0,0 +1,5 @@
+always fails
+
+Fails with:
+
+ "should detect errors"
diff --git a/tests/19repair-does-not-destroy b/tests/19repair-does-not-destroy
new file mode 100644
index 0000000..a92883f
--- /dev/null
+++ b/tests/19repair-does-not-destroy
@@ -0,0 +1,28 @@
+number_of_disks=7
+chunksize_in_kib=512
+array_data_size_in_kib=$[chunksize_in_kib*(number_of_disks-2)*number_of_disks]
+array_data_size_in_b=$[array_data_size_in_kib*1024]
+devs="$dev0 $dev1 $dev2 $dev3 $dev4 $dev5 $dev6"
+
+dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$array_data_size_in_kib
+mdadm -CR $md0 -l6 -n$number_of_disks -c $chunksize_in_kib $devs
+dd if=/tmp/RandFile of=$md0 bs=1024 count=$array_data_size_in_kib
+blockdev --flushbufs $md0; sync
+check wait
+blockdev --flushbufs $devs; sync
+echo 3 > /proc/sys/vm/drop_caches
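+# repair various slot pairs (D=data, P=parity, Q=syndrome) on healthy stripes;
+# since nothing was corrupted, repair must leave the array contents unchanged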
+$dir/raid6check $md0 repair 1 2 3 > /dev/null # D D
+$dir/raid6check $md0 repair 8 2 5 > /dev/null # D P
+$dir/raid6check $md0 repair 15 4 6 > /dev/null # D Q
+$dir/raid6check $md0 repair 22 5 6 > /dev/null # P Q
+$dir/raid6check $md0 repair 3 4 0 > /dev/null # Q D
+$dir/raid6check $md0 repair 3 3 1 > /dev/null # P D
+$dir/raid6check $md0 repair 6 4 5 > /dev/null # D<D
+$dir/raid6check $md0 repair 13 5 4 > /dev/null # D>D
+blockdev --flushbufs $devs; sync
+echo 3 > /proc/sys/vm/drop_caches
+$dir/raid6check $md0 0 0 2>&1 | grep -qs "Error" && { echo errors detected; exit 2; }
+cmp -s -n $array_data_size_in_b $md0 /tmp/RandFile || { echo should not mess up correct stripe ; exit 2; }
+
+mdadm -S $md0
+udevadm settle
diff --git a/tests/20raid5journal b/tests/20raid5journal
new file mode 100644
index 0000000..f751ace
--- /dev/null
+++ b/tests/20raid5journal
@@ -0,0 +1,64 @@
+# check write journal of raid456
+
+# test --detail
+test_detail_shows_journal() {
+ mdadm -D $1 | grep journal || {
+		echo >&2 "ERROR: --detail does not show the journal device!"; mdadm -D $1 ; exit 1; }
+}
+
+# test --examine
+test_examine_shows_journal() {
+ mdadm -E $1 | grep Journal || {
+		echo >&2 "ERROR: --examine does not show the Journal device!"; mdadm -E $1 ; exit 1; }
+}
+
+# test --create
+create_with_journal_and_stop() {
+ mdadm -CR $md0 -l5 -n4 $dev0 $dev1 $dev2 $dev3 --write-journal $dev4
+ check wait
+ tar cf - /etc > $md0
+ ./raid6check $md0 0 0 | grep 'Error detected' && exit 1
+ test_detail_shows_journal $md0
+ test_examine_shows_journal $dev4
+ mdadm -S $md0
+}
+
+# test --assemble
+test_assemble() {
+ create_with_journal_and_stop
+ if mdadm -A $md0 $dev0 $dev1 $dev2 $dev3
+ then
+ echo >&2 "ERROR should return 1 when journal is missing!"; cat /proc/mdstat ; exit 1;
+ fi
+ mdadm -S $md0
+
+ mdadm -A $md0 $dev0 $dev1 $dev2 $dev3 --force
+ check readonly
+ mdadm -S $md0
+}
+
+# test --incremental
+test_incremental() {
+ create_with_journal_and_stop
+ for d in $dev0 $dev1 $dev2 $dev3
+ do
+ mdadm -I $d
+ done
+ check inactive
+ mdadm -I $dev4
+ check raid5
+ mdadm -S $md0
+
+ # test --incremental with journal missing
+ for d in $dev0 $dev1 $dev2 $dev3
+ do
+ mdadm -I $d
+ done
+ mdadm -R $md0
+ check readonly
+ mdadm -S $md0
+}
+
+create_with_journal_and_stop
+test_assemble
+test_incremental
diff --git a/tests/21raid5cache b/tests/21raid5cache
new file mode 100644
index 0000000..0dd97bf
--- /dev/null
+++ b/tests/21raid5cache
@@ -0,0 +1,87 @@
+# check data integrity with raid5 write back cache
+
+# create a 4kB random file and 4 files each with a 1kB chunk of the random file:
+# randfile: ABCD randchunk[0-3]: A B C D
+#
+# then create another random 1kB chunk E, and a new random page with A, B, E, D:
+# randchunk4: E newrandfile: ABED
+create_random_data() {
+ dd if=/dev/urandom of=/tmp/randfile bs=4k count=1
+ for x in {0..3}
+ do
+		dd if=/tmp/randfile of=/tmp/randchunk$x bs=1k count=1 skip=$x
+ done
+
+ dd if=/dev/urandom of=/tmp/randchunk4 bs=1k count=1
+
+	rm -f /tmp/newrandfile
+ for x in 0 1 4 3
+ do
+ cat /tmp/randchunk$x >> /tmp/newrandfile
+ done
+}
+
+# create array, $1 could be 5 for raid5 and 6 for raid6
+create_array() {
+ if [ $1 -lt 5 -o $1 -gt 6 ]
+ then
+ echo wrong array type $1
+ exit 2
+ fi
+
+	mdadm -CR $md0 -c4 -l$1 -n10 $dev0 $dev1 $dev2 $dev3 $dev4 $dev5 $dev6 $dev11 $dev8 $dev9 --write-journal $dev10
+ check wait
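+	# switch the journal to write-back caching (the default mode is write-through)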
+ echo write-back > /sys/block/md0/md/journal_mode
+}
+
+restart_array_write_back() {
+ mdadm -S $md0
+ mdadm -A $md0 $dev0 $dev1 $dev2 $dev3 $dev4 $dev5 $dev6 $dev11 $dev8 $dev9 $dev10
+ echo write-back > /sys/block/md0/md/journal_mode
+}
+
+# compare the first page of md0 with file in $1
+cmp_first_page() {
+ cmp -n 4096 $1 $md0 || { echo cmp failed ; exit 2 ; }
+}
+
+# write 3 pages after the first page of md0
+write_three_pages() {
+ for x in {1..3}
+ do
+		dd if=/dev/urandom of=$md0 bs=4k count=1 seek=$x
+ done
+}
+
+# run_test <array_type:5/6> <degraded_or_not:yes/no>
+run_test() {
+ create_random_data
+ create_array $1
+
+ if [ $2 == yes ]
+ then
+ mdadm --fail $md0 $dev0
+ fi
+
+ dd if=/tmp/randfile of=$md0 bs=4k count=1
+ restart_array_write_back
+ cmp_first_page /tmp/randfile
+ restart_array_write_back
+ write_three_pages
+ cmp_first_page /tmp/randfile
+
+	dd if=/tmp/randchunk4 of=$md0 bs=1k count=1 seek=2
+ restart_array_write_back
+ cmp_first_page /tmp/newrandfile
+ restart_array_write_back
+ write_three_pages
+ cmp_first_page /tmp/newrandfile
+
+ mdadm -S $md0
+}
+
+run_test 5 no
+run_test 5 yes
+run_test 6 no
+run_test 6 yes
diff --git a/tests/23rdev-lifetime b/tests/23rdev-lifetime
new file mode 100644
index 0000000..1750b0d
--- /dev/null
+++ b/tests/23rdev-lifetime
@@ -0,0 +1,34 @@
+devname=${dev0##*/}
+devt=`cat /sys/block/$devname/dev`
+pid=""
+runtime=2
+
+clean_up_test() {
+	kill -9 $pid
+ echo clear > /sys/block/md0/md/array_state
+}
+
+trap 'clean_up_test' EXIT
+
+add_by_sysfs() {
+ while true; do
+ echo $devt > /sys/block/md0/md/new_dev
+ done
+}
+
+remove_by_sysfs(){
+ while true; do
+ echo remove > /sys/block/md0/md/dev-${devname}/state
+ done
+}
+
+echo md0 > /sys/module/md_mod/parameters/new_array || die "create md0 failed"
+
+add_by_sysfs &
+pid="$pid $!"
+
+remove_by_sysfs &
+pid="$pid $!"
+
+sleep $runtime
+exit 0
diff --git a/tests/24raid10deadlock b/tests/24raid10deadlock
new file mode 100644
index 0000000..ee330aa
--- /dev/null
+++ b/tests/24raid10deadlock
@@ -0,0 +1,88 @@
+devs="$dev0 $dev1 $dev2 $dev3"
+runtime=120
+pid=""
+action_pid=""
+
+set_up_injection()
+{
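+	# enable fail_make_request fault injection on device $1:
+	# unlimited failures (times=-1) at 1% probability, non-verbose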
+ echo -1 > /sys/kernel/debug/fail_make_request/times
+ echo 1 > /sys/kernel/debug/fail_make_request/probability
+ echo 0 > /sys/kernel/debug/fail_make_request/verbose
+ echo 1 > /sys/block/${1##*/}/make-it-fail
+}
+
+clean_up_injection()
+{
+ echo 0 > /sys/block/${1##*/}/make-it-fail
+ echo 0 > /sys/kernel/debug/fail_make_request/times
+ echo 0 > /sys/kernel/debug/fail_make_request/probability
+ echo 2 > /sys/kernel/debug/fail_make_request/verbose
+}
+
+test_rdev()
+{
+ while true; do
+ mdadm -f $md0 $1 &> /dev/null
+ mdadm -r $md0 $1 &> /dev/null
+ mdadm --zero-superblock $1 &> /dev/null
+ mdadm -a $md0 $1 &> /dev/null
+ sleep $2
+ done
+}
+
+test_write_action()
+{
+ while true; do
+ echo frozen > /sys/block/md0/md/sync_action
+ echo idle > /sys/block/md0/md/sync_action
+ sleep 0.1
+ done
+}
+
+set_up_test()
+{
+ fio -h &> /dev/null || die "fio not found"
+
+ # create a simple raid10
+ mdadm -Cv -R -n 4 -l10 $md0 $devs || die "create raid10 failed"
+}
+
+clean_up_test()
+{
+ clean_up_injection $dev0
+ pkill -9 fio
+ kill -9 $pid
+ kill -9 $action_pid
+
+ sleep 1
+
+ if ps $action_pid | tail -1 | awk '{print $3}' | grep D; then
+ die "thread that is writing sysfs is stuck in D state, deadlock is triggered"
+ fi
+ mdadm -S $md0
+}
+
+cat /sys/kernel/debug/fail_make_request/times || die "fault injection is not enabled"
+
+trap 'clean_up_test' EXIT
+
+set_up_test || die "set up test failed"
+
+# background I/O pressure from fio
+fio -filename=$md0 -rw=randwrite -direct=1 -name=test -bs=4k -numjobs=16 -iodepth=16 &
+
+# trigger add/remove device by io failure
+set_up_injection $dev0
+test_rdev $dev0 2 &
+pid="$pid $!"
+
+# add/remove device directly
+test_rdev $dev3 10 &
+pid="$pid $!"
+
+test_write_action &
+action_pid="$!"
+
+sleep $runtime
+
+exit 0
diff --git a/tests/24raid10deadlock.inject_error b/tests/24raid10deadlock.inject_error
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/24raid10deadlock.inject_error
diff --git a/tests/24raid456deadlock b/tests/24raid456deadlock
new file mode 100644
index 0000000..80e6e97
--- /dev/null
+++ b/tests/24raid456deadlock
@@ -0,0 +1,58 @@
+devs="$dev0 $dev1 $dev2 $dev3 $dev4 $dev5"
+runtime=120
+pid=""
+old=`cat /proc/sys/vm/dirty_background_ratio`
+
+test_write_action()
+{
+ while true; do
+ echo check > /sys/block/md0/md/sync_action &> /dev/null
+ sleep 0.1
+ echo idle > /sys/block/md0/md/sync_action &> /dev/null
+ done
+}
+
+test_write_back()
+{
+ fio -filename=$md0 -bs=4k -rw=write -numjobs=1 -name=test \
+ -time_based -runtime=$runtime &> /dev/null
+}
+
+set_up_test()
+{
+ fio -h &> /dev/null || die "fio not found"
+
+ # create a simple raid6
+ mdadm -Cv -R -n 6 -l6 $md0 $devs --assume-clean || die "create raid6 failed"
+
+ # trigger dirty pages write back
+ echo 0 > /proc/sys/vm/dirty_background_ratio
+}
+
+clean_up_test()
+{
+ echo $old > /proc/sys/vm/dirty_background_ratio
+
+ pkill -9 fio
+ kill -9 $pid
+
+ sleep 1
+
+ if ps $pid | tail -1 | awk '{print $3}' | grep D; then
+ die "thread that is writing sysfs is stuck in D state, deadlock is triggered"
+ fi
+ mdadm -S $md0
+}
+
+trap 'clean_up_test' EXIT
+
+set_up_test || die "set up test failed"
+
+test_write_back &
+
+test_write_action &
+pid="$!"
+
+sleep $runtime
+
+exit 0
diff --git a/tests/25raid456-recovery-while-reshape b/tests/25raid456-recovery-while-reshape
new file mode 100644
index 0000000..3f6251b
--- /dev/null
+++ b/tests/25raid456-recovery-while-reshape
@@ -0,0 +1,33 @@
+devs="$dev0 $dev1 $dev2"
+
+set_up_test()
+{
+ mdadm -Cv -R -n 3 -l5 $md0 $devs --assume-clean --size=50M || die "create array failed"
+ mdadm -a $md0 $dev3 $dev4 || die "failed to bind new disk to array"
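+	# throttle sync speed (1000 KB/s) so the reshape/recovery stays in progress while we act on it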
+ echo 1000 > /sys/block/md0/md/sync_speed_max
+}
+
+clean_up_test()
+{
+ mdadm -S $md0
+}
+
+trap 'clean_up_test' EXIT
+
+set_up_test || die "set up test failed"
+
+# trigger reshape
+mdadm --grow -l 6 $md0
+sleep 1
+
+# set up replacement
+echo frozen > /sys/block/md0/md/sync_action
+echo want_replacement > /sys/block/md0/md/rd0/state
+echo reshape > /sys/block/md0/md/sync_action
+sleep 1
+
+# reassemble the array
+mdadm -S $md0 || die "can't stop array"
+mdadm --assemble $md0 $devs $dev3 $dev4 || die "can't assemble array"
+
+exit 0
diff --git a/tests/25raid456-reshape-corrupt-data b/tests/25raid456-reshape-corrupt-data
new file mode 100644
index 0000000..fdb875f
--- /dev/null
+++ b/tests/25raid456-reshape-corrupt-data
@@ -0,0 +1,35 @@
+devs="$dev0 $dev1 $dev2"
+
+set_up_test()
+{
+ mdadm -Cv -R -n 3 -l5 $md0 $devs --size=50M || die "create array failed"
+ mdadm -a $md0 $dev3 || die "failed to bind new disk to array"
+ mkfs.xfs -f $md0 || die "mkfs failed"
+ xfs_ncheck $md0 || die "check fs failed"
+}
+
+clean_up_test()
+{
+ mdadm -S $md0
+}
+
+trap 'clean_up_test' EXIT
+
+set_up_test || die "set up test failed"
+
+# trigger reshape
+echo 1000 > /sys/block/md0/md/sync_speed_max
+mdadm --grow -l 6 $md0
+sleep 1
+
+# stop and start reshape
+echo frozen > /sys/block/md0/md/sync_action
+echo system > /sys/block/md0/md/sync_speed_max
+echo reshape > /sys/block/md0/md/sync_action
+
+mdadm -W $md0
+
+# check if data is corrupted
+xfs_ncheck $md0 || die "data is corrupted after reshape"
+
+exit 0
diff --git a/tests/25raid456-reshape-deadlock b/tests/25raid456-reshape-deadlock
new file mode 100644
index 0000000..bfa0cc5
--- /dev/null
+++ b/tests/25raid456-reshape-deadlock
@@ -0,0 +1,34 @@
+devs="$dev0 $dev1 $dev2"
+
+set_up_test()
+{
+ mdadm -Cv -R -n 3 -l5 $md0 $devs --size=50M || die "create array failed"
+ mdadm -a $md0 $dev3 || die "failed to bind new disk to array"
+ echo 1000 > /sys/block/md0/md/sync_speed_max
+}
+
+clean_up_test()
+{
+ echo idle > /sys/block/md0/md/sync_action
+ mdadm -S $md0
+}
+
+trap 'clean_up_test' EXIT
+
+set_up_test || die "set up test failed"
+
+# trigger reshape
+mdadm --grow -l 6 $md0
+sleep 1
+
+# stop reshape
+echo frozen > /sys/block/md0/md/sync_action
+
+# read across the reshape position
+dd if=$md0 of=/dev/null bs=1M count=100 iflag=direct &> /dev/null &
+sleep 2
+
+# suspend array
+echo 1 > /sys/block/md0/md/suspend_lo
+
+exit 0
diff --git a/tests/25raid456-reshape-while-recovery b/tests/25raid456-reshape-while-recovery
new file mode 100644
index 0000000..b9f871f
--- /dev/null
+++ b/tests/25raid456-reshape-while-recovery
@@ -0,0 +1,32 @@
+devs="$dev0 $dev1 $dev2"
+
+set_up_test()
+{
+ mdadm -Cv -R -n 3 -l5 $md0 $devs --assume-clean --size=50M || die "create array failed"
+ mdadm -a $md0 $dev3 $dev4 || die "failed to bind new disk to array"
+ echo 1000 > /sys/block/md0/md/sync_speed_max
+}
+
+clean_up_test()
+{
+ mdadm -S $md0
+}
+
+trap 'clean_up_test' EXIT
+
+set_up_test || die "set up test failed"
+
+# set up replacement
+echo want_replacement > /sys/block/md0/md/rd0/state
+sleep 1
+
+# trigger reshape
+echo frozen > /sys/block/md0/md/sync_action
+mdadm --grow -l 6 $md0
+sleep 1
+
+# reassemble the array
+mdadm -S $md0 || die "can't stop array"
+mdadm --assemble $md0 $devs $dev3 $dev4 || die "can't assemble array"
+
+exit 0
diff --git a/tests/ToTest b/tests/ToTest
new file mode 100644
index 0000000..b98e266
--- /dev/null
+++ b/tests/ToTest
@@ -0,0 +1,44 @@
+
+multipath!!
+
+add/remove/fail
+ raid1 DONE
+ raid5 DONE
+ raid6/10 needed??
+
+assemble
+ by devices DONE
+ by uuid DONE
+ by superminor DONE
+ by config file DONE
+
+ various --updates DONE (not sparc2.2 or summaries)
+
+stop
+ --scan
+
+readonly/readwrite
+
+bitmap
+ separate file
+ internal
+ filename in config file
+
+examine
+ --scan
+ --brief
+
+detail
+
+grow:
+ size
+ raid1/5/6 DONE
+ devices
+ raid1 add DONE
+ raid1 shrink DONE
+
+'--quiet' option, and remove ""
+'--name' option for v1, and config file etc...
+
+faulty
+ errors in raid1/5/6
diff --git a/tests/env-ddf-template b/tests/env-ddf-template
new file mode 100644
index 0000000..90d7272
--- /dev/null
+++ b/tests/env-ddf-template
@@ -0,0 +1,113 @@
+sha1_sum() {
+ sha1sum "$1" | cut -c 1-40
+}
+
+get_rootdev() {
+ local dev=$(stat -c %D /)
+ local maj=$(expr $dev : '\(..*\)..')
+ local min=${dev#$maj}
+ local bd=/dev/$(basename $(readlink /sys/dev/block/$((0x$maj)):$((0x$min))))
+ [ -b $bd ] || exit 1
+ echo $bd
+}
+
+get_sysdir() {
+ local mddev=$1
+ [ -L $mddev ] && mddev=$(readlink -f $mddev)
+ echo "/sys/class/block/$(basename $mddev)/md"
+}
+
+get_raiddisks() {
+ sysdir=$(get_sysdir "$1")
+ for i in $(seq 0 $(($(cat $sysdir/raid_disks)-1))); do
+ if [ -d $sysdir/rd$i ]; then
+ readlink -f /dev/block/$(cat $sysdir/rd$i/block/dev)
+ else
+ echo MISSING
+ fi
+ done
+}
+
+get_present() {
+ get_raiddisks $1 | grep -vc MISSING
+}
+
+ddf_check() {
+ udevadm settle
+ case $1 in
+ container )
+ grep -s "blocks super external:ddf" /proc/mdstat > /dev/null || {
+ echo >&2 "**Fatal** Correctly formed container not found"; cat /proc/mdstat; exit 1; }
+ ;;
+ member )
+ t_member=$2
+ t_num_disks=$3
+ t_level=$4
+ t_rd_size=$5
+ t_size=$6
+ t_offset=$7
+ t_chunk=$8
+ t_layout=$9
+
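+	# round the per-disk size down to a multiple of the chunk size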
+ if [ $t_chunk -ne 0 ]; then
+ t_rd_size=$((t_rd_size & ~(t_chunk - 1)))
+ fi
+ case $t_level in
+ 0) t_size=$((t_num_disks*$t_rd_size));;
+ 1) t_size=$t_rd_size;;
+ 4|5) t_size=$(((t_num_disks-1)*$t_rd_size));;
+ 6) t_size=$(((t_num_disks-2)*$t_rd_size));;
+ 10) t_size=$((t_num_disks*$t_rd_size/t_layout));;
+ esac
+
+ err=0
+
+ eval `stat -L -c "let major=0x%t; let minor=0x%T;" $t_member`
+ sysfs=/sys/dev/block/${major}:${minor}
+ if [ ! -f ${sysfs}/md/array_state ]; then
+ echo "**Fatal**: Array member $t_member not found" >&2; cat /proc/mdstat >&2; exit 1
+ fi
+ _chunk=`cat ${sysfs}/md/chunk_size`
+ if [ $t_chunk -ne $((_chunk/1024)) ]; then
+ echo "**Error**: Chunk size mismatch - expected $t_chunk, actual $_chunk" >&2
+ err=$((err + 1))
+ fi
+ for i in `seq 0 $((t_num_disks - 1))`; do
+ _offset=`cat ${sysfs}/md/rd${i}/offset`
+ if [ $t_offset -ne $((_offset / 2)) ]; then
+ echo "**Error**: Offset mismatch - expected $t_offset, actual $((_offset/2))" >&2
+ err=$((err + 1))
+ fi
+ _rd_size=`cat ${sysfs}/md/rd${i}/size`
+ if [ $t_rd_size -ne $_rd_size ]; then
+ echo "**Error**: Component size mismatch - expected $t_rd_size, actual $_rd_size" >&2
+ err=$((err + 1))
+ fi
+ done
+ _size=`cat ${sysfs}/md/array_size`
+ [ o$_size = odefault ] && _size=$(($(cat ${sysfs}/size)/2))
+ if [ $t_size -ne $_size ]; then
+ echo "**Error**: Array size mismatch - expected $t_size, actual $_size" >&2
+ err=$((err + 1))
+ fi
+ if [ $err -gt 0 ]; then
+ echo "$t_member failed check" >&2
+ cat /proc/mdstat >&2
+ mdadm -E /dev/loop8 >&2
+ exit 1
+ fi
+ ;;
+ * )
+ echo >&2 "**Error** unknown check $1"; exit 1;
+ esac
+}
+
+container=/dev/md/ddf0
+member0=/dev/md/vol0
+member1=/dev/md/vol1
+member2=/dev/md/vol2
+member3=/dev/md/vol3
+member4=/dev/md/vol4
+
+# We don't want systemd to start system mdmon; start our own
+export MDADM_NO_SYSTEMCTL=1
diff --git a/tests/env-imsm-template b/tests/env-imsm-template
new file mode 100644
index 0000000..d524771
--- /dev/null
+++ b/tests/env-imsm-template
@@ -0,0 +1,91 @@
+imsm_check() {
+ udevadm settle
+ case $1 in
+ container )
+ grep -s "blocks super external:imsm" /proc/mdstat > /dev/null || {
+ echo >&2 "**Fatal** Correctly formed container not found"; cat /proc/mdstat; exit 1; }
+ ;;
+ member )
+ t_member=$2
+ t_num_disks=$3
+ t_level=$4
+ t_rd_size=$5
+ t_size=$6
+ t_offset=$7
+ t_chunk=$8
+
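+	# round the per-disk size down to a 1024 KiB boundary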
+ t_rd_size=$((t_rd_size & ~(1024 - 1)))
+
+ if [ $t_level -eq 1 ]; then
+ t_chunk=64
+ fi
+
+ t_num_data_disks=0
+
+ case $t_level in
+ 0)
+ t_num_data_disks=$t_num_disks
+ ;;
+ 1)
+ t_num_data_disks=1
+ ;;
+ 5)
+ t_num_data_disks=$((t_num_disks-1))
+ ;;
+ 10)
+ t_num_data_disks=$((t_num_disks/2))
+ ;;
+ esac
+
+ t_size=$((t_rd_size*t_num_data_disks))
+
+ err=0
+
+ eval `stat -L -c "let major=0x%t; let minor=0x%T;" $t_member`
+ sysfs=/sys/dev/block/${major}:${minor}
+ if [ ! -f ${sysfs}/md/array_state ]; then
+ echo "**Fatal**: Array member $t_member not found" >&2; cat /proc/mdstat >&2; exit 1
+ fi
+ _chunk=`cat ${sysfs}/md/chunk_size`
+ if [ $t_chunk -ne $((_chunk/1024)) ]; then
+ echo "**Error**: Chunk size mismatch - expected $t_chunk, actual $(($_chunk/1024))" >&2
+ err=$((err + 1))
+ fi
+ for i in `seq 0 $((t_num_disks - 1))`; do
+ _offset=`cat ${sysfs}/md/rd${i}/offset`
+ if [ $t_offset -ne $((_offset / 2)) ]; then
+ echo "**Error**: Offset mismatch - expected $t_offset, actual $_offset" >&2
+ err=$((err + 1))
+ fi
+ _rd_size=`cat ${sysfs}/md/rd${i}/size`
+ if [ $t_rd_size -ne $_rd_size ]; then
+ echo "**Error**: Component size mismatch - expected $t_rd_size, actual $_rd_size" >&2
+ err=$((err + 1))
+ fi
+ done
+ _size=`cat ${sysfs}/md/array_size`
+ if [ $t_size -ne $_size ]; then
+ echo "**Error**: Array size mismatch - expected $t_size, actual $_size" >&2
+ err=$((err + 1))
+ fi
+ if [ $err -gt 0 ]; then
+ echo "$t_member failed check" >&2
+ cat /proc/mdstat >&2
+ mdadm -E /dev/loop0 >&2
+ exit 1
+ fi
+ ;;
+ * )
+ echo >&2 "**Error** unknown check $1"; exit 1;
+ esac
+}
+
+export IMSM_NO_PLATFORM=1
+export IMSM_DEVNAME_AS_SERIAL=1
+export IMSM_TEST_OROM=1
+container=/dev/md/container
+member0=/dev/md/vol0
+member1=/dev/md/vol1
+member2=/dev/md/vol2
+member3=/dev/md/vol3
+member4=/dev/md/vol4
diff --git a/tests/func.sh b/tests/func.sh
new file mode 100644
index 0000000..1c1a28a
--- /dev/null
+++ b/tests/func.sh
@@ -0,0 +1,354 @@
+#!/bin/bash
+
+# We test mdadm on loop-back block devices.
+# dir for storing files should be settable by command line maybe
+size=20000
+# super0, round down to multiple of 64 and subtract 64
+mdsize0=19904
+# super00 is nested, subtract 128
+mdsize00=19840
+# super1.0 round down to multiple of 2, subtract 8
+mdsize1=19992
+mdsize1a=19988
+mdsize12=19988
+# super1.2 for linear: round to multiple of 2, subtract 4
+mdsize1_l=19996
+mdsize2_l=19996
+# subtract another 4 for bitmaps
+mdsize1b=19988
+mdsize11=19992
+mdsize11a=19456
+mdsize12=19988
+
+# ddf needs bigger devices as 32Meg is reserved!
+ddfsize=65536
+
+# $1 is an optional parameter describing why the log is being saved
+save_log() {
+ status=$1
+ logfile="$status""$_basename".log
+
+ cat $targetdir/stderr >> $targetdir/log
+ cp $targetdir/log $logdir/$_basename.log
+ echo "## $HOSTNAME: saving dmesg." >> $logdir/$logfile
+ dmesg -c >> $logdir/$logfile
+ echo "## $HOSTNAME: saving proc mdstat." >> $logdir/$logfile
+ cat /proc/mdstat >> $logdir/$logfile
+ array=($(mdadm -Ds | cut -d' ' -f2))
+ [ "$1" == "fail" ] &&
+ echo "FAILED - see $logdir/$_basename.log and $logdir/$logfile for details"
+ if [ $DEVTYPE == 'lvm' ]
+ then
+ # not supported lvm type yet
+ echo
+ elif [ "$DEVTYPE" == 'loop' -o "$DEVTYPE" == 'disk' ]
+ then
+ if [ ! -z "$array" -a ${#array[@]} -ge 1 ]
+ then
+ echo "## $HOSTNAME: mdadm -D ${array[@]}" >> $logdir/$logfile
+ $mdadm -D ${array[@]} >> $logdir/$logfile
+			# skip saving external bitmaps (external file, imsm, ...)
+ cat /proc/mdstat | grep -q "linear\|external" && return 0
+ md_disks=($($mdadm -D -Y ${array[@]} | grep "/dev/" | cut -d'=' -f2))
+ cat /proc/mdstat | grep -q "bitmap"
+ if [ $? -eq 0 ]
+ then
+ echo "## $HOSTNAME: mdadm -X ${md_disks[@]}" >> $logdir/$logfile
+ $mdadm -X ${md_disks[@]} >> $logdir/$logfile
+ echo "## $HOSTNAME: mdadm -E ${md_disks[@]}" >> $logdir/$logfile
+ $mdadm -E ${md_disks[@]} >> $logdir/$logfile
+ fi
+ else
+ echo "## $HOSTNAME: no array assembled!" >> $logdir/$logfile
+ fi
+ fi
+}
+
+cleanup() {
+ udevadm settle
+ $mdadm -Ssq 2> /dev/null
+ case $DEVTYPE in
+ loop )
+ for d in 0 1 2 3 4 5 6 7 8 9 10 11 12 13
+ do
+ losetup -d /dev/loop$d &> /dev/null
+ rm -f /dev/disk/by-path/loop*
+ rm -f /var/tmp/mdtest$d
+ done
+ ;;
+ lvm )
+ for d in 0 1 2 3 4 5 6 7 8 9 10 11 12 13
+ do
+ eval "lvremove --quiet -f \$dev$d"
+ done
+ ;;
+ disk )
+ $mdadm --zero ${disks[@]} &> /dev/null
+ ;;
+ esac
+}
+
+do_clean()
+{
+ mdadm -Ss > /dev/null
+ mdadm --zero $devlist 2> /dev/null
+ dmesg -c > /dev/null
+}
+
+check_env() {
+ user=$(id -un)
+ [ "X$user" != "Xroot" ] && {
+ echo "test: testing can only be done as 'root'."
+ exit 1
+ }
+ [ \! -x $mdadm ] && {
+		echo "test: please run 'make everything' before performing tests."
+ exit 1
+ }
+ cmds=(mdadm lsblk df udevadm losetup mkfs.ext3 fsck seq)
+ for cmd in ${cmds[@]}
+ do
+ which $cmd > /dev/null || {
+ echo "$cmd command not found!"
+ exit 1
+ }
+ done
+	if lsblk -a | grep -iq raid
+	then
+		# do not run mdadm -Ss directly if there are RAIDs working.
+ echo "test: please run test suite without running RAIDs environment."
+ exit 1
+ fi
+ # Check whether to run multipath tests
+ modprobe multipath 2> /dev/null
+ grep -sq 'Personalities : .*multipath' /proc/mdstat &&
+ MULTIPATH="yes"
+ if [ "$MULTIPATH" != "yes" ]; then
+ echo "test: skipping tests for multipath, which is removed in upstream 6.8+ kernels"
+ fi
+
+ # Check whether to run linear tests
+ modprobe linear 2> /dev/null
+ grep -sq 'Personalities : .*linear' /proc/mdstat &&
+ LINEAR="yes"
+ if [ "$LINEAR" != "yes" ]; then
+ echo "test: skipping tests for linear, which is removed in upstream 6.8+ kernels"
+ fi
+}
+
+do_setup() {
+ trap cleanup 0 1 3 15
+ trap ctrl_c 2
+
+ check_env
+ [ -d $logdir ] || mkdir -p $logdir
+
+ devlist=
+ if [ "$DEVTYPE" == "loop" ]
+ then
+ # make sure there are no loop devices remaining.
+ # udev started things can sometimes prevent them being stopped
+ # immediately
+ while grep loop /proc/partitions > /dev/null 2>&1
+ do
+ $mdadm -Ssq
+ losetup -d /dev/loop[0-9]* 2> /dev/null
+ sleep 0.2
+ done
+ elif [ "$DEVTYPE" == "disk" ]
+ then
+ if [ ! -z "$disks" ]
+ then
+ for d in $(seq 0 ${#disks[@]})
+ do
+ eval "dev$d=${disks[$d]}"
+ eval devlist=\"\$devlist \$dev$d\"
+ eval devlist$d=\"\$devlist\"
+ done
+ $mdadm --zero ${disks[@]} &> /dev/null
+ else
+			echo "No physical devices were provided for disk mode."
+ exit 1
+ fi
+ fi
+ for d in 0 1 2 3 4 5 6 7 8 9 10 11 12 13
+ do
+ sz=$size
+ [ $d -gt 7 ] && sz=$ddfsize
+ case $DEVTYPE in
+ loop)
+ [ -f $targetdir/mdtest$d ] ||
+ dd if=/dev/zero of=$targetdir/mdtest$d count=$sz bs=1K > /dev/null 2>&1
+			# zero any stale superblock so udev doesn't act on the device
+ mdadm --zero $targetdir/mdtest$d 2> /dev/null
+ if [ $d -eq 7 ]
+ then
+ losetup /dev/loop$d $targetdir/mdtest6 # for multipath use
+ else
+ losetup /dev/loop$d $targetdir/mdtest$d
+ fi
+ eval dev$d=/dev/loop$d
+ eval file$d=$targetdir/mdtest$d
+ ;;
+ lvm)
+ unset MULTIPATH
+ eval dev$d=/dev/mapper/${LVM_VOLGROUP}-mdtest$d
+ if ! lvcreate --quiet -L ${sz}K -n mdtest$d $LVM_VOLGROUP
+ then
+ trap '' 0 # make sure lvremove is not called
+ eval echo error creating \$dev$d
+ exit 129
+ fi
+ ;;
+ ram)
+ unset MULTIPATH
+ eval dev$d=/dev/ram$d
+ ;;
+ esac
+ eval devlist=\"\$devlist \$dev$d\"
+ eval devlist$d=\"\$devlist\"
+ #" <-- add this quote to un-confuse vim syntax highlighting
+ done
+ path0=$dev6
+ path1=$dev7
+ ulimit -c unlimited
+ [ -f /proc/mdstat ] || modprobe md_mod
+ echo 2000 > /proc/sys/dev/raid/speed_limit_max
+ echo 0 > /sys/module/md_mod/parameters/start_ro
+}
+
+# check various things; usage: check <what> [args], e.g. "check wait", "check raid5", "check spares 2"
+check() {
+ case $1 in
+ opposite_result )
+ if [ $? -eq 0 ]; then
+ die "This command shouldn't run successfully"
+ fi
+ ;;
+ spares )
+ spares=$(tr '] ' '\012\012' < /proc/mdstat | grep -c '(S)' || exit 0)
+ [ $spares -ne $2 ] &&
+ die "expected $2 spares, found $spares"
+ ;;
+ raid* | linear )
+ grep -sq "active $1 " /proc/mdstat ||
+ die "active $1 not found"
+ ;;
+ algorithm )
+ grep -sq " algorithm $2 " /proc/mdstat ||
+ die "algorithm $2 not found"
+ ;;
+ resync | recovery | reshape )
+ cnt=5
+ while ! grep -sq $1 /proc/mdstat
+ do
+ if [ $cnt -gt 0 ] && grep -v idle /sys/block/md*/md/sync_action > /dev/null
+ then # Something isn't idle - wait a bit
+ sleep 0.5
+ cnt=$[cnt-1]
+ else
+ die "no $1 happening"
+ fi
+ done
+ ;;
+ nosync )
+ sleep 0.5
+ # Since 4.2 we delay the close of recovery until there has been a chance for
+ # spares to be activated. That means that a recovery that finds nothing
+ # to do can still take a little longer than expected.
+		# As an extra check: if sync_completed shows that the end has been
+		# reached, assume there is no recovery.
+ if grep -sq -E '(resync|recovery|reshape) *=' /proc/mdstat
+ then
+ incomplete=`grep / /sys/block/md*/md/sync_completed 2> /dev/null | sed '/^ *\([0-9]*\) \/ \1/d'`
+ [ -n "$incomplete" ] &&
+ die "resync or recovery is happening!"
+ fi
+ ;;
+ wait )
+ p=`cat /proc/sys/dev/raid/speed_limit_max`
+ echo 2000000 > /proc/sys/dev/raid/speed_limit_max
+ sleep 0.1
+ while grep -Eq '(resync|recovery|reshape|check|repair) *=' /proc/mdstat ||
+ grep -v idle > /dev/null /sys/block/md*/md/sync_action
+ do
+ sleep 0.5
+ done
+ echo $p > /proc/sys/dev/raid/speed_limit_max
+ ;;
+ state )
+ grep -sq "blocks.*\[$2\]\$" /proc/mdstat ||
+ die "state $2 not found!"
+ sleep 0.5
+ ;;
+ bitmap )
+ grep -sq bitmap /proc/mdstat ||
+ die "no bitmap"
+ ;;
+ nobitmap )
+ grep -sq "bitmap" /proc/mdstat &&
+ die "bitmap present"
+ ;;
+ readonly )
+ grep -sq "read-only" /proc/mdstat ||
+ die "array is not read-only!"
+ ;;
+ inactive )
+ grep -sq "inactive" /proc/mdstat ||
+ die "array is not inactive!"
+ ;;
+ # It only can be used when there is only one raid
+ chunk )
+ chunk_size=`awk -F',' '/chunk/{print $2}' /proc/mdstat | awk -F'[a-z]' '{print $1}'`
+ if [ "$chunk_size" -ne "$2" ] ; then
+ die "chunksize should be $2, but it's $chunk_size"
+ fi
+ ;;
+ * )
+ die "unknown check $1"
+ ;;
+ esac
+}
+
+no_errors() {
+ if [ -s $targetdir/stderr ]
+ then
+ echo Bad errors from mdadm:
+ cat $targetdir/stderr
+ exit 2
+ fi
+}
+
+# basic device test: testdev <device> <number-of-components> <component-size-KiB> <chunk-KiB> [skip-mkfs-flag]
+testdev() {
+ [ -b $1 ] || die "$1 isn't a block device."
+ [ "$DEVTYPE" == "disk" ] && return 0
+ udevadm settle
+ dev=$1
+ cnt=$2
+ dvsize=$3
+ chunk=$4
+ if [ -z "$5" ]
+ then
+ mkfs.ext3 -F -j $dev > /dev/null 2>&1 && fsck -fn $dev >&2
+ fi
+ dsize=$[dvsize/chunk]
+ dsize=$[dsize*chunk]
+ rasize=$[dsize*2*cnt]
+ # rasize is in sectors
+ if [ -n "$DEV_ROUND_K" ]
+ then
+ rasize=$[rasize/DEV_ROUND_K/2]
+ rasize=$[rasize*DEV_ROUND_K*2]
+ fi
+ [ `/sbin/blockdev --getsize $dev` -eq 0 ] && sleep 2
+ _sz=`/sbin/blockdev --getsize $dev`
+ [ $rasize -lt $_sz -o $[rasize*4/5] -gt $_sz ] &&
+ die "size is wrong for $dev: $cnt * $dvsize (chunk=$chunk) = $rasize, not $_sz"
+ return 0
+}
+
+rotest() {
+ dev=$1
+ fsck -fn $dev >&2
+}
diff --git a/tests/imsm-grow-template b/tests/imsm-grow-template
new file mode 100644
index 0000000..1a8676e
--- /dev/null
+++ b/tests/imsm-grow-template
@@ -0,0 +1,119 @@
+
+# 0 - POSITIVE test, otherwise NEGATIVE test
+negative_test=$1
+
+# 0 - On-line Capacity Expansion test, otherwise LEVEL migration or CHUNK size migration test
+migration_test=$2
+
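+# grow_member <member> <disks> <comps> <level> <size> <offset> <chunk> <old_chunk>
+# migrate one member volume to a new level or chunk size and verify the result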
+function grow_member() {
+ local member=$1
+ local disks=$2
+ local comps=$3
+ local level=$4
+ local size=$5
+ local offset=$6
+ local chunk=$7
+ local old_chunk=$8
+ local array_size=$((comps * size))
+
+ rm -f $backup_imsm
+ if [ $chunk -eq $old_chunk ]; then
+ ( set -ex; mdadm --grow $member --level=$level )
+ else
+ ( set -ex; mdadm --grow $member --chunk=$chunk )
+ fi
+ local status=$?
+ if [ $negative_test -ne 0 ]; then
+ if [ $status -eq 0 ]; then
+			echo >&2 "**Error**: $member: --grow should have failed, but it completed successfully"
+ exit 1
+ fi
+ return
+ fi
+ check wait
+ sleep 5
+ imsm_check member $member $disks $level $size $array_size $offset $chunk
+ testdev $member $comps $size $chunk
+}
+
+# Create container
+mdadm --create --run $container --auto=md --metadata=imsm --raid-disks=$num_disks $device_list
+check wait
+imsm_check container $num_disks
+
+# Create first volume inside the container
+if [[ ! -z $vol0_chunk ]]; then
+ mdadm --create --run $member0 --auto=md --level=$vol0_level --size=$vol0_comp_size --chunk=$vol0_chunk --raid-disks=$num_disks $device_list
+else
+ mdadm --create --run $member0 --auto=md --level=$vol0_level --size=$vol0_comp_size --raid-disks=$num_disks $device_list
+fi
+check wait
+
+# Create second volume inside the container (if defined)
+if [ ! -z $vol1_level ]; then
+ if [ ! -z $vol1_chunk ]; then
+ mdadm --create --run $member1 --auto=md --level=$vol1_level --size=$vol1_comp_size --chunk=$vol1_chunk --raid-disks=$num_disks $device_list
+ else
+ mdadm --create --run $member1 --auto=md --level=$vol1_level --size=$vol1_comp_size --raid-disks=$num_disks $device_list
+ fi
+ check wait
+fi
+
+# Wait for any RESYNC to complete
+check wait
+
+# Test first volume
+imsm_check member $member0 $num_disks $vol0_level $vol0_comp_size $((vol0_comp_size * vol0_num_comps)) $vol0_offset $vol0_chunk
+testdev $member0 $vol0_num_comps $vol0_comp_size $vol0_chunk
+
+# Test second volume (if defined)
+if [ ! -z $vol1_level ]; then
+ imsm_check member $member1 $num_disks $vol1_level $vol1_comp_size $((vol1_comp_size * vol1_num_comps)) $vol1_offset $vol1_chunk
+ testdev $member1 $vol1_num_comps $vol1_comp_size $vol1_chunk
+fi
+
+# Add extra disks to container if operation requires spares in container.
+for i in $spare_list
+do
+ mdadm --add $container $i
+ check wait
+ num_disks=$((num_disks + 1))
+done
+
+imsm_check container $num_disks
+num_disks=$((num_disks + add_to_num_disks))
+backup_imsm=/tmp/backup_imsm
+
+# Grow each member or a container depending on the type of an operation
+if [ $migration_test -ne 0 ]; then
+ if [ -z $new_num_disks ]; then
+ new_num_disks=$num_disks
+ fi
+ grow_member $member0 $new_num_disks $vol0_new_num_comps $vol0_new_level $vol0_comp_size $vol0_offset $vol0_new_chunk $vol0_chunk
+ if [[ $vol1_new_chunk -ne 0 ]] ; then
+ grow_member $member1 $new_num_disks $vol1_new_num_comps $vol1_new_level $vol1_comp_size $vol1_offset $vol1_new_chunk $vol1_chunk
+ fi
+else
+ rm -f $backup_imsm
+ ( set -x; mdadm --grow $container --raid-disks=$num_disks )
+ grow_status=$?
+ if [ $negative_test -ne 0 ]; then
+ if [ $grow_status -eq 0 ]; then
+			echo >&2 "**Error**: $container: --grow should have failed, but it completed successfully"
+ exit 1
+ fi
+ else
+ sleep 5
+ check wait
+ sleep 5
+ check wait
+ imsm_check member $member0 $num_disks $vol0_level $vol0_comp_size $((vol0_comp_size * vol0_new_num_comps)) $vol0_offset $vol0_chunk
+ testdev $member0 $vol0_new_num_comps $vol0_comp_size $vol0_chunk
+ if [ $vol1_new_num_comps -ne 0 ]; then
+ imsm_check member $member1 $num_disks $vol1_level $vol1_comp_size $((vol1_comp_size * vol1_new_num_comps)) $vol1_offset $vol1_chunk
+ testdev $member1 $vol1_new_num_comps $vol1_comp_size $vol1_chunk
+ fi
+ fi
+fi
+
+exit 0
diff --git a/tests/templates/names_template b/tests/templates/names_template
new file mode 100644
index 0000000..1b6cd14
--- /dev/null
+++ b/tests/templates/names_template
@@ -0,0 +1,75 @@
+# NAME is optional. Testing with native 1.2 superblock.
+function names_create() {
+ local DEVNAME=$1
+ local NAME=$2
+ local NEG_TEST=$3
+
+ if [[ -z "$NAME" ]]; then
+ mdadm -CR "$DEVNAME" -l0 -n 1 $dev0 --force
+ else
+ mdadm -CR "$DEVNAME" --name="$NAME" --metadata=1.2 -l0 -n 1 $dev0 --force
+ fi
+
+ if [[ "$NEG_TEST" == "true" ]]; then
+ [[ "$?" == "0" ]] && return 0
+ echo "Negative verification failed"
+ exit 1
+ fi
+
+ if [[ "$?" != "0" ]]; then
+ echo "Cannot create device."
+ exit 1
+ fi
+}
+
+# Three properties to check:
+# - devnode name
+# - link in /dev/md/ (MD_DEVNAME property from --detail --export)
+# - name in metadata (MD_NAME property from --detail --export); this works only with a 1.2 superblock.
+function names_verify() {
+ local DEVNODE_NAME="$1"
+ local WANTED_LINK="$2"
+ local WANTED_NAME="$3"
+
+ local RES="$(mdadm -D --export $DEVNODE_NAME | grep MD_DEVNAME)"
+ if [[ "$?" != "0" ]]; then
+ echo "Cannot get details for $DEVNODE_NAME - unexpected devnode."
+ exit 1
+ fi
+
+ if [[ "$WANTED_LINK" != "empty" ]]; then
+ local EXPECTED="MD_DEVNAME=$WANTED_LINK"
+ fi
+
+ if [[ "$RES" != "$EXPECTED" ]]; then
+ echo "$RES doesn't match $EXPECTED."
+ exit 1
+ fi
+
+ local RES="$(mdadm -D --export $DEVNODE_NAME | grep MD_NAME)"
+ if [[ "$?" != "0" ]]; then
+ echo "Cannot get metadata from $dev0."
+ exit 1
+ fi
+
+ local EXPECTED="MD_NAME=$(hostname):$WANTED_NAME"
+ if [[ "$RES" != "$EXPECTED" ]]; then
+ echo "$RES doesn't match $EXPECTED."
+ exit 1
+ fi
+}
+
+# Generate the ARRAY line for the tested array.
+names_make_conf() {
+ local UUID="$1"
+ local WANTED_DEVNAME="$2"
+ local CONF="$3"
+
+ local LINE="ARRAY metadata=1.2 UUID=$UUID"
+
+ if [[ "$WANTED_DEVNAME" != "empty" ]]; then
+ LINE="$LINE $WANTED_DEVNAME"
+ fi
+
+ echo $LINE > $CONF
+}
diff --git a/tests/utils b/tests/utils
new file mode 100644
index 0000000..3acebd7
--- /dev/null
+++ b/tests/utils
@@ -0,0 +1,191 @@
+# set of functions used to test policy framework with assemble, incremental and Monitor
+
+set +e
+# create by-path links so path-based policy domains can be used
+for d in 0 1 2 3 4 5 6 7 8 9 10 11 12
+do
+ eval ln -s \$dev$d /dev/disk/by-path/loop$d
+ eval d$d="loop$d"
+ eval mdadm --zero-superblock \$dev$d
+done
+
+devices="/dev/loop[0-9] /dev/loop10 /dev/loop11 /dev/loop12"
+
+# on failure print out a few things before exiting
+# uses testdsc and platform global variables
+err(){
+ echo >&2 "ERROR: $*"
+ cat $config >&2 || true
+ cat /proc/mdstat >&2
+ [ -z "$testdsc" ] || { echo >&2 $platform: $testdsc "- failed"; }
+ ps -e | grep mdadm >&2 || true
+ if [ $listfailed == "yes" ]; then
+ [ "$verbose" != "yes" ] || echo ---FAILED---
+ flist="$flist \n $platform $testdsc"
+ failed=1
+ else
+ exit 1
+ fi
+}
+
+# set test description
+dsc(){
+ failed=0
+ testdsc="$*"
+ [ "$verbose" != "yes" ] || echo $testdsc
+}
+
+killmonitor(){
+ [ -z "$monitorpid" ] || { kill -9 $monitorpid; unset monitorpid; }
+}
+
+tidyup(){
+ killmonitor
+ mdadm -Ss || true
+ mdadm -Ss
+ mdadm --zero-superblock $devices || true
+ udevadm settle
+ rm -f $config
+}
+
+trap tidyup 0 1 2 3 15
+
+# create a RAID 1 array or container and subarray(s) on 2 disks
+# if platform not specified imsm is used
+# if subsize is given, first subarray is created with given size and second one on remaining space
+ccv(){
+ # mddevno used to name created array
+ local mddevno="$1"
+ # numbers of devices to be used in array
+ local devno1="$2"
+ local devno2="$3"
+ local platform="$4"
+ local subsize="$5"
+ local onearray="$6"
+ [ -n "$platform" ] || platform="imsm"
+ if [ "$platform" == "imsm" ] || [ "$platform" == "ddf" ]; then
+ eval mdadm -CR /dev/md/con$mddevno -e $platform -n 2 \$dev$devno1 \$dev$devno2
+ udevadm settle
+ [ -z "$subsize" ] || eval mdadm -CR sub$mddevno"_" -l 1 -n 2 /dev/md/con$mddevno -z $subsize
+ [ -n "$onearray" ] || eval mdadm -CR sub$mddevno -l 1 -n 2 /dev/md/con$mddevno
+ else
+ [ -z "$subsize" ] || sizepar="-z $subsize"
+ eval mdadm -CR arr$mddevno -e $platform -l 1 -n 2 \$dev$devno1 \$dev$devno2 $sizepar
+ unset sizepar
+ fi
+}
+
+# get container and subarray using given device from mdstat
+# sets global variables c and v
+getarray(){
+ local devname=`basename $1`
+ local platformtype=`grep -A 1 $devname /proc/mdstat | awk '/active/ {getline; print $4 }' | awk -F ":" 'END {print $1}'`
+ c=`grep "inactive.*$devname" /proc/mdstat | awk -F " " '{print $1}'`
+ v=`grep " active.*$devname" /proc/mdstat | awk -F " " '{print $1}'`
+ [ "$platformtype" == "external" ] || c=$v
+}
+
+# check if given device belongs to any container and subarray
+# if $2 given then only container checked
+chkarray(){
+ local devname="$1"
+ local subcheck="$2"
+ getarray $devname
+ [ -n "$c" ] || err "$devname not in any container"
+ [ -n "$subcheck" ] || [ -n "$v" ] || err " $devname not in subarray"
+}
+
+# test if two devices in the same container/subarray
+# $1 $2 - devices
+# $3 don't check subarrays, only containers
+tst(){
+ local device1=`basename $1`
+ local device2=`basename $2`
+ local subcheck="$3"
+ chkarray $device1 $subcheck
+ local x="$c"
+ local y="$v"
+ chkarray $device2 $subcheck
+ [ "$c" == "$x" ] || err "$device1 and $device2 not in the same container"
+ [ -n "$subcheck" ] || [ "$v" == "$y" ] || err "$device1 and $device2 not in the same subarray"
+}
+
+# same as tst, just use numbers of devices instead of names as parameters
+dtst(){
+ local devno1="$1"
+ local devno2="$2"
+ local subcheck="$3"
+ eval tst \$dev$devno1 \$dev$devno2 $subcheck
+}
+
+# create containers/subarrays, check if created properly,
+# set global variables c$mddevno v$mddevno, usually c0=md127, v0=md126 , etc.
+setupdevs(){
+ local mddevno="$1"
+ local devno1="$2"
+ local devno2="$3"
+ local p="$4"
+ local subsize="$5"
+ local onearray="$6"
+ [ -n "$p" ] || p=$platform
+ ccv $mddevno $devno1 $devno2 $p $subsize $onearray
+ dtst $devno1 $devno2
+ eval c$mddevno=\"$c\"
+ eval v$mddevno=\"$v\"
+}
+
+# check if given spare in container
+# usage: chkspare container spare [n] (n if spare shouldn't be in container)
+chkspare(){
+ local container=`basename $1`
+ local spare=$2
+ local expected=$3
+ getarray $spare
+ [ -n "$expected" ] || expected="y"
+ if [ "$expected" == "y" ]; then
+ [ "$c" == "$container" ] || err "$spare not in container $container"
+ else
+ [ "$c" != "$container" ] || err "$spare in container $container"
+ fi
+}
+
+# check if spare was moved from one container to another
+# args: from_container to_container spare [yn]
+# n when spare should remain in original container
+chksparemoved(){
+ sleep $sleeptime
+ from_container="$1"
+ to_container="$2"
+ spare="$3"
+ expected="$4"
+ [ -n "$expected" ] || expected="y"
+ notexpected="n"; [ "$expected" == "y" ] || notexpected="y"
+ chkspare $from_container $spare $notexpected
+ [ $failed -eq 1 ] || chkspare $to_container $spare $expected
+}
+
+
+# for domains defined through policy
+createconfig(){
+if [ "$1" != "a" ]; then
+{
+ domain=$1
+ metadata=$2
+ action=$3
+ while [ -n "$4" ]; do
+ echo="policy domain=$domain"
+ [ "$metadata" == "noplatform" ] || echo="$echo metadata=$metadata"
+ echo="$echo path=loop$4"
+ echo="$echo action=$action"
+ echo "$echo"
+ shift
+ done
+} >> $config
+else
+{
+ echo "DEVICES $devlist /dev/md1*"
+ mdadm -Ebs
+} > $config
+fi
+#[ "$verbose" != "yes" ] || cat $config | grep policy || true
+}