Diffstat (limited to 'src/VBox/ValidationKit/tests/storage')
-rw-r--r--  src/VBox/ValidationKit/tests/storage/Makefile.kmk                    45
-rwxr-xr-x  src/VBox/ValidationKit/tests/storage/remoteexecutor.py              277
-rwxr-xr-x  src/VBox/ValidationKit/tests/storage/storagecfg.py                  608
-rwxr-xr-x  src/VBox/ValidationKit/tests/storage/tdStorageBenchmark1.py        1350
-rwxr-xr-x  src/VBox/ValidationKit/tests/storage/tdStorageSnapshotMerging1.py   423
-rwxr-xr-x  src/VBox/ValidationKit/tests/storage/tdStorageStress1.py            517
6 files changed, 3220 insertions, 0 deletions
diff --git a/src/VBox/ValidationKit/tests/storage/Makefile.kmk b/src/VBox/ValidationKit/tests/storage/Makefile.kmk
new file mode 100644
index 00000000..92c3fef8
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/Makefile.kmk
@@ -0,0 +1,45 @@
+# $Id: Makefile.kmk $
+## @file
+# VirtualBox Validation Kit - Storage Tests.
+#
+
+#
+# Copyright (C) 2012-2019 Oracle Corporation
+#
+# This file is part of VirtualBox Open Source Edition (OSE), as
+# available from http://www.virtualbox.org. This file is free software;
+# you can redistribute it and/or modify it under the terms of the GNU
+# General Public License (GPL) as published by the Free Software
+# Foundation, in version 2 as it comes in the "COPYING" file of the
+# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+#
+# The contents of this file may alternatively be used under the terms
+# of the Common Development and Distribution License Version 1.0
+# (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+# VirtualBox OSE distribution, in which case the provisions of the
+# CDDL are applicable instead of those of the GPL.
+#
+# You may elect to license modified versions of this file under the
+# terms and conditions of either the GPL or the CDDL or both.
+#
+
+SUB_DEPTH = ../../../../..
+include $(KBUILD_PATH)/subheader.kmk
+
+
+INSTALLS += ValidationKitTestsStorage
+ValidationKitTestsStorage_TEMPLATE = VBoxValidationKitR3
+ValidationKitTestsStorage_INST = $(INST_VALIDATIONKIT)tests/storage/
+ValidationKitTestsStorage_EXEC_SOURCES := \
+ $(PATH_SUB_CURRENT)/tdStorageBenchmark1.py \
+ $(PATH_SUB_CURRENT)/tdStorageSnapshotMerging1.py \
+ $(PATH_SUB_CURRENT)/tdStorageStress1.py \
+ $(PATH_SUB_CURRENT)/remoteexecutor.py \
+ $(PATH_SUB_CURRENT)/storagecfg.py
+
+VBOX_VALIDATIONKIT_PYTHON_SOURCES += $(ValidationKitTestsStorage_EXEC_SOURCES)
+
+$(evalcall def_vbox_validationkit_process_python_sources)
+include $(FILE_KBUILD_SUB_FOOTER)
+
diff --git a/src/VBox/ValidationKit/tests/storage/remoteexecutor.py b/src/VBox/ValidationKit/tests/storage/remoteexecutor.py
new file mode 100755
index 00000000..e35c2fc7
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/remoteexecutor.py
@@ -0,0 +1,277 @@
+# -*- coding: utf-8 -*-
+# $Id: remoteexecutor.py $
+
+"""
+VirtualBox Validation Kit - Storage benchmark, test execution helpers.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2016-2019 Oracle Corporation
+
+This file is part of VirtualBox Open Source Edition (OSE), as
+available from http://www.virtualbox.org. This file is free software;
+you can redistribute it and/or modify it under the terms of the GNU
+General Public License (GPL) as published by the Free Software
+Foundation, in version 2 as it comes in the "COPYING" file of the
+VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL) only, as it comes in the "COPYING.CDDL" file of the
+VirtualBox OSE distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+"""
+__version__ = "$Revision: 127855 $"
+
+
+# Standard Python imports.
+import array;
+import os;
+import shutil;
+import sys;
+if sys.version_info[0] >= 3:
+ from io import StringIO as StringIO; # pylint: disable=import-error,no-name-in-module
+else:
+ from StringIO import StringIO as StringIO; # pylint: disable=import-error,no-name-in-module
+import subprocess;
+
+# Validation Kit imports.
+from common import utils;
+from testdriver import reporter;
+
+
+
+class StdInOutBuffer(object):
+ """ Standard input output buffer """
+
+ def __init__(self, sInput = None):
+ self.sInput = StringIO();
+ if sInput is not None:
+ self.sInput.write(self._toString(sInput));
+ self.sInput.seek(0);
+ self.sOutput = '';
+
+    def _toString(self, sText):
+        """
+        Converts a possible array to a string.
+        """
+        if isinstance(sText, array.array):
+            try:
+                # Note: array.tostring() was removed in Python 3.9; tobytes() is the replacement.
+                return sText.tostring();
+            except:
+                pass;
+        return sText;
+
+ def read(self, cb):
+ """file.read"""
+ return self.sInput.read(cb);
+
+ def write(self, sText):
+ """file.write"""
+ self.sOutput += self._toString(sText);
+ return None;
+
+ def getOutput(self):
+ """
+ Returns the output of the buffer.
+ """
+ return self.sOutput;
+
+ def close(self):
+ """ file.close """
+ return;
+
+class RemoteExecutor(object):
+ """
+ Helper for executing tests remotely through TXS or locally
+ """
+
+ def __init__(self, oTxsSession = None, asBinaryPaths = None, sScratchPath = None):
+ self.oTxsSession = oTxsSession;
+ self.asPaths = asBinaryPaths;
+ self.sScratchPath = sScratchPath;
+ if self.asPaths is None:
+ self.asPaths = [ ];
+
+ def _isFile(self, sFile):
+ """
+ Checks whether a file exists.
+ """
+ if self.oTxsSession is not None:
+ return self.oTxsSession.syncIsFile(sFile);
+ return os.path.isfile(sFile);
+
+ def _getBinaryPath(self, sBinary):
+ """
+ Returns the complete path of the given binary if found
+ from the configured search path or None if not found.
+ """
+ for sPath in self.asPaths:
+ sFile = sPath + '/' + sBinary;
+ if self._isFile(sFile):
+ return sFile;
+ return None;
+
+    def _sudoExecuteSync(self, asArgs, sInput):
+        """
+        Executes a sudo child process synchronously.
+        Returns a tuple of (success flag, stdout string, stderr string);
+        the flag is True only if the process ran and returned exit code 0.
+        """
+ reporter.log('Executing [sudo]: %s' % (asArgs, ));
+ reporter.flushall();
+ fRc = True;
+ sOutput = '';
+ sError = '';
+ try:
+ oProcess = utils.sudoProcessPopen(asArgs, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE, shell = False, close_fds = False);
+
+ sOutput, sError = oProcess.communicate(sInput);
+ iExitCode = oProcess.poll();
+
+            if iExitCode != 0:
+ fRc = False;
+ except:
+ reporter.errorXcpt();
+ fRc = False;
+ reporter.log('Exit code [sudo]: %s (%s)' % (fRc, asArgs));
+ return (fRc, str(sOutput), str(sError));
+
+ def _execLocallyOrThroughTxs(self, sExec, asArgs, sInput, cMsTimeout):
+ """
+ Executes the given program locally or through TXS based on the
+ current config.
+ """
+ fRc = False;
+ sOutput = None;
+ if self.oTxsSession is not None:
+ reporter.log('Executing [remote]: %s %s %s' % (sExec, asArgs, sInput));
+ reporter.flushall();
+ oStdOut = StdInOutBuffer();
+ oStdErr = StdInOutBuffer();
+ oStdIn = None;
+ if sInput is not None:
+ oStdIn = StdInOutBuffer(sInput);
+ else:
+ oStdIn = '/dev/null'; # pylint: disable=R0204
+ fRc = self.oTxsSession.syncExecEx(sExec, (sExec,) + asArgs,
+ oStdIn = oStdIn, oStdOut = oStdOut,
+ oStdErr = oStdErr, cMsTimeout = cMsTimeout);
+ sOutput = oStdOut.getOutput();
+ sError = oStdErr.getOutput();
+ if fRc is False:
+ reporter.log('Exit code [remote]: %s (stdout: %s stderr: %s)' % (fRc, sOutput, sError));
+ else:
+ reporter.log('Exit code [remote]: %s' % (fRc,));
+ else:
+ fRc, sOutput, sError = self._sudoExecuteSync([sExec, ] + list(asArgs), sInput);
+ return (fRc, sOutput, sError);
+
+    def execBinary(self, sExec, asArgs, sInput = None, cMsTimeout = 3600000):
+        """
+        Executes the given binary with the given arguments, optionally
+        providing input through stdin. Returns a tuple of (success flag,
+        stdout string, stderr string).
+        """
+
+ fRc = True;
+ sOutput = None;
+ sError = None;
+ sBinary = self._getBinaryPath(sExec);
+ if sBinary is not None:
+ fRc, sOutput, sError = self._execLocallyOrThroughTxs(sBinary, asArgs, sInput, cMsTimeout);
+ else:
+ fRc = False;
+ return (fRc, sOutput, sError);
+
+ def execBinaryNoStdOut(self, sExec, asArgs, sInput = None):
+ """
+ Executes the given binary with the given arguments
+ providing some optional input through stdin and
+ returning whether the process exited successfully.
+ """
+ fRc, _, _ = self.execBinary(sExec, asArgs, sInput);
+ return fRc;
+
+    def copyFile(self, sLocalFile, sFilename, cMsTimeout = 30000):
+        """
+        Copies the local file to the remote scratch area if a TXS session is
+        configured, otherwise to the local scratch path.
+
+        Returns a file ID which can be used as an input parameter
+        to execBinary() resolving to the real filepath on the remote side
+        or locally, or None on failure.
+        """
+ sFileId = None;
+ if self.oTxsSession is not None:
+ sFileId = '${SCRATCH}/' + sFilename;
+ fRc = self.oTxsSession.syncUploadFile(sLocalFile, sFileId, cMsTimeout);
+ if not fRc:
+ sFileId = None;
+ else:
+ sFileId = self.sScratchPath + '/' + sFilename;
+ try:
+ shutil.copy(sLocalFile, sFileId);
+ except:
+ sFileId = None;
+
+ return sFileId;
+
+ def copyString(self, sContent, sFilename, cMsTimeout = 30000):
+ """
+ Creates a file remotely or locally with the given content.
+
+ Returns a file ID which can be used as an input parameter
+ to execBinary() resolving to the real filepath on the remote side
+ or locally.
+ """
+ sFileId = None;
+ if self.oTxsSession is not None:
+ sFileId = '${SCRATCH}/' + sFilename;
+ fRc = self.oTxsSession.syncUploadString(sContent, sFileId, cMsTimeout);
+ if not fRc:
+ sFileId = None;
+ else:
+ sFileId = self.sScratchPath + '/' + sFilename;
+            try:
+                oFile = open(sFileId, 'wb');
+                # Encode to bytes for the binary mode file object (Python 3).
+                if utils.isString(sContent):
+                    sContent = sContent.encode('utf-8');
+                oFile.write(sContent);
+                oFile.close();
+            except:
+                sFileId = None;
+
+ return sFileId;
+
+ def mkDir(self, sDir, fMode = 0o700, cMsTimeout = 30000):
+ """
+ Creates a new directory at the given location.
+ """
+ fRc = True;
+ if self.oTxsSession is not None:
+ fRc = self.oTxsSession.syncMkDir(sDir, fMode, cMsTimeout);
+ else:
+ fRc = self.execBinaryNoStdOut('mkdir', ('-m', format(fMode, 'o'), sDir));
+
+ return fRc;
+
+ def rmDir(self, sDir, cMsTimeout = 30000):
+ """
+ Removes the given directory.
+ """
+ fRc = True;
+ if self.oTxsSession is not None:
+ fRc = self.oTxsSession.syncRmDir(sDir, cMsTimeout);
+ else:
+ fRc = self.execBinaryNoStdOut('rmdir', (sDir,));
+
+ return fRc;
+
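+# Illustrative usage sketch (not executed; the search paths and scratch
+# directory are placeholder values, not requirements):
+#
+#   oExec = RemoteExecutor(None, ['/usr/bin', '/usr/sbin'], '/tmp/scratch');
+#   fRc, sOut, sErr = oExec.execBinary('zpool', ('list', '-H'));
+#   sCfgId = oExec.copyString('[global]\nsize=2g\n', 'example.cfg');
+#   if sCfgId is not None:
+#       fRc = oExec.execBinaryNoStdOut('fio', (sCfgId,));
+#
+# With oTxsSession set to None everything runs locally through sudo; with a
+# TXS session the same calls are routed to the remote test execution service.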
diff --git a/src/VBox/ValidationKit/tests/storage/storagecfg.py b/src/VBox/ValidationKit/tests/storage/storagecfg.py
new file mode 100755
index 00000000..e97f57b0
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/storagecfg.py
@@ -0,0 +1,608 @@
+# -*- coding: utf-8 -*-
+# $Id: storagecfg.py $
+
+"""
+VirtualBox Validation Kit - Storage test configuration API.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2016-2019 Oracle Corporation
+
+This file is part of VirtualBox Open Source Edition (OSE), as
+available from http://www.virtualbox.org. This file is free software;
+you can redistribute it and/or modify it under the terms of the GNU
+General Public License (GPL) as published by the Free Software
+Foundation, in version 2 as it comes in the "COPYING" file of the
+VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL) only, as it comes in the "COPYING.CDDL" file of the
+VirtualBox OSE distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+"""
+__version__ = "$Revision: 127855 $"
+
+# Standard Python imports.
+import os;
+import re;
+
+# Validation Kit imports.
+from common import utils;
+
+
+class StorageDisk(object):
+ """
+ Class representing a disk for testing.
+ """
+
+ def __init__(self, sPath, fRamDisk = False):
+ self.sPath = sPath;
+ self.fUsed = False;
+ self.fRamDisk = fRamDisk;
+
+ def getPath(self):
+ """
+ Return the disk path.
+ """
+ return self.sPath;
+
+ def isUsed(self):
+ """
+ Returns whether the disk is currently in use.
+ """
+ return self.fUsed;
+
+ def isRamDisk(self):
+ """
+        Returns whether the disk object has a RAM backing.
+ """
+ return self.fRamDisk;
+
+ def setUsed(self, fUsed):
+ """
+ Sets the used flag for the disk.
+ """
+ if fUsed:
+ if self.fUsed:
+ return False;
+
+ self.fUsed = True;
+ else:
+ self.fUsed = fUsed;
+
+ return True;
+
+class StorageConfigOs(object):
+ """
+    Base class for a single host OS storage configuration.
+ """
+
+ def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
+ """
+        Returns a list of disks below the given path matching the given regular expression.
+ """
+
+ lstDisks = [];
+ oRegExp = re.compile(sRegExp);
+ asFiles = os.listdir(sPath);
+ for sFile in asFiles:
+ if oRegExp.match(os.path.basename(sFile)) and os.path.exists(sPath + '/' + sFile):
+ lstDisks.append(StorageDisk(sPath + '/' + sFile));
+
+ return lstDisks;
+
+class StorageConfigOsSolaris(StorageConfigOs):
+ """
+ Class implementing the Solaris specifics for a storage configuration.
+ """
+
+ def __init__(self):
+ StorageConfigOs.__init__(self);
+ self.idxRamDisk = 0;
+
+ def _getActivePoolsStartingWith(self, oExec, sPoolIdStart):
+ """
+ Returns a list of pools starting with the given ID or None on failure.
+ """
+ lstPools = None;
+ fRc, sOutput, _ = oExec.execBinary('zpool', ('list', '-H'));
+ if fRc:
+ lstPools = [];
+ asPools = sOutput.splitlines();
+ for sPool in asPools:
+ if sPool.startswith(sPoolIdStart):
+ # Extract the whole name and add it to the list.
+ asItems = sPool.split('\t');
+ lstPools.append(asItems[0]);
+ return lstPools;
+
+ def _getActiveVolumesInPoolStartingWith(self, oExec, sPool, sVolumeIdStart):
+ """
+ Returns a list of active volumes for the given pool starting with the given
+ identifier or None on failure.
+ """
+ lstVolumes = None;
+ fRc, sOutput, _ = oExec.execBinary('zfs', ('list', '-H'));
+ if fRc:
+ lstVolumes = [];
+ asVolumes = sOutput.splitlines();
+ for sVolume in asVolumes:
+ if sVolume.startswith(sPool + '/' + sVolumeIdStart):
+ # Extract the whole name and add it to the list.
+ asItems = sVolume.split('\t');
+ lstVolumes.append(asItems[0]);
+ return lstVolumes;
+
+ def getDisksMatchingRegExp(self, sRegExp):
+ """
+ Returns a list of disks matching the regular expression.
+ """
+ return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);
+
+ def getMntBase(self):
+ """
+ Returns the mountpoint base for the host.
+ """
+ return '/pools';
+
+ def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
+ """
+ Creates a new storage pool with the given disks and the given RAID level.
+ """
+ sZPoolRaid = None;
+ if len(asDisks) > 1 and (sRaidLvl == 'raid5' or sRaidLvl is None):
+ sZPoolRaid = 'raidz';
+
+ fRc = True;
+ if sZPoolRaid is not None:
+ fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool, sZPoolRaid,) + tuple(asDisks));
+ else:
+ fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool,) + tuple(asDisks));
+
+ return fRc;
+
+ def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
+ """
+ Creates and mounts a filesystem at the given mountpoint using the
+ given pool and volume IDs.
+ """
+ fRc = True;
+ if cbVol is not None:
+ fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, '-V', cbVol, sPool + '/' + sVol));
+ else:
+ fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, sPool + '/' + sVol));
+
+ return fRc;
+
+ def destroyVolume(self, oExec, sPool, sVol):
+ """
+ Destroys the given volume.
+ """
+ fRc = oExec.execBinaryNoStdOut('zfs', ('destroy', sPool + '/' + sVol));
+ return fRc;
+
+ def destroyPool(self, oExec, sPool):
+ """
+ Destroys the given storage pool.
+ """
+ fRc = oExec.execBinaryNoStdOut('zpool', ('destroy', sPool));
+ return fRc;
+
+ def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
+ """
+ Cleans up any pools and volumes starting with the name in the given
+ parameters.
+ """
+ fRc = True;
+ lstPools = self._getActivePoolsStartingWith(oExec, sPoolIdStart);
+ if lstPools is not None:
+ for sPool in lstPools:
+ lstVolumes = self._getActiveVolumesInPoolStartingWith(oExec, sPool, sVolIdStart);
+ if lstVolumes is not None:
+ # Destroy all the volumes first
+ for sVolume in lstVolumes:
+ fRc2 = oExec.execBinaryNoStdOut('zfs', ('destroy', sVolume));
+ if not fRc2:
+ fRc = fRc2;
+
+ # Destroy the pool
+ fRc2 = self.destroyPool(oExec, sPool);
+ if not fRc2:
+ fRc = fRc2;
+ else:
+ fRc = False;
+ else:
+ fRc = False;
+
+ return fRc;
+
+ def createRamDisk(self, oExec, cbRamDisk):
+ """
+ Creates a RAM backed disk with the given size.
+ """
+ oDisk = None;
+ sRamDiskName = 'ramdisk%u' % (self.idxRamDisk,);
+ fRc, _ , _ = oExec.execBinary('ramdiskadm', ('-a', sRamDiskName, str(cbRamDisk)));
+ if fRc:
+ self.idxRamDisk += 1;
+ oDisk = StorageDisk('/dev/ramdisk/%s' % (sRamDiskName, ), True);
+
+ return oDisk;
+
+ def destroyRamDisk(self, oExec, oDisk):
+ """
+ Destroys the given ramdisk object.
+ """
+ sRamDiskName = os.path.basename(oDisk.getPath());
+ return oExec.execBinaryNoStdOut('ramdiskadm', ('-d', sRamDiskName));
+
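+# For reference, the command sequence issued by this class for a two-disk
+# pool with the default RAID level is roughly equivalent to the following
+# shell commands (device names are examples):
+#
+#   zpool create -f pool0 raidz c3t0d0s0 c3t1d0s0
+#   zfs create -o mountpoint=/pools/vol0 pool0/vol0
+#   ...
+#   zfs destroy pool0/vol0
+#   zpool destroy pool0
+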
+class StorageConfigOsLinux(StorageConfigOs):
+ """
+ Class implementing the Linux specifics for a storage configuration.
+ """
+
+ def __init__(self):
+ StorageConfigOs.__init__(self);
+ self.dSimplePools = { }; # Simple storage pools which don't use lvm (just one partition)
+ self.dMounts = { }; # Pool/Volume to mountpoint mapping.
+
+ def _getDmRaidLevelFromLvl(self, sRaidLvl):
+ """
+ Converts our raid level indicators to something mdadm can understand.
+ """
+ if sRaidLvl == 'raid5':
+ return '5';
+ elif sRaidLvl == 'raid1':
+ return 'mirror';
+ elif sRaidLvl == 'raid0' or sRaidLvl is None:
+ return 'stripe';
+
+ return 'stripe';
+
+ def getDisksMatchingRegExp(self, sRegExp):
+ """
+ Returns a list of disks matching the regular expression.
+ """
+ return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);
+
+ def getMntBase(self):
+ """
+ Returns the mountpoint base for the host.
+ """
+ return '/mnt';
+
+ def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
+ """
+ Creates a new storage pool with the given disks and the given RAID level.
+ """
+ fRc = True;
+ if len(asDisks) == 1 and sRaidLvl is None:
+ # Doesn't require LVM, put into the simple pools dictionary so we can
+ # use it when creating a volume later.
+ self.dSimplePools[sPool] = asDisks[0];
+ else:
+ # If a RAID is required use dm-raid first to create one.
+ asLvmPvDisks = asDisks;
+ fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
+ '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
+ '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
+ if fRc:
+ # /dev/md0 is the only block device to use for our volume group.
+ asLvmPvDisks = [ '/dev/md0' ];
+
+ # Create a physical volume on every disk first.
+ for sLvmPvDisk in asLvmPvDisks:
+ fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
+ if not fRc:
+ break;
+
+ if fRc:
+ # Create volume group with all physical volumes included
+ fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
+ return fRc;
+
+ def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
+ """
+ Creates and mounts a filesystem at the given mountpoint using the
+ given pool and volume IDs.
+ """
+ fRc = True;
+ sBlkDev = None;
+ if sPool in self.dSimplePools:
+ sDiskPath = self.dSimplePools.get(sPool);
+ if sDiskPath.find('zram') != -1:
+ sBlkDev = sDiskPath;
+ else:
+ # Create a partition with the requested size
+ sFdiskScript = ';\n'; # Single partition filling everything
+ if cbVol is not None:
+ sFdiskScript = ',' + str(cbVol // 512) + '\n'; # Get number of sectors
+ fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), \
+ sFdiskScript);
+ if fRc:
+ if sDiskPath.find('nvme') != -1:
+ sBlkDev = sDiskPath + 'p1';
+ else:
+ sBlkDev = sDiskPath + '1';
+ else:
+ if cbVol is None:
+ fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
+ else:
+ fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
+ if fRc:
+                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol;
+
+ if fRc is True and sBlkDev is not None:
+ # Create a filesystem and mount it
+ fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
+ fRc = fRc and oExec.mkDir(sMountPoint);
+ fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
+ if fRc:
+ self.dMounts[sPool + '/' + sVol] = sMountPoint;
+ return fRc;
+
+ def destroyVolume(self, oExec, sPool, sVol):
+ """
+ Destroys the given volume.
+ """
+ # Unmount first
+ sMountPoint = self.dMounts[sPool + '/' + sVol];
+ fRc = oExec.execBinaryNoStdOut('umount', (sMountPoint,));
+ self.dMounts.pop(sPool + '/' + sVol);
+ oExec.rmDir(sMountPoint);
+ if sPool in self.dSimplePools:
+ # Wipe partition table
+ sDiskPath = self.dSimplePools.get(sPool);
+ if sDiskPath.find('zram') == -1:
+ fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', '--delete', \
+ sDiskPath));
+ else:
+ fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
+ return fRc;
+
+ def destroyPool(self, oExec, sPool):
+ """
+ Destroys the given storage pool.
+ """
+ fRc = True;
+ if sPool in self.dSimplePools:
+ self.dSimplePools.pop(sPool);
+ else:
+ fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
+ return fRc;
+
+ def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
+ """
+ Cleans up any pools and volumes starting with the name in the given
+ parameters.
+ """
+ # @todo: Needs implementation, for LVM based configs a similar approach can be used
+ # as for Solaris.
+ _ = oExec;
+ _ = sPoolIdStart;
+ _ = sVolIdStart;
+ return True;
+
+ def createRamDisk(self, oExec, cbRamDisk):
+ """
+ Creates a RAM backed disk with the given size.
+ """
+ # Make sure the ZRAM module is loaded.
+ oDisk = None;
+ fRc = oExec.execBinaryNoStdOut('modprobe', ('zram',));
+ if fRc:
+ fRc, sOut, _ = oExec.execBinary('zramctl', ('--raw', '-f', '-s', str(cbRamDisk)));
+ if fRc:
+ oDisk = StorageDisk(sOut.rstrip(), True);
+
+ return oDisk;
+
+ def destroyRamDisk(self, oExec, oDisk):
+ """
+ Destroys the given ramdisk object.
+ """
+ return oExec.execBinaryNoStdOut('zramctl', ('-r', oDisk.getPath()));
+
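+# For reference, the command sequence issued by this class for an LVM based
+# pool with three disks and raid5 is roughly equivalent to (device names are
+# examples):
+#
+#   mdadm --create /dev/md0 --assume-clean --level=5 --raid-devices=3 /dev/sdb /dev/sdc /dev/sdd
+#   pvcreate /dev/md0
+#   vgcreate pool0 /dev/md0
+#   lvcreate -l 100%FREE -n vol0 pool0
+#   mkfs.ext4 -F -F /dev/mapper/pool0-vol0
+#   mount /dev/mapper/pool0-vol0 /mnt/vol0
+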
+class StorageCfg(object):
+ """
+ Storage configuration helper class taking care of the different host OS.
+ """
+
+ def __init__(self, oExec, sTargetOs, oDiskCfg):
+ self.oExec = oExec;
+ self.lstDisks = [ ]; # List of disks present in the system.
+ self.dPools = { }; # Dictionary of storage pools.
+ self.dVols = { }; # Dictionary of volumes.
+ self.iPoolId = 0;
+ self.iVolId = 0;
+
+ fRc = True;
+ oStorOs = None;
+ if sTargetOs == 'solaris':
+ oStorOs = StorageConfigOsSolaris();
+ elif sTargetOs == 'linux':
+ oStorOs = StorageConfigOsLinux(); # pylint: disable=R0204
+ else:
+ fRc = False;
+
+ if fRc:
+ self.oStorOs = oStorOs;
+ if utils.isString(oDiskCfg):
+ self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg);
+ else:
+                # Assume a list of disks and add them.
+ for sDisk in oDiskCfg:
+ self.lstDisks.append(StorageDisk(sDisk));
+
+ def __del__(self):
+ self.cleanup();
+
+ def cleanup(self):
+ """
+ Cleans up any created storage configs.
+ """
+
+        # Destroy all volumes first. Iterate over copies of the key lists
+        # because destroyVolume() and destroyStoragePool() remove entries
+        # while we iterate (Python 3 dict views would raise otherwise).
+        for sMountPoint in list(self.dVols.keys()):
+            self.destroyVolume(sMountPoint);
+
+        # Destroy all pools.
+        for sPool in list(self.dPools.keys()):
+            self.destroyStoragePool(sPool);
+
+ self.dVols.clear();
+ self.dPools.clear();
+ self.iPoolId = 0;
+ self.iVolId = 0;
+
+ def getRawDisk(self):
+ """
+ Returns a raw disk device from the list of free devices for use.
+ """
+ for oDisk in self.lstDisks:
+ if oDisk.isUsed() is False:
+ oDisk.setUsed(True);
+ return oDisk.getPath();
+
+ return None;
+
+ def getUnusedDiskCount(self):
+ """
+ Returns the number of unused disks.
+ """
+
+ cDisksUnused = 0;
+ for oDisk in self.lstDisks:
+ if not oDisk.isUsed():
+ cDisksUnused += 1;
+
+ return cDisksUnused;
+
+ def createStoragePool(self, cDisks = 0, sRaidLvl = None,
+ cbPool = None, fRamDisk = False):
+ """
+ Create a new storage pool
+ """
+ lstDisks = [ ];
+ fRc = True;
+ sPool = None;
+
+ if fRamDisk:
+ oDisk = self.oStorOs.createRamDisk(self.oExec, cbPool);
+ if oDisk is not None:
+ lstDisks.append(oDisk);
+ cDisks = 1;
+ else:
+ if cDisks == 0:
+ cDisks = self.getUnusedDiskCount();
+
+ for oDisk in self.lstDisks:
+ if not oDisk.isUsed():
+ oDisk.setUsed(True);
+ lstDisks.append(oDisk);
+ if len(lstDisks) == cDisks:
+ break;
+
+ # Enough drives to satisfy the request?
+ if len(lstDisks) == cDisks:
+ # Create a list of all device paths
+ lstDiskPaths = [ ];
+ for oDisk in lstDisks:
+ lstDiskPaths.append(oDisk.getPath());
+
+ # Find a name for the pool
+ sPool = 'pool' + str(self.iPoolId);
+ self.iPoolId += 1;
+
+ fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
+ if fRc:
+ self.dPools[sPool] = lstDisks;
+ else:
+ self.iPoolId -= 1;
+ else:
+ fRc = False;
+
+ # Cleanup in case of error.
+ if not fRc:
+ for oDisk in lstDisks:
+ oDisk.setUsed(False);
+ if oDisk.isRamDisk():
+ self.oStorOs.destroyRamDisk(self.oExec, oDisk);
+
+ return fRc, sPool;
+
+ def destroyStoragePool(self, sPool):
+ """
+ Destroys the storage pool with the given ID.
+ """
+
+ lstDisks = self.dPools.get(sPool);
+ if lstDisks is not None:
+ fRc = self.oStorOs.destroyPool(self.oExec, sPool);
+ if fRc:
+ # Mark disks as unused
+ self.dPools.pop(sPool);
+ for oDisk in lstDisks:
+ oDisk.setUsed(False);
+ if oDisk.isRamDisk():
+ self.oStorOs.destroyRamDisk(self.oExec, oDisk);
+ else:
+ fRc = False;
+
+ return fRc;
+
+ def createVolume(self, sPool, cbVol = None):
+ """
+ Creates a new volume from the given pool returning the mountpoint.
+ """
+
+ fRc = True;
+ sMountPoint = None;
+ if sPool in self.dPools:
+ sVol = 'vol' + str(self.iVolId);
+ sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
+ self.iVolId += 1;
+ fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
+ if fRc:
+ self.dVols[sMountPoint] = (sVol, sPool);
+ else:
+ self.iVolId -= 1;
+ else:
+ fRc = False;
+
+ return fRc, sMountPoint;
+
+ def destroyVolume(self, sMountPoint):
+ """
+ Destroy the volume at the given mount point.
+ """
+
+        sVol, sPool = self.dVols.get(sMountPoint, (None, None));
+ fRc = True;
+ if sVol is not None:
+ fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
+ if fRc:
+ self.dVols.pop(sMountPoint);
+ else:
+ fRc = False;
+
+ return fRc;
+
+ def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0o700):
+ """
+ Creates a new directory on the volume pointed to by the given mount point.
+ """
+ return self.oExec.mkDir(sMountPoint + '/' + sDir, fMode);
+
+ def cleanupLeftovers(self):
+ """
+ Tries to cleanup any leftover pools and volumes from a failed previous run.
+ """
+ return self.oStorOs.cleanupPoolsAndVolumes(self.oExec, 'pool', 'vol');
+
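+# Illustrative usage sketch (the executor object and disk paths are
+# examples, not requirements):
+#
+#   oStorCfg = StorageCfg(oExec, 'linux', [ '/dev/sdb', '/dev/sdc' ]);
+#   fRc, sPool = oStorCfg.createStoragePool();
+#   if fRc:
+#       fRc, sMountPoint = oStorCfg.createVolume(sPool);
+#       # ... run tests on sMountPoint ...
+#   oStorCfg.cleanup();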
diff --git a/src/VBox/ValidationKit/tests/storage/tdStorageBenchmark1.py b/src/VBox/ValidationKit/tests/storage/tdStorageBenchmark1.py
new file mode 100755
index 00000000..a9dfaa99
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/tdStorageBenchmark1.py
@@ -0,0 +1,1350 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# $Id: tdStorageBenchmark1.py $
+
+"""
+VirtualBox Validation Kit - Storage benchmark.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2012-2019 Oracle Corporation
+
+This file is part of VirtualBox Open Source Edition (OSE), as
+available from http://www.virtualbox.org. This file is free software;
+you can redistribute it and/or modify it under the terms of the GNU
+General Public License (GPL) as published by the Free Software
+Foundation, in version 2 as it comes in the "COPYING" file of the
+VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL) only, as it comes in the "COPYING.CDDL" file of the
+VirtualBox OSE distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+"""
+__version__ = "$Revision: 127855 $"
+
+
+# Standard Python imports.
+import os;
+import socket;
+import sys;
+if sys.version_info[0] >= 3:
+ from io import StringIO as StringIO; # pylint: disable=import-error,no-name-in-module
+else:
+ from StringIO import StringIO as StringIO; # pylint: disable=import-error,no-name-in-module
+
+# Only the main script needs to modify the path.
+try: __file__
+except: __file__ = sys.argv[0];
+g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
+sys.path.append(g_ksValidationKitDir);
+
+# Validation Kit imports.
+from common import constants;
+from common import utils;
+from testdriver import reporter;
+from testdriver import base;
+from testdriver import vbox;
+from testdriver import vboxcon;
+from testdriver import vboxwrappers;
+
+import remoteexecutor;
+import storagecfg;
+
+
+def _ControllerTypeToName(eControllerType):
+ """ Translate a controller type to a name. """
+ if eControllerType == vboxcon.StorageControllerType_PIIX3 or eControllerType == vboxcon.StorageControllerType_PIIX4:
+ sType = "IDE Controller";
+ elif eControllerType == vboxcon.StorageControllerType_IntelAhci:
+ sType = "SATA Controller";
+ elif eControllerType == vboxcon.StorageControllerType_LsiLogicSas:
+ sType = "SAS Controller";
+ elif eControllerType == vboxcon.StorageControllerType_LsiLogic or eControllerType == vboxcon.StorageControllerType_BusLogic:
+ sType = "SCSI Controller";
+ elif eControllerType == vboxcon.StorageControllerType_NVMe:
+ sType = "NVMe Controller";
+ else:
+ sType = "Storage Controller";
+ return sType;
+
+class FioTest(object):
+ """
+ Flexible I/O tester testcase.
+ """
+
+ kdHostIoEngine = {
+ 'solaris': ('solarisaio', False),
+ 'linux': ('libaio', True)
+ };
+
+ def __init__(self, oExecutor, dCfg = None):
+ self.oExecutor = oExecutor;
+ self.sCfgFileId = None;
+ self.dCfg = dCfg;
+ self.sError = None;
+ self.sResult = None;
+
+ def prepare(self, cMsTimeout = 30000):
+ """ Prepares the testcase """
+
+ sTargetOs = self.dCfg.get('TargetOs', 'linux');
+        sIoEngine, fDirectIo = self.kdHostIoEngine.get(sTargetOs, (None, False));
+        if sIoEngine is None:
+            return False;
+
+ cfgBuf = StringIO();
+ cfgBuf.write('[global]\n');
+ cfgBuf.write('bs=' + self.dCfg.get('RecordSize', '4k') + '\n');
+ cfgBuf.write('ioengine=' + sIoEngine + '\n');
+ cfgBuf.write('iodepth=' + self.dCfg.get('QueueDepth', '32') + '\n');
+ cfgBuf.write('size=' + self.dCfg.get('TestsetSize', '2g') + '\n');
+ if fDirectIo:
+ cfgBuf.write('direct=1\n');
+ else:
+ cfgBuf.write('direct=0\n');
+ cfgBuf.write('directory=' + self.dCfg.get('FilePath', '/mnt') + '\n');
+        cfgBuf.write('filename=fio.test.file\n');
+
+ cfgBuf.write('[seq-write]\n');
+ cfgBuf.write('rw=write\n');
+ cfgBuf.write('stonewall\n');
+
+ cfgBuf.write('[rand-write]\n');
+ cfgBuf.write('rw=randwrite\n');
+ cfgBuf.write('stonewall\n');
+
+ cfgBuf.write('[seq-read]\n');
+ cfgBuf.write('rw=read\n');
+ cfgBuf.write('stonewall\n');
+
+ cfgBuf.write('[rand-read]\n');
+ cfgBuf.write('rw=randread\n');
+ cfgBuf.write('stonewall\n');
+
+ self.sCfgFileId = self.oExecutor.copyString(cfgBuf.getvalue(), 'aio-test', cMsTimeout);
+ return self.sCfgFileId is not None;
+
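+    # With the default configuration values the job file generated by
+    # prepare() looks like this (sketch):
+    #
+    #   [global]
+    #   bs=4k
+    #   ioengine=libaio
+    #   iodepth=32
+    #   size=2g
+    #   direct=1
+    #   directory=/mnt
+    #   filename=fio.test.file
+    #
+    #   [seq-write]
+    #   rw=write
+    #   stonewall
+    #
+    # followed by the rand-write, seq-read and rand-read jobs in the same
+    # pattern.
+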
+    def run(self, cMsTimeout = 30000):
+        """ Runs the testcase """
+        fRc, sOutput, sError = self.oExecutor.execBinary('fio', (self.sCfgFileId,), cMsTimeout = cMsTimeout);
+ if fRc:
+ self.sResult = sOutput;
+ else:
+ self.sError = ('Binary: fio\n' +
+ '\nOutput:\n\n' +
+ sOutput +
+ '\nError:\n\n' +
+ sError);
+ return fRc;
+
+ def cleanup(self):
+ """ Cleans up any leftovers from the testcase. """
+
+ def reportResult(self):
+ """
+ Reports the test results to the test manager.
+ """
+ return True;
+
+ def getErrorReport(self):
+ """
+ Returns the error report in case the testcase failed.
+ """
+ return self.sError;
+
+class IozoneTest(object):
+ """
+    IOzone testcase.
+ """
+ def __init__(self, oExecutor, dCfg = None):
+ self.oExecutor = oExecutor;
+ self.sResult = None;
+ self.sError = None;
+ self.lstTests = [ ('initial writers', 'FirstWrite'),
+ ('rewriters', 'Rewrite'),
+ ('re-readers', 'ReRead'),
+ ('stride readers', 'StrideRead'),
+ ('reverse readers', 'ReverseRead'),
+ ('random readers', 'RandomRead'),
+ ('mixed workload', 'MixedWorkload'),
+ ('random writers', 'RandomWrite'),
+ ('pwrite writers', 'PWrite'),
+ ('pread readers', 'PRead'),
+ ('fwriters', 'FWrite'),
+ ('freaders', 'FRead'),
+ ('readers', 'FirstRead')];
+ self.sRecordSize = dCfg.get('RecordSize', '4k');
+ self.sTestsetSize = dCfg.get('TestsetSize', '2g');
+ self.sQueueDepth = dCfg.get('QueueDepth', '32');
+ self.sFilePath = dCfg.get('FilePath', '/mnt/iozone');
+ self.fDirectIo = True;
+
+ sTargetOs = dCfg.get('TargetOs');
+ if sTargetOs == 'solaris':
+ self.fDirectIo = False;
+
+ def prepare(self, cMsTimeout = 30000):
+ """ Prepares the testcase """
+ _ = cMsTimeout;
+ return True; # Nothing to do.
+
+ def run(self, cMsTimeout = 30000):
+ """ Runs the testcase """
+ tupArgs = ('-r', self.sRecordSize, '-s', self.sTestsetSize, \
+ '-t', '1', '-T', '-F', self.sFilePath + '/iozone.tmp');
+ if self.fDirectIo:
+ tupArgs += ('-I',);
+ fRc, sOutput, sError = self.oExecutor.execBinary('iozone', tupArgs, cMsTimeout = cMsTimeout);
+ if fRc:
+ self.sResult = sOutput;
+ else:
+ self.sError = ('Binary: iozone\n' +
+ '\nOutput:\n\n' +
+ sOutput +
+ '\nError:\n\n' +
+ sError);
+
+ return fRc;
+
+ def cleanup(self):
+ """ Cleans up any leftovers from the testcase. """
+ return True;
+
+ def reportResult(self):
+ """
+ Reports the test results to the test manager.
+ """
+
+ fRc = True;
+ if self.sResult is not None:
+ try:
+ asLines = self.sResult.splitlines();
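+                # A matching throughput summary line from iozone typically
+                # looks like:
+                #   Children see throughput for  1 initial writers =  123456.78 kB/sec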
+ for sLine in asLines:
+ sLine = sLine.strip();
+                    if sLine.startswith('Children'):
+ # Extract the value
+ idxValue = sLine.rfind('=');
+ if idxValue == -1:
+ raise Exception('IozoneTest: Invalid state');
+
+ idxValue += 1;
+ while sLine[idxValue] == ' ':
+ idxValue += 1;
+
+ # Get the reported value, cut off after the decimal point
+ # it is not supported by the testmanager yet and is not really
+ # relevant anyway.
+ idxValueEnd = idxValue;
+ while sLine[idxValueEnd].isdigit():
+ idxValueEnd += 1;
+
+ for sNeedle, sTestVal in self.lstTests:
+ if sLine.rfind(sNeedle) != -1:
+ reporter.testValue(sTestVal, sLine[idxValue:idxValueEnd],
+ constants.valueunit.g_asNames[constants.valueunit.KILOBYTES_PER_SEC]);
+ break;
+ except:
+ fRc = False;
+ else:
+ fRc = False;
+
+ return fRc;
+
+ def getErrorReport(self):
+ """
+ Returns the error report in case the testcase failed.
+ """
+ return self.sError;
+
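+# With the default configuration values an IozoneTest run boils down to the
+# following invocation (sketch; -I is omitted on Solaris where direct I/O
+# is disabled):
+#
+#   iozone -r 4k -s 2g -t 1 -T -F /mnt/iozone/iozone.tmp -I
+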
+class StorTestCfgMgr(object):
+ """
+ Manages the different testcases.
+ """
+
+ def __init__(self, aasTestLvls, aasTestsBlacklist, fnIsCfgSupported = None):
+ self.aasTestsBlacklist = aasTestsBlacklist;
+ self.at3TestLvls = [];
+ self.iTestLvl = 0;
+ self.fnIsCfgSupported = fnIsCfgSupported;
+ for asTestLvl in aasTestLvls:
+ if isinstance(asTestLvl, tuple):
+ asTestLvl, fnTestFmt = asTestLvl;
+ self.at3TestLvls.append((0, fnTestFmt, asTestLvl));
+ else:
+ self.at3TestLvls.append((0, None, asTestLvl));
+
+ self.at3TestLvls.reverse();
+
+ # Get the first non blacklisted test.
+ asTestCfg = self.getCurrentTestCfg();
+ while asTestCfg and self.isTestCfgBlacklisted(asTestCfg):
+ asTestCfg = self.advanceTestCfg();
+
+ iLvl = 0;
+ for sCfg in asTestCfg:
+ reporter.testStart('%s' % (self.getTestIdString(sCfg, iLvl)));
+ iLvl += 1;
+
+ def __del__(self):
+ # Make sure the tests are marked as done.
+ while self.iTestLvl < len(self.at3TestLvls):
+ reporter.testDone();
+ self.iTestLvl += 1;
+
+ def getTestIdString(self, oCfg, iLvl):
+ """
+ Returns a potentially formatted string for the test name.
+ """
+
+ # The order of the test levels is reversed so get the level starting
+ # from the end.
+ _, fnTestFmt, _ = self.at3TestLvls[len(self.at3TestLvls) - 1 - iLvl];
+ if fnTestFmt is not None:
+ return fnTestFmt(oCfg);
+ return oCfg;
+
+ def isTestCfgBlacklisted(self, asTestCfg):
+ """
+ Returns whether the given test config is black listed.
+ """
+ fBlacklisted = False;
+
+        for asTestBlacklist in self.aasTestsBlacklist:
+            iLvl = 0;
+            fBlacklisted = True;
+            while iLvl < len(asTestBlacklist) and iLvl < len(asTestCfg):
+                if asTestBlacklist[iLvl] != asTestCfg[iLvl] and asTestBlacklist[iLvl] != '*':
+                    fBlacklisted = False;
+                    break;
+
+                iLvl += 1;
+
+            if fBlacklisted:
+                # A matching blacklist entry was found; no need to check the rest.
+                break;
+
+ if not fBlacklisted and self.fnIsCfgSupported is not None:
+ fBlacklisted = not self.fnIsCfgSupported(asTestCfg);
+
+ return fBlacklisted;
+
+ def advanceTestCfg(self):
+ """
+ Advances to the next test config and returns it as an
+ array of strings or an empty config if there is no test left anymore.
+ """
+ iTestCfg, fnTestFmt, asTestCfg = self.at3TestLvls[self.iTestLvl];
+ iTestCfg += 1;
+ self.at3TestLvls[self.iTestLvl] = (iTestCfg, fnTestFmt, asTestCfg);
+ while iTestCfg == len(asTestCfg) and self.iTestLvl < len(self.at3TestLvls):
+ self.at3TestLvls[self.iTestLvl] = (0, fnTestFmt, asTestCfg);
+ self.iTestLvl += 1;
+ if self.iTestLvl < len(self.at3TestLvls):
+ iTestCfg, fnTestFmt, asTestCfg = self.at3TestLvls[self.iTestLvl];
+ iTestCfg += 1;
+ self.at3TestLvls[self.iTestLvl] = (iTestCfg, fnTestFmt, asTestCfg);
+ if iTestCfg < len(asTestCfg):
+ self.iTestLvl = 0;
+ break;
+ else:
+ break; # We reached the end of our tests.
+
+ return self.getCurrentTestCfg();
+
+ def getCurrentTestCfg(self):
+ """
+ Returns the current not black listed test config as an array of strings.
+ """
+ asTestCfg = [];
+
+ if self.iTestLvl < len(self.at3TestLvls):
+ for t3TestLvl in self.at3TestLvls:
+ iTestCfg, _, asTestLvl = t3TestLvl;
+ asTestCfg.append(asTestLvl[iTestCfg]);
+
+            asTestCfg.reverse();
+
+ return asTestCfg;
+
+ def getNextTestCfg(self, fSkippedLast = False):
+ """
+ Returns the next not blacklisted test config or an empty list if
+ there is no test left.
+ """
+ asTestCfgCur = self.getCurrentTestCfg();
+
+ asTestCfg = self.advanceTestCfg();
+ while asTestCfg and self.isTestCfgBlacklisted(asTestCfg):
+ asTestCfg = self.advanceTestCfg();
+
+        # Compare the current and next config and close the appropriate test
+        # categories.
+ reporter.testDone(fSkippedLast);
+ if asTestCfg:
+ idxSame = 0;
+ while asTestCfgCur[idxSame] == asTestCfg[idxSame]:
+ idxSame += 1;
+
+ for i in range(idxSame, len(asTestCfg) - 1):
+ reporter.testDone();
+
+ for i in range(idxSame, len(asTestCfg)):
+ reporter.testStart('%s' % (self.getTestIdString(asTestCfg[i], i)));
+
+ else:
+ # No more tests, mark all tests as done
+ for i in range(0, len(asTestCfgCur) - 1):
+ reporter.testDone();
+
+ return asTestCfg;
+
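+# Illustrative sketch of how the test config manager walks the levels:
+#
+#   oCfgMgr = StorTestCfgMgr([ ['VDI', 'VMDK'], ['Dynamic', 'Fixed'] ],
+#                            [ ['VMDK', 'Fixed'] ]);
+#
+# getCurrentTestCfg() starts out as ['VDI', 'Dynamic'] and repeated
+# getNextTestCfg() calls enumerate all remaining combinations, skipping the
+# blacklisted ['VMDK', 'Fixed'] and opening/closing the matching reporter
+# test scopes along the way.
+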
+class tdStorageBenchmark(vbox.TestDriver): # pylint: disable=R0902
+ """
+ Storage benchmark.
+ """
+
+ # Global storage configs for the testbox
+ kdStorageCfgs = {
+ 'testboxstor1.de.oracle.com': r'c[3-9]t\dd0\Z',
+ 'adaris': [ '/dev/sda' ]
+ };
+
+ # Available test sets.
+ kdTestSets = {
+ # Mostly for developing and debugging the testcase.
+ 'Fast': {
+ 'RecordSize': '64k',
+ 'TestsetSize': '100m',
+ 'QueueDepth': '32',
+ 'DiskSizeGb': 2
+ },
+ # For quick functionality tests where benchmark results are not required.
+ 'Functionality': {
+ 'RecordSize': '64k',
+ 'TestsetSize': '2g',
+ 'QueueDepth': '32',
+ 'DiskSizeGb': 10
+ },
+ # For benchmarking the I/O stack.
+ 'Benchmark': {
+ 'RecordSize': '64k',
+ 'TestsetSize': '20g',
+ 'QueueDepth': '32',
+ 'DiskSizeGb': 30
+ },
+ # For stress testing which takes a lot of time.
+ 'Stress': {
+ 'RecordSize': '64k',
+ 'TestsetSize': '2t',
+ 'QueueDepth': '32',
+ 'DiskSizeGb': 10000
+ },
+ };
+
+ # Dictionary mapping the virtualization mode mnemonics to a little less cryptic
+ # strings used in test descriptions.
+ kdVirtModeDescs = {
+ 'raw' : 'Raw-mode',
+ 'hwvirt' : 'HwVirt',
+ 'hwvirt-np' : 'NestedPaging'
+ };
+
+ kdHostIoCacheDescs = {
+ 'default' : 'HostCacheDef',
+ 'hostiocache' : 'HostCacheOn',
+ 'no-hostiocache' : 'HostCacheOff'
+ };
+
+ # Password ID for encryption.
+ ksPwId = 'EncPwId';
+
+ # Array indexes for the test configs.
+ kiVmName = 0;
+ kiStorageCtrl = 1;
+ kiHostIoCache = 2;
+ kiDiskFmt = 3;
+ kiDiskVar = 4;
+ kiCpuCount = 5;
+ kiVirtMode = 6;
+ kiIoTest = 7;
+ kiTestSet = 8;
+
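+    # A fully expanded test config is an array of strings (and the CPU count
+    # as an integer) indexed by the constants above, for example:
+    #   [ 'tst-storage', 'AHCI', 'default', 'VDI', 'Dynamic', 1, 'hwvirt', 'fio', 'Fast' ]
+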
+ def __init__(self):
+ vbox.TestDriver.__init__(self);
+ self.asRsrcs = None;
+ self.asTestVMsDef = ['tst-storage', 'tst-storage32'];
+ self.asTestVMs = self.asTestVMsDef;
+ self.asSkipVMs = [];
+        self.asVirtModesDef = ['hwvirt', 'hwvirt-np', 'raw'];
+ self.asVirtModes = self.asVirtModesDef;
+ self.acCpusDef = [1, 2];
+ self.acCpus = self.acCpusDef;
+ self.asStorageCtrlsDef = ['AHCI', 'IDE', 'LsiLogicSAS', 'LsiLogic', 'BusLogic', 'NVMe'];
+ self.asStorageCtrls = self.asStorageCtrlsDef;
+ self.asHostIoCacheDef = ['default', 'hostiocache', 'no-hostiocache'];
+ self.asHostIoCache = self.asHostIoCacheDef;
+ self.asDiskFormatsDef = ['VDI', 'VMDK', 'VHD', 'QED', 'Parallels', 'QCOW', 'iSCSI'];
+ self.asDiskFormats = self.asDiskFormatsDef;
+ self.asDiskVariantsDef = ['Dynamic', 'Fixed', 'DynamicSplit2G', 'FixedSplit2G', 'Network'];
+ self.asDiskVariants = self.asDiskVariantsDef;
+ self.asTestsDef = ['iozone', 'fio'];
+ self.asTests = self.asTestsDef;
+ self.asTestSetsDef = ['Fast', 'Functionality', 'Benchmark', 'Stress'];
+ self.asTestSets = self.asTestSetsDef;
+ self.asIscsiTargetsDef = [ ]; # @todo: Configure one target for basic iSCSI testing
+ self.asIscsiTargets = self.asIscsiTargetsDef;
+ self.cDiffLvlsDef = 0;
+ self.cDiffLvls = self.cDiffLvlsDef;
+ self.fTestHost = False;
+ self.fUseScratch = False;
+ self.fRecreateStorCfg = True;
+ self.fReportBenchmarkResults = True;
+ self.oStorCfg = None;
+ self.sIoLogPathDef = self.sScratchPath;
+ self.sIoLogPath = self.sIoLogPathDef;
+ self.fIoLog = False;
+ self.fUseRamDiskDef = False;
+ self.fUseRamDisk = self.fUseRamDiskDef;
+ self.fEncryptDiskDef = False;
+ self.fEncryptDisk = self.fEncryptDiskDef;
+ self.sEncryptPwDef = 'TestTestTest';
+ self.sEncryptPw = self.sEncryptPwDef;
+ self.sEncryptAlgoDef = 'AES-XTS256-PLAIN64';
+ self.sEncryptAlgo = self.sEncryptAlgoDef;
+
+ #
+ # Overridden methods.
+ #
+ def showUsage(self):
+ rc = vbox.TestDriver.showUsage(self);
+ reporter.log('');
+ reporter.log('tdStorageBenchmark1 Options:');
+        reporter.log(' --virt-modes <m1[:m2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asVirtModesDef)));
+        reporter.log(' --cpu-counts <c1[:c2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(str(c) for c in self.acCpusDef)));
+ reporter.log(' --storage-ctrls <type1[:type2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asStorageCtrlsDef)));
+ reporter.log(' --host-io-cache <setting1[:setting2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asHostIoCacheDef)));
+ reporter.log(' --disk-formats <type1[:type2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asDiskFormatsDef)));
+ reporter.log(' --disk-variants <variant1[:variant2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asDiskVariantsDef)));
+ reporter.log(' --iscsi-targets <target1[:target2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asIscsiTargetsDef)));
+ reporter.log(' --tests <test1[:test2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asTestsDef)));
+ reporter.log(' --test-sets <set1[:set2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asTestSetsDef)));
+ reporter.log(' --diff-levels <number of diffs>');
+ reporter.log(' Default: %s' % (self.cDiffLvlsDef));
+ reporter.log(' --test-vms <vm1[:vm2[:...]]>');
+ reporter.log(' Test the specified VMs in the given order. Use this to change');
+ reporter.log(' the execution order or limit the choice of VMs');
+ reporter.log(' Default: %s (all)' % (':'.join(self.asTestVMsDef)));
+ reporter.log(' --skip-vms <vm1[:vm2[:...]]>');
+ reporter.log(' Skip the specified VMs when testing.');
+ reporter.log(' --test-host');
+ reporter.log(' Do all configured tests on the host first and report the results');
+ reporter.log(' to get a baseline');
+ reporter.log(' --use-scratch');
+ reporter.log(' Use the scratch directory for testing instead of setting up');
+ reporter.log(' fresh volumes on dedicated disks (for development)');
+ reporter.log(' --always-wipe-storage-cfg');
+ reporter.log(' Recreate the host storage config before each test');
+ reporter.log(' --dont-wipe-storage-cfg');
+ reporter.log(' Don\'t recreate the host storage config before each test');
+ reporter.log(' --report-benchmark-results');
+ reporter.log(' Report all benchmark results');
+ reporter.log(' --dont-report-benchmark-results');
+ reporter.log(' Don\'t report any benchmark results');
+ reporter.log(' --io-log-path <path>');
+ reporter.log(' Default: %s' % (self.sIoLogPathDef));
+ reporter.log(' --enable-io-log');
+ reporter.log(' Whether to enable I/O logging for each test');
+ reporter.log(' --use-ramdisk');
+ reporter.log(' Default: %s' % (self.fUseRamDiskDef));
+ reporter.log(' --encrypt-disk');
+ reporter.log(' Default: %s' % (self.fEncryptDiskDef));
+ reporter.log(' --encrypt-password');
+ reporter.log(' Default: %s' % (self.sEncryptPwDef));
+ reporter.log(' --encrypt-algorithm');
+ reporter.log(' Default: %s' % (self.sEncryptAlgoDef));
+ return rc;
+
+ def parseOption(self, asArgs, iArg): # pylint: disable=R0912,R0915
+ if asArgs[iArg] == '--virt-modes':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--virt-modes" takes a colon separated list of modes');
+ self.asVirtModes = asArgs[iArg].split(':');
+ for s in self.asVirtModes:
+ if s not in self.asVirtModesDef:
+ raise base.InvalidOption('The "--virt-modes" value "%s" is not valid; valid values are: %s' \
+ % (s, ' '.join(self.asVirtModesDef)));
+ elif asArgs[iArg] == '--cpu-counts':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--cpu-counts" takes a colon separated list of cpu counts');
+ self.acCpus = [];
+ for s in asArgs[iArg].split(':'):
+ try: c = int(s);
+ except: raise base.InvalidOption('The "--cpu-counts" value "%s" is not an integer' % (s,));
+ if c <= 0: raise base.InvalidOption('The "--cpu-counts" value "%s" is zero or negative' % (s,));
+ self.acCpus.append(c);
+ elif asArgs[iArg] == '--storage-ctrls':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--storage-ctrls" takes a colon separated list of Storage controller types');
+ self.asStorageCtrls = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--host-io-cache':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--host-io-cache" takes a colon separated list of I/O cache settings');
+ self.asHostIoCache = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--disk-formats':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--disk-formats" takes a colon separated list of disk formats');
+ self.asDiskFormats = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--disk-variants':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--disk-variants" takes a colon separated list of disk variants');
+ self.asDiskVariants = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--iscsi-targets':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--iscsi-targets" takes a colon separated list of iscsi targets');
+ self.asIscsiTargets = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--tests':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--tests" takes a colon separated list of tests to run');
+ self.asTests = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--test-sets':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--test-sets" takes a colon separated list of test sets');
+ self.asTestSets = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--diff-levels':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--diff-levels" takes an integer');
+ try: self.cDiffLvls = int(asArgs[iArg]);
+ except: raise base.InvalidOption('The "--diff-levels" value "%s" is not an integer' % (asArgs[iArg],));
+ elif asArgs[iArg] == '--test-vms':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--test-vms" takes colon separated list');
+ self.asTestVMs = asArgs[iArg].split(':');
+ for s in self.asTestVMs:
+ if s not in self.asTestVMsDef:
+ raise base.InvalidOption('The "--test-vms" value "%s" is not valid; valid values are: %s' \
+ % (s, ' '.join(self.asTestVMsDef)));
+ elif asArgs[iArg] == '--skip-vms':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--skip-vms" takes colon separated list');
+ self.asSkipVMs = asArgs[iArg].split(':');
+ for s in self.asSkipVMs:
+ if s not in self.asTestVMsDef:
+                reporter.log('warning: The "--skip-vms" value "%s" does not specify any of our test VMs.' % (s));
+ elif asArgs[iArg] == '--test-host':
+ self.fTestHost = True;
+ elif asArgs[iArg] == '--use-scratch':
+ self.fUseScratch = True;
+ elif asArgs[iArg] == '--always-wipe-storage-cfg':
+ self.fRecreateStorCfg = True;
+ elif asArgs[iArg] == '--dont-wipe-storage-cfg':
+ self.fRecreateStorCfg = False;
+ elif asArgs[iArg] == '--report-benchmark-results':
+ self.fReportBenchmarkResults = True;
+ elif asArgs[iArg] == '--dont-report-benchmark-results':
+ self.fReportBenchmarkResults = False;
+ elif asArgs[iArg] == '--io-log-path':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--io-log-path" takes a path argument');
+ self.sIoLogPath = asArgs[iArg];
+ elif asArgs[iArg] == '--enable-io-log':
+ self.fIoLog = True;
+ elif asArgs[iArg] == '--use-ramdisk':
+ self.fUseRamDisk = True;
+ elif asArgs[iArg] == '--encrypt-disk':
+ self.fEncryptDisk = True;
+ elif asArgs[iArg] == '--encrypt-password':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--encrypt-password" takes a string');
+ self.sEncryptPw = asArgs[iArg];
+ elif asArgs[iArg] == '--encrypt-algorithm':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--encrypt-algorithm" takes a string');
+ self.sEncryptAlgo = asArgs[iArg];
+ else:
+ return vbox.TestDriver.parseOption(self, asArgs, iArg);
+ return iArg + 1;
+
+ def completeOptions(self):
+ # Remove skipped VMs from the test list.
+ for sVM in self.asSkipVMs:
+ try: self.asTestVMs.remove(sVM);
+ except: pass;
+
+ return vbox.TestDriver.completeOptions(self);
+
+ def getResourceSet(self):
+ # Construct the resource list the first time it's queried.
+ if self.asRsrcs is None:
+ self.asRsrcs = [];
+ if 'tst-storage' in self.asTestVMs:
+ self.asRsrcs.append('5.0/storage/tst-storage.vdi');
+ if 'tst-storage32' in self.asTestVMs:
+ self.asRsrcs.append('5.0/storage/tst-storage32.vdi');
+
+ return self.asRsrcs;
+
+ def actionConfig(self):
+
+ # Make sure vboxapi has been imported so we can use the constants.
+ if not self.importVBoxApi():
+ return False;
+
+ #
+ # Configure the VMs we're going to use.
+ #
+
+ # Linux VMs
+ if 'tst-storage' in self.asTestVMs:
+ oVM = self.createTestVM('tst-storage', 1, '5.0/storage/tst-storage.vdi', sKind = 'ArchLinux_64', fIoApic = True, \
+ eNic0AttachType = vboxcon.NetworkAttachmentType_NAT, \
+ eNic0Type = vboxcon.NetworkAdapterType_Am79C973);
+ if oVM is None:
+ return False;
+
+ if 'tst-storage32' in self.asTestVMs:
+ oVM = self.createTestVM('tst-storage32', 1, '5.0/storage/tst-storage32.vdi', sKind = 'ArchLinux', fIoApic = True, \
+ eNic0AttachType = vboxcon.NetworkAttachmentType_NAT, \
+ eNic0Type = vboxcon.NetworkAdapterType_Am79C973);
+ if oVM is None:
+ return False;
+
+ return True;
+
+ def actionExecute(self):
+ """
+ Execute the testcase.
+ """
+ fRc = self.test1();
+ return fRc;
+
+
+ #
+ # Test execution helpers.
+ #
+
+ def prepareStorage(self, oStorCfg, fRamDisk = False, cbPool = None):
+ """
+ Prepares the host storage for disk images or direct testing on the host.
+ """
+ # Create a basic pool with the default configuration.
+ sMountPoint = None;
+ fRc, sPoolId = oStorCfg.createStoragePool(cbPool = cbPool, fRamDisk = fRamDisk);
+ if fRc:
+ fRc, sMountPoint = oStorCfg.createVolume(sPoolId);
+ if not fRc:
+ sMountPoint = None;
+ oStorCfg.cleanup();
+
+ return sMountPoint;
+
+ def cleanupStorage(self, oStorCfg):
+ """
+ Cleans up any created storage space for a test.
+ """
+ return oStorCfg.cleanup();
+
+ def getGuestDisk(self, oSession, oTxsSession, eStorageController):
+ """
+ Gets the path of the disk in the guest to use for testing.
+ """
+ lstDisks = None;
+
+ # The naming scheme for NVMe is different and we don't have
+ # to query the guest for unformatted disks here because the disk with the OS
+        # is not attached to an NVMe controller.
+ if eStorageController == vboxcon.StorageControllerType_NVMe:
+ lstDisks = [ '/dev/nvme0n1' ];
+ else:
+            # Find an unformatted disk (no partition).
+ # @todo: This is a hack because LIST and STAT are not yet implemented
+ # in TXS (get to this eventually)
+ lstBlkDev = [ '/dev/sda', '/dev/sdb' ];
+ for sBlkDev in lstBlkDev:
+ fRc = oTxsSession.syncExec('/usr/bin/ls', ('ls', sBlkDev + '1'));
+ if not fRc:
+ lstDisks = [ sBlkDev ];
+ break;
+
+ _ = oSession;
+ return lstDisks;
+
+ def getDiskFormatVariantsForTesting(self, sDiskFmt, asVariants):
+ """
+ Returns a list of disk variants for testing supported by the given
+ disk format and selected for testing.
+ """
+ lstDskFmts = self.oVBoxMgr.getArray(self.oVBox.systemProperties, 'mediumFormats');
+ for oDskFmt in lstDskFmts:
+ if oDskFmt.id == sDiskFmt:
+ lstDskVariants = [];
+ lstCaps = self.oVBoxMgr.getArray(oDskFmt, 'capabilities');
+
+ if vboxcon.MediumFormatCapabilities_CreateDynamic in lstCaps \
+ and 'Dynamic' in asVariants:
+ lstDskVariants.append('Dynamic');
+
+ if vboxcon.MediumFormatCapabilities_CreateFixed in lstCaps \
+ and 'Fixed' in asVariants:
+ lstDskVariants.append('Fixed');
+
+ if vboxcon.MediumFormatCapabilities_CreateSplit2G in lstCaps \
+ and vboxcon.MediumFormatCapabilities_CreateDynamic in lstCaps \
+ and 'DynamicSplit2G' in asVariants:
+ lstDskVariants.append('DynamicSplit2G');
+
+ if vboxcon.MediumFormatCapabilities_CreateSplit2G in lstCaps \
+ and vboxcon.MediumFormatCapabilities_CreateFixed in lstCaps \
+ and 'FixedSplit2G' in asVariants:
+ lstDskVariants.append('FixedSplit2G');
+
+ if vboxcon.MediumFormatCapabilities_TcpNetworking in lstCaps \
+ and 'Network' in asVariants:
+                    lstDskVariants.append('Network'); # Solely for iSCSI to get a non-empty list
+
+ return lstDskVariants;
+
+ return [];
+
+ def convDiskToMediumVariant(self, sDiskVariant):
+ """
+ Returns a tuple of medium variant flags matching the given disk variant.
+ """
+ tMediumVariant = None;
+ if sDiskVariant == 'Dynamic':
+ tMediumVariant = (vboxcon.MediumVariant_Standard, );
+ elif sDiskVariant == 'Fixed':
+ tMediumVariant = (vboxcon.MediumVariant_Fixed, );
+ elif sDiskVariant == 'DynamicSplit2G':
+ tMediumVariant = (vboxcon.MediumVariant_Standard, vboxcon.MediumVariant_VmdkSplit2G);
+ elif sDiskVariant == 'FixedSplit2G':
+ tMediumVariant = (vboxcon.MediumVariant_Fixed, vboxcon.MediumVariant_VmdkSplit2G);
+
+ return tMediumVariant;
+
+ def getStorageCtrlFromName(self, sStorageCtrl):
+ """
+ Resolves the storage controller string to the matching constant.
+ """
+ eStorageCtrl = None;
+
+ if sStorageCtrl == 'AHCI':
+ eStorageCtrl = vboxcon.StorageControllerType_IntelAhci;
+ elif sStorageCtrl == 'IDE':
+ eStorageCtrl = vboxcon.StorageControllerType_PIIX4;
+ elif sStorageCtrl == 'LsiLogicSAS':
+ eStorageCtrl = vboxcon.StorageControllerType_LsiLogicSas;
+ elif sStorageCtrl == 'LsiLogic':
+ eStorageCtrl = vboxcon.StorageControllerType_LsiLogic;
+ elif sStorageCtrl == 'BusLogic':
+ eStorageCtrl = vboxcon.StorageControllerType_BusLogic;
+ elif sStorageCtrl == 'NVMe':
+ eStorageCtrl = vboxcon.StorageControllerType_NVMe;
+
+ return eStorageCtrl;
+
+ def getStorageDriverFromEnum(self, eStorageCtrl, fHardDisk):
+ """
+ Returns the appropriate driver name for the given storage controller
+ and a flag whether the driver has the generic SCSI driver attached.
+ """
+ if eStorageCtrl == vboxcon.StorageControllerType_IntelAhci:
+ if fHardDisk:
+ return ('ahci', False);
+ return ('ahci', True);
+ if eStorageCtrl == vboxcon.StorageControllerType_PIIX4:
+ return ('piix3ide', False);
+ if eStorageCtrl == vboxcon.StorageControllerType_LsiLogicSas:
+ return ('lsilogicsas', True);
+ if eStorageCtrl == vboxcon.StorageControllerType_LsiLogic:
+ return ('lsilogicscsi', True);
+ if eStorageCtrl == vboxcon.StorageControllerType_BusLogic:
+ return ('buslogic', True);
+ if eStorageCtrl == vboxcon.StorageControllerType_NVMe:
+ return ('nvme', False);
+
+ return ('<invalid>', False);
+
+ def isTestCfgSupported(self, asTestCfg):
+ """
+ Returns whether a specific test config is supported.
+ """
+
+ # Check whether the disk variant is supported by the selected format.
+ asVariants = self.getDiskFormatVariantsForTesting(asTestCfg[self.kiDiskFmt], [ asTestCfg[self.kiDiskVar] ]);
+ if not asVariants:
+ return False;
+
+ # For iSCSI check whether we have targets configured.
+ if asTestCfg[self.kiDiskFmt] == 'iSCSI' and not self.asIscsiTargets:
+ return False;
+
+ # Check for virt mode, CPU count and selected VM.
+ if asTestCfg[self.kiVirtMode] == 'raw' \
+ and (asTestCfg[self.kiCpuCount] > 1 or asTestCfg[self.kiVmName] == 'tst-storage'):
+ return False;
+
+        # IDE does not support disabling the host I/O cache.
+ if asTestCfg[self.kiHostIoCache] == 'no-hostiocache' \
+ and asTestCfg[self.kiStorageCtrl] == 'IDE':
+ return False;
+
+ return True;
+
+ def fnFormatCpuString(self, cCpus):
+ """
+ Formats the CPU count to be readable.
+ """
+ if cCpus == 1:
+ return '1 cpu';
+ return '%u cpus' % (cCpus);
+
+ def fnFormatVirtMode(self, sVirtMode):
+ """
+ Formats the virtualization mode to be a little less cryptic for use in test
+ descriptions.
+ """
+ return self.kdVirtModeDescs[sVirtMode];
+
+ def fnFormatHostIoCache(self, sHostIoCache):
+ """
+ Formats the host I/O cache mode to be a little less cryptic for use in test
+ descriptions.
+ """
+ return self.kdHostIoCacheDescs[sHostIoCache];
+
+ def testBenchmark(self, sTargetOs, sBenchmark, sMountpoint, oExecutor, dTestSet, \
+ cMsTimeout = 3600000):
+ """
+ Runs the given benchmark on the test host.
+ """
+
+ dTestSet['FilePath'] = sMountpoint;
+ dTestSet['TargetOs'] = sTargetOs;
+
+        fRc = False;
+        oTst = None;
+ if sBenchmark == 'iozone':
+ oTst = IozoneTest(oExecutor, dTestSet);
+ elif sBenchmark == 'fio':
+ oTst = FioTest(oExecutor, dTestSet); # pylint: disable=R0204
+
+ if oTst is not None:
+ fRc = oTst.prepare();
+ if fRc:
+ fRc = oTst.run(cMsTimeout);
+ if fRc:
+ if self.fReportBenchmarkResults:
+ fRc = oTst.reportResult();
+ else:
+ reporter.testFailure('Running the testcase failed');
+ reporter.addLogString(oTst.getErrorReport(), sBenchmark + '.log',
+ 'log/release/client', 'Benchmark raw output');
+ else:
+ reporter.testFailure('Preparing the testcase failed');
+
+ oTst.cleanup();
+
+ return fRc;
+
+ def createHd(self, oSession, sDiskFormat, sDiskVariant, iDiffLvl, oHdParent, \
+ sDiskPath, cbDisk):
+ """
+ Creates a new disk with the given parameters returning the medium object
+ on success.
+ """
+
+ oHd = None;
+ if sDiskFormat == "iSCSI" and iDiffLvl == 0:
+ listNames = [];
+            listValues = self.asIscsiTargets[0].split('|');
+ listNames.append('TargetAddress');
+ listNames.append('TargetName');
+ listNames.append('LUN');
+
+ if self.fpApiVer >= 5.0:
+ oHd = oSession.oVBox.createMedium(sDiskFormat, sDiskPath, vboxcon.AccessMode_ReadWrite, \
+ vboxcon.DeviceType_HardDisk);
+ else:
+ oHd = oSession.oVBox.createHardDisk(sDiskFormat, sDiskPath);
+ oHd.type = vboxcon.MediumType_Normal;
+ oHd.setProperties(listNames, listValues);
+ else:
+ if iDiffLvl == 0:
+ tMediumVariant = self.convDiskToMediumVariant(sDiskVariant);
+ oHd = oSession.createBaseHd(sDiskPath + '/base.disk', sDiskFormat, cbDisk, \
+ cMsTimeout = 3600 * 1000, tMediumVariant = tMediumVariant);
+ else:
+ sDiskPath = sDiskPath + '/diff_%u.disk' % (iDiffLvl);
+ oHd = oSession.createDiffHd(oHdParent, sDiskPath, None);
+
+ if oHd is not None and iDiffLvl == 0 and self.fEncryptDisk:
+ try:
+ oIProgress = oHd.changeEncryption('', self.sEncryptAlgo, self.sEncryptPw, self.ksPwId);
+ oProgress = vboxwrappers.ProgressWrapper(oIProgress, self.oVBoxMgr, self, 'Encrypting "%s"' % (sDiskPath,));
+ oProgress.wait(60*60000); # Wait for up to one hour, fixed disks take longer to encrypt.
+ if oProgress.logResult() is False:
+ raise base.GenError('Encrypting disk "%s" failed' % (sDiskPath, ));
+ except:
+ reporter.errorXcpt('changeEncryption("%s","%s","%s") failed on "%s"' \
+ % ('', self.sEncryptAlgo, self.sEncryptPw, oSession.sName) );
+ self.oVBox.deleteHdByMedium(oHd);
+ oHd = None;
+ else:
+ reporter.log('Encrypted "%s"' % (sDiskPath,));
+
+ return oHd;
+
+ def startVmAndConnect(self, sVmName):
+ """
+ Our own implementation of startVmAndConnectToTxsViaTcp to make it possible
+ to add passwords to a running VM when encryption is used.
+ """
+ oSession = self.startVmByName(sVmName);
+ if oSession is not None:
+ # Add password to the session in case encryption is used.
+ fRc = True;
+ if self.fEncryptDisk:
+ try:
+ oSession.o.console.addDiskEncryptionPassword(self.ksPwId, self.sEncryptPw, False);
+ except:
+ reporter.logXcpt();
+ fRc = False;
+
+ # Connect to TXS.
+ if fRc:
+ reporter.log2('startVmAndConnect: Started(/prepared) "%s", connecting to TXS ...' % (sVmName,));
+ (fRc, oTxsSession) = self.txsDoConnectViaTcp(oSession, 15*60000, fNatForwardingForTxs = True);
+                if fRc is True:
+                    # Success!
+                    return (oSession, oTxsSession);
+                reporter.error('startVmAndConnect: txsDoConnectViaTcp failed');
+            # Something went wrong while waiting for TXS to start; take a VM screenshot before terminating it.
+
+ self.terminateVmBySession(oSession);
+
+ return (None, None);
+
+ def testOneCfg(self, sVmName, eStorageController, sHostIoCache, sDiskFormat, # pylint: disable=R0913,R0914,R0915
+ sDiskVariant, sDiskPath, cCpus, sIoTest, sVirtMode, sTestSet):
+ """
+ Runs the specified VM thru test #1.
+
+ Returns a success indicator on the general test execution. This is not
+ the actual test result.
+ """
+ oVM = self.getVmByName(sVmName);
+
+ dTestSet = self.kdTestSets.get(sTestSet);
+ cbDisk = dTestSet.get('DiskSizeGb') * 1024*1024*1024;
+ fHwVirt = sVirtMode != 'raw';
+ fNestedPaging = sVirtMode == 'hwvirt-np';
+
+ fRc = True;
+ if sDiskFormat == 'iSCSI':
+ sDiskPath = self.asIscsiTargets[0];
+ elif self.fUseScratch:
+ sDiskPath = self.sScratchPath;
+ else:
+            # If requested, recreate the storage space to start each benchmark
+            # with a clean configuration.
+ if self.fRecreateStorCfg:
+ sMountPoint = self.prepareStorage(self.oStorCfg, self.fUseRamDisk, 2 * cbDisk);
+ if sMountPoint is not None:
+ # Create a directory where every normal user can write to.
+ self.oStorCfg.mkDirOnVolume(sMountPoint, 'test', 0o777);
+ sDiskPath = sMountPoint + '/test';
+ else:
+ fRc = False;
+ reporter.testFailure('Failed to prepare storage for VM');
+
+ if not fRc:
+ return fRc;
+
+ lstDisks = []; # List of disks we have to delete afterwards.
+
+ for iDiffLvl in range(self.cDiffLvls + 1):
+ sIoLogFile = None;
+
+ if iDiffLvl == 0:
+ reporter.testStart('Base');
+ else:
+ reporter.testStart('Diff %u' % (iDiffLvl));
+
+ # Reconfigure the VM
+ oSession = self.openSession(oVM);
+ if oSession is not None:
+ #
+                # Disable the audio controller: it shares the interrupt line with the BusLogic controller and is suspected
+                # to cause rare test failures because the device initialization fails.
+ #
+ fRc = oSession.setupAudio(vboxcon.AudioControllerType_AC97, False);
+ # Attach HD
+ fRc = fRc and oSession.ensureControllerAttached(_ControllerTypeToName(eStorageController));
+ fRc = fRc and oSession.setStorageControllerType(eStorageController, _ControllerTypeToName(eStorageController));
+
+ if sHostIoCache == 'hostiocache':
+ fRc = fRc and oSession.setStorageControllerHostIoCache(_ControllerTypeToName(eStorageController), True);
+ elif sHostIoCache == 'no-hostiocache':
+ fRc = fRc and oSession.setStorageControllerHostIoCache(_ControllerTypeToName(eStorageController), False);
+
+ iDevice = 0;
+ if eStorageController == vboxcon.StorageControllerType_PIIX3 or \
+ eStorageController == vboxcon.StorageControllerType_PIIX4:
+ iDevice = 1; # Master is for the OS.
+
+ oHdParent = None;
+ if iDiffLvl > 0:
+ oHdParent = lstDisks[0];
+ oHd = self.createHd(oSession, sDiskFormat, sDiskVariant, iDiffLvl, oHdParent, sDiskPath, cbDisk);
+ if oHd is not None:
+ lstDisks.insert(0, oHd);
+ try:
+ if oSession.fpApiVer >= 4.0:
+ oSession.o.machine.attachDevice(_ControllerTypeToName(eStorageController), \
+ 0, iDevice, vboxcon.DeviceType_HardDisk, oHd);
+ else:
+ oSession.o.machine.attachDevice(_ControllerTypeToName(eStorageController), \
+ 0, iDevice, vboxcon.DeviceType_HardDisk, oHd.id);
+ except:
+                        reporter.errorXcpt('attachDevice("%s",%s,%s,HardDisk,"%s") failed on "%s"' \
+                                           % (_ControllerTypeToName(eStorageController), 0, iDevice, oHd.id, oSession.sName) );
+ fRc = False;
+ else:
+ reporter.log('attached "%s" to %s' % (sDiskPath, oSession.sName));
+ else:
+ fRc = False;
+
+ # Set up the I/O logging config if enabled
+ if fRc and self.fIoLog:
+ try:
+ oSession.o.machine.setExtraData('VBoxInternal2/EnableDiskIntegrityDriver', '1');
+
+ iLun = 0;
+ if eStorageController == vboxcon.StorageControllerType_PIIX3 or \
+ eStorageController == vboxcon.StorageControllerType_PIIX4:
+                            iLun = 1;
+ sDrv, fDrvScsi = self.getStorageDriverFromEnum(eStorageController, True);
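+                        # With the generic SCSI driver attached, the integrity driver config lives one level further down.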
+ if fDrvScsi:
+ sCfgmPath = 'VBoxInternal/Devices/%s/0/LUN#%u/AttachedDriver/Config' % (sDrv, iLun);
+ else:
+ sCfgmPath = 'VBoxInternal/Devices/%s/0/LUN#%u/Config' % (sDrv, iLun);
+
+ sIoLogFile = '%s/%s.iolog' % (self.sIoLogPath, sDrv);
+                        reporter.log2('I/O log CFGM path: %s' % (sCfgmPath,));
+                        reporter.log2('I/O log file: %s' % (sIoLogFile,));
+ oSession.o.machine.setExtraData('%s/IoLog' % (sCfgmPath,), sIoLogFile);
+ except:
+ reporter.logXcpt();
+
+ fRc = fRc and oSession.enableVirtEx(fHwVirt);
+ fRc = fRc and oSession.enableNestedPaging(fNestedPaging);
+ fRc = fRc and oSession.setCpuCount(cCpus);
+ fRc = fRc and oSession.saveSettings();
+ fRc = oSession.close() and fRc and True; # pychecker hack.
+ oSession = None;
+ else:
+ fRc = False;
+
+ # Start up.
+ if fRc is True:
+ self.logVmInfo(oVM);
+ oSession, oTxsSession = self.startVmAndConnect(sVmName);
+ if oSession is not None:
+ self.addTask(oTxsSession);
+
+ # Fudge factor - Allow the guest to finish starting up.
+ self.sleep(5);
+
+ # Prepare the storage on the guest
+ lstBinaryPaths = ['/bin', '/sbin', '/usr/bin', '/usr/sbin' ];
+ oExecVm = remoteexecutor.RemoteExecutor(oTxsSession, lstBinaryPaths, '${SCRATCH}');
+ oStorCfgVm = storagecfg.StorageCfg(oExecVm, 'linux', self.getGuestDisk(oSession, oTxsSession, \
+ eStorageController));
+
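+                    # Preparing the guest storage can fail transiently right after boot, so retry a few times before giving up.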
+ iTry = 0;
+ while iTry < 3:
+ sMountPoint = self.prepareStorage(oStorCfgVm);
+ if sMountPoint is not None:
+                            reporter.log('Prepared storage on try %u' % (iTry + 1,));
+ break;
+ else:
+ iTry = iTry + 1;
+ self.sleep(5);
+
+ if sMountPoint is not None:
+ self.testBenchmark('linux', sIoTest, sMountPoint, oExecVm, dTestSet, \
+                                          cMsTimeout = 3 * 3600 * 1000); # 3 hours max (the benchmark and QED take a lot of time).
+ self.cleanupStorage(oStorCfgVm);
+ else:
+ reporter.testFailure('Failed to prepare storage for the guest benchmark');
+
+ # cleanup.
+ self.removeTask(oTxsSession);
+ self.terminateVmBySession(oSession);
+
+ # Add the I/O log if it exists and the test failed
+ if reporter.testErrorCount() > 0 \
+ and sIoLogFile is not None \
+ and os.path.exists(sIoLogFile):
+ reporter.addLogFile(sIoLogFile, 'misc/other', 'I/O log');
+ os.remove(sIoLogFile);
+
+ else:
+ fRc = False;
+
+ # Remove disk
+ oSession = self.openSession(oVM);
+ if oSession is not None:
+ try:
+ oSession.o.machine.detachDevice(_ControllerTypeToName(eStorageController), 0, iDevice);
+
+ # Remove storage controller if it is not an IDE controller.
+ if eStorageController is not vboxcon.StorageControllerType_PIIX3 \
+ and eStorageController is not vboxcon.StorageControllerType_PIIX4:
+ oSession.o.machine.removeStorageController(_ControllerTypeToName(eStorageController));
+
+                    oSession.saveSettings();
+ oSession.close();
+ oSession = None;
+ except:
+ reporter.errorXcpt('failed to detach/delete disk %s from storage controller' % (sDiskPath));
+ else:
+ fRc = False;
+
+ reporter.testDone();
+
+ # Delete all disks
+ for oHd in lstDisks:
+ self.oVBox.deleteHdByMedium(oHd);
+
+ # Cleanup storage area
+ if sDiskFormat != 'iSCSI' and not self.fUseScratch and self.fRecreateStorCfg:
+ self.cleanupStorage(self.oStorCfg);
+
+ return fRc;
+
+ def testStorage(self, sDiskPath = None):
+ """
+        Runs the storage testcase through the selected configurations.
+ """
+
+ aasTestCfgs = [];
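+        # The kiXxx constants assign each configuration dimension a fixed slot in the test matrix.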
+ aasTestCfgs.insert(self.kiVmName, self.asTestVMs);
+ aasTestCfgs.insert(self.kiStorageCtrl, self.asStorageCtrls);
+ aasTestCfgs.insert(self.kiHostIoCache, (self.asHostIoCache, self.fnFormatHostIoCache));
+ aasTestCfgs.insert(self.kiDiskFmt, self.asDiskFormats);
+ aasTestCfgs.insert(self.kiDiskVar, self.asDiskVariants);
+ aasTestCfgs.insert(self.kiCpuCount, (self.acCpus, self.fnFormatCpuString));
+ aasTestCfgs.insert(self.kiVirtMode, (self.asVirtModes, self.fnFormatVirtMode));
+ aasTestCfgs.insert(self.kiIoTest, self.asTests);
+ aasTestCfgs.insert(self.kiTestSet, self.asTestSets);
+
+ aasTestsBlacklist = [];
+ aasTestsBlacklist.append(['tst-storage', 'BusLogic']); # 64bit Linux is broken with BusLogic
+
+ oTstCfgMgr = StorTestCfgMgr(aasTestCfgs, aasTestsBlacklist, self.isTestCfgSupported);
+
+ fRc = True;
+ asTestCfg = oTstCfgMgr.getCurrentTestCfg();
+ while asTestCfg:
+ fRc = self.testOneCfg(asTestCfg[self.kiVmName], self.getStorageCtrlFromName(asTestCfg[self.kiStorageCtrl]), \
+ asTestCfg[self.kiHostIoCache], asTestCfg[self.kiDiskFmt], asTestCfg[self.kiDiskVar],
+ sDiskPath, asTestCfg[self.kiCpuCount], asTestCfg[self.kiIoTest], \
+ asTestCfg[self.kiVirtMode], asTestCfg[self.kiTestSet]) and fRc and True; # pychecker hack.
+
+ asTestCfg = oTstCfgMgr.getNextTestCfg();
+
+ return fRc;
+
+ def test1(self):
+ """
+ Executes test #1.
+ """
+
+ fRc = True;
+ oDiskCfg = self.kdStorageCfgs.get(socket.gethostname().lower());
+
+ # Test the host first if requested
+ if oDiskCfg is not None or self.fUseScratch:
+ lstBinaryPaths = ['/bin', '/sbin', '/usr/bin', '/usr/sbin', \
+ '/opt/csw/bin', '/usr/ccs/bin', '/usr/sfw/bin'];
+ oExecutor = remoteexecutor.RemoteExecutor(None, lstBinaryPaths, self.sScratchPath);
+ if not self.fUseScratch:
+ self.oStorCfg = storagecfg.StorageCfg(oExecutor, utils.getHostOs(), oDiskCfg);
+
+            # Try to clean up any leftovers from a previous run first.
+            fRc = self.oStorCfg.cleanupLeftovers();
+            if not fRc:
+                reporter.error('Failed to clean up leftovers from a previous run');
+
+ if self.fTestHost:
+ reporter.testStart('Host');
+ if self.fUseScratch:
+ sMountPoint = self.sScratchPath;
+ else:
+ sMountPoint = self.prepareStorage(self.oStorCfg);
+ if sMountPoint is not None:
+ for sIoTest in self.asTests:
+ reporter.testStart(sIoTest);
+ for sTestSet in self.asTestSets:
+ reporter.testStart(sTestSet);
+ dTestSet = self.kdTestSets.get(sTestSet);
+ self.testBenchmark(utils.getHostOs(), sIoTest, sMountPoint, oExecutor, dTestSet);
+ reporter.testDone();
+ reporter.testDone();
+ self.cleanupStorage(self.oStorCfg);
+ else:
+ reporter.testFailure('Failed to prepare host storage');
+ fRc = False;
+ reporter.testDone();
+ else:
+            # Create the storage space up front if it is not recreated before every test.
+ sMountPoint = None;
+ if self.fUseScratch:
+ sMountPoint = self.sScratchPath;
+ elif not self.fRecreateStorCfg:
+ reporter.testStart('Create host storage');
+                sMountPoint = self.prepareStorage(self.oStorCfg);
+                if sMountPoint is None:
+                    reporter.testFailure('Failed to prepare host storage');
+                    fRc = False;
+                else:
+                    self.oStorCfg.mkDirOnVolume(sMountPoint, 'test', 0o777);
+                    sMountPoint = sMountPoint + '/test';
+ reporter.testDone();
+
+ if fRc:
+ # Run the storage tests.
+ if not self.testStorage(sMountPoint):
+ fRc = False;
+
+ if not self.fRecreateStorCfg and not self.fUseScratch:
+ self.cleanupStorage(self.oStorCfg);
+ else:
+ fRc = False;
+
+ return fRc;
+
+if __name__ == '__main__':
+ sys.exit(tdStorageBenchmark().main(sys.argv));
+
diff --git a/src/VBox/ValidationKit/tests/storage/tdStorageSnapshotMerging1.py b/src/VBox/ValidationKit/tests/storage/tdStorageSnapshotMerging1.py
new file mode 100755
index 00000000..257de781
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/tdStorageSnapshotMerging1.py
@@ -0,0 +1,423 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# $Id: tdStorageSnapshotMerging1.py $
+
+"""
+VirtualBox Validation Kit - Storage snapshotting and merging testcase.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2013-2019 Oracle Corporation
+
+This file is part of VirtualBox Open Source Edition (OSE), as
+available from http://www.virtualbox.org. This file is free software;
+you can redistribute it and/or modify it under the terms of the GNU
+General Public License (GPL) as published by the Free Software
+Foundation, in version 2 as it comes in the "COPYING" file of the
+VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL) only, as it comes in the "COPYING.CDDL" file of the
+VirtualBox OSE distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+"""
+__version__ = "$Revision: 127855 $"
+
+
+# Standard Python imports.
+import os;
+import sys;
+import zlib;
+
+# Only the main script needs to modify the path.
+try: __file__
+except: __file__ = sys.argv[0];
+g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
+sys.path.append(g_ksValidationKitDir);
+
+# Validation Kit imports.
+from testdriver import reporter;
+from testdriver import base;
+from testdriver import vbox;
+from testdriver import vboxcon;
+from testdriver import vboxwrappers;
+
+
+def _ControllerTypeToName(eControllerType):
+ """ Translate a controller type to a name. """
+ if eControllerType == vboxcon.StorageControllerType_PIIX3 or eControllerType == vboxcon.StorageControllerType_PIIX4:
+ sType = "IDE Controller";
+ elif eControllerType == vboxcon.StorageControllerType_IntelAhci:
+ sType = "SATA Controller";
+ elif eControllerType == vboxcon.StorageControllerType_LsiLogicSas:
+ sType = "SAS Controller";
+ elif eControllerType == vboxcon.StorageControllerType_LsiLogic or eControllerType == vboxcon.StorageControllerType_BusLogic:
+ sType = "SCSI Controller";
+ else:
+ sType = "Storage Controller";
+ return sType;
+
+
+def crc32_of_file(filepath):
+    """ Returns the CRC32 checksum of the given file, read in 1 MiB chunks. """
+    current = 0;
+    with open(filepath, 'rb') as fileobj:
+        while True:
+            buf = fileobj.read(1024 * 1024);
+            if not buf:
+                break;
+            current = zlib.crc32(buf, current);
+
+    return current % 2**32;
+
+
+class tdStorageSnapshot(vbox.TestDriver): # pylint: disable=R0902
+ """
+    Storage snapshotting and merging testcase.
+ """
+ def __init__(self):
+ vbox.TestDriver.__init__(self);
+ self.asRsrcs = None;
+ self.oGuestToGuestVM = None;
+ self.oGuestToGuestSess = None;
+ self.oGuestToGuestTxs = None;
+ self.asStorageCtrlsDef = ['AHCI', 'IDE', 'LsiLogicSAS', 'LsiLogic', 'BusLogic'];
+ self.asStorageCtrls = self.asStorageCtrlsDef;
+ #self.asDiskFormatsDef = ['VDI', 'VMDK', 'VHD', 'QED', 'Parallels', 'QCOW', 'iSCSI'];
+ self.asDiskFormatsDef = ['VDI', 'VMDK', 'VHD'];
+ self.asDiskFormats = self.asDiskFormatsDef;
+ self.sRndData = os.urandom(100*1024*1024);
+
+ #
+ # Overridden methods.
+ #
+ def showUsage(self):
+ rc = vbox.TestDriver.showUsage(self);
+ reporter.log('');
+ reporter.log('tdStorageSnapshot1 Options:');
+ reporter.log(' --storage-ctrls <type1[:type2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asStorageCtrls)));
+ reporter.log(' --disk-formats <type1[:type2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asDiskFormats)));
+ return rc;
+
+ def parseOption(self, asArgs, iArg): # pylint: disable=R0912,R0915
+ if asArgs[iArg] == '--storage-ctrls':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--storage-ctrls" takes a colon separated list of Storage controller types');
+ self.asStorageCtrls = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--disk-formats':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--disk-formats" takes a colon separated list of disk formats');
+ self.asDiskFormats = asArgs[iArg].split(':');
+ else:
+ return vbox.TestDriver.parseOption(self, asArgs, iArg);
+ return iArg + 1;
+
+ def getResourceSet(self):
+ # Construct the resource list the first time it's queried.
+ if self.asRsrcs is None:
+ self.asRsrcs = ['5.3/storage/mergeMedium/t-orig.vdi',
+ '5.3/storage/mergeMedium/t-fixed.vdi',
+ '5.3/storage/mergeMedium/t-resized.vdi'];
+ return self.asRsrcs;
+
+ def actionExecute(self):
+ """
+ Execute the testcase.
+ """
+ fRc = self.test1();
+ return fRc;
+
+ def resizeMedium(self, oMedium, cbNewSize):
+ if oMedium.deviceType is not vboxcon.DeviceType_HardDisk:
+ return False;
+
+ if oMedium.type is not vboxcon.MediumType_Normal:
+ return False;
+
+        # Currently only VDI is resizable. The medium variant is not checked because the testcase creates the disks itself.
+ oMediumFormat = oMedium.mediumFormat;
+ if oMediumFormat.id != 'VDI':
+ return False;
+
+ cbCurrSize = oMedium.logicalSize;
+        # Shrinking the medium is currently not supported.
+ if cbNewSize < cbCurrSize:
+ return False;
+
+ try:
+ oProgressCom = oMedium.resize(cbNewSize);
+ oProgress = vboxwrappers.ProgressWrapper(oProgressCom, self.oVBoxMgr, self.oVBox.oTstDrv,
+ 'Resize medium %s' % (oMedium.name));
+ oProgress.wait(cMsTimeout = 60 * 1000);
+ oProgress.logResult();
+ except:
+ reporter.logXcpt('IMedium::resize failed on %s' % (oMedium.name));
+ return False;
+
+ return True;
+
+ def getMedium(self, oVM, sController):
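+        """
+        Returns the first normal hard disk medium attached to the given
+        controller of the VM, or None if none was found.
+        """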
+ oMediumAttachments = oVM.getMediumAttachmentsOfController(sController);
+
+ for oAttachment in oMediumAttachments:
+ oMedium = oAttachment.medium;
+ if oMedium.deviceType is not vboxcon.DeviceType_HardDisk:
+ continue;
+ if oMedium.type is not vboxcon.MediumType_Normal:
+ continue;
+ return oMedium;
+
+ return None;
+
+ def getSnapshotMedium(self, oSnapshot, sController):
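+        """
+        Returns the differencing medium belonging to the given snapshot on the
+        given controller, or None if not found.
+        """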
+ oVM = oSnapshot.machine;
+ oMedium = self.getMedium(oVM, sController);
+
+ for oChildMedium in oMedium.children:
+ for uSnapshotId in oChildMedium.getSnapshotIds(oVM.id):
+ if uSnapshotId == oVM.id:
+ return oChildMedium;
+
+ return None;
+
+ def openMedium(self, sHd, fImmutable = False):
+ """
+        Opens the medium in read-only mode.
+ Returns Medium object on success and None on failure. Error information is logged.
+ """
+ sFullName = self.oVBox.oTstDrv.getFullResourceName(sHd);
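+        # Use the medium if it is already registered; otherwise try to open it.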
+ try:
+ oHd = self.oVBox.findHardDisk(sFullName);
+ except:
+ try:
+ if self.fpApiVer >= 4.1:
+ oHd = self.oVBox.openMedium(sFullName, vboxcon.DeviceType_HardDisk, vboxcon.AccessMode_ReadOnly, False);
+ elif self.fpApiVer >= 4.0:
+ oHd = self.oVBox.openMedium(sFullName, vboxcon.DeviceType_HardDisk, vboxcon.AccessMode_ReadOnly);
+ else:
+ oHd = self.oVBox.openHardDisk(sFullName, vboxcon.AccessMode_ReadOnly, False, "", False, "");
+
+ except:
+ reporter.errorXcpt('failed to open hd "%s"' % (sFullName));
+ return None;
+
+ try:
+ if fImmutable:
+ oHd.type = vboxcon.MediumType_Immutable;
+ else:
+ oHd.type = vboxcon.MediumType_Normal;
+
+ except:
+ if fImmutable:
+ reporter.errorXcpt('failed to set hd "%s" immutable' % (sHd));
+ else:
+ reporter.errorXcpt('failed to set hd "%s" normal' % (sHd));
+
+ return None;
+
+ return oHd;
+
+ def cloneMedium(self, oSrcHd, oTgtHd):
+ """
+ Clones medium into target medium.
+ """
+ try:
+ oProgressCom = oSrcHd.cloneTo(oTgtHd, (vboxcon.MediumVariant_Standard, ), None);
+ oProgress = vboxwrappers.ProgressWrapper(oProgressCom, self.oVBoxMgr, self.oVBox.oTstDrv,
+ 'clone base disk %s to %s' % (oSrcHd.name, oTgtHd.name));
+ oProgress.wait(cMsTimeout = 60 * 1000);
+ oProgress.logResult();
+ except:
+ reporter.errorXcpt('failed to clone medium %s to %s' % (oSrcHd.name, oTgtHd.name));
+ return False;
+
+ return True;
+
+ def deleteVM(self, oVM):
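+        """
+        Unregisters and deletes the given VM, logging (but otherwise ignoring)
+        any errors along the way.
+        """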
+ try:
+ oVM.unregister(vboxcon.CleanupMode_DetachAllReturnNone);
+ except:
+ reporter.logXcpt();
+
+ if self.fpApiVer >= 4.0:
+ try:
+ if self.fpApiVer >= 4.3:
+ oProgress = oVM.deleteConfig([]);
+ else:
+ oProgress = oVM.delete(None);
+ self.waitOnProgress(oProgress);
+
+ except:
+ reporter.logXcpt();
+
+ else:
+ try: oVM.deleteSettings();
+ except: reporter.logXcpt();
+
+ return None;
+
+ #
+ # Test execution helpers.
+ #
+
+ def test1OneCfg(self, eStorageController, oDskFmt):
+ """
+ Runs the specified VM thru test #1.
+
+ Returns a success indicator on the general test execution. This is not
+ the actual test result.
+ """
+
+        (asExts, aTypes) = oDskFmt.describeFileExtensions();
+        sExt = None;
+        for i in range(0, len(asExts)): #pylint: disable=consider-using-enumerate
+            if aTypes[i] is vboxcon.DeviceType_HardDisk:
+                sExt = '.' + asExts[i];
+                break;
+
+        if sExt is None:
+            return False;
+
+ oOrigBaseHd = self.openMedium('5.3/storage/mergeMedium/t-orig.vdi');
+ if oOrigBaseHd is None:
+ return False;
+
+        # Currently only VDI is resizable. The medium variant is not checked because the testcase creates the disks itself.
+        fFmtDynamic = oDskFmt.id == 'VDI';
+        sOrigWithDiffHd = '5.3/storage/mergeMedium/t-fixed.vdi';
+        uOrigCrc = 0x7a417cbb;
+
+ if fFmtDynamic:
+ sOrigWithDiffHd = '5.3/storage/mergeMedium/t-resized.vdi';
+ uOrigCrc = 0xa8f5daa3;
+
+ oOrigWithDiffHd = self.openMedium(sOrigWithDiffHd);
+ if oOrigWithDiffHd is None:
+ return False;
+
+ oVM = self.createTestVM('testvm', 1, None);
+ if oVM is None:
+ return False;
+
+ sController = _ControllerTypeToName(eStorageController);
+
+ # Reconfigure the VM
+ oSession = self.openSession(oVM);
+ if oSession is None:
+ return False;
+ # Attach HD
+
+ fRc = True;
+ sFile = 't-base' + sExt;
+ sHddPath = os.path.join(self.oVBox.oTstDrv.sScratchPath, sFile);
+ oHd = oSession.createBaseHd(sHddPath, sFmt=oDskFmt.id, cb=oOrigBaseHd.logicalSize);
+        # If oSession.createBaseHd cannot create the disk because it already exists, oHd will point to some stub object anyway.
+ fRc = fRc and oHd is not None and (oHd.logicalSize == oOrigBaseHd.logicalSize);
+ fRc = fRc and self.cloneMedium(oOrigBaseHd, oHd);
+
+ fRc = fRc and oSession.ensureControllerAttached(sController);
+ fRc = fRc and oSession.setStorageControllerType(eStorageController, sController);
+ fRc = fRc and oSession.saveSettings();
+        fRc = fRc and oSession.attachHd(sHddPath, sController, iPort = 0, fImmutable = False, fForceResource = False);
+
+ if fRc:
+ oSession.takeSnapshot('Base snapshot');
+ oSnapshot = oSession.findSnapshot('Base snapshot');
+
+ if oSnapshot is not None:
+ oSnapshotMedium = self.getSnapshotMedium(oSnapshot, sController);
+ fRc = oSnapshotMedium is not None;
+
+ if fFmtDynamic:
+ fRc = fRc and self.resizeMedium(oSnapshotMedium, oOrigWithDiffHd.logicalSize);
+ fRc = fRc and self.cloneMedium(oOrigWithDiffHd, oSnapshotMedium);
+ fRc = fRc and oSession.deleteSnapshot(oSnapshot.id, cMsTimeout = 120 * 1000);
+
+ if fRc:
+            # Create a fixed VMDK disk whose flat file is used to verify the merge result via its CRC32 checksum.
+ sResFilePath = os.path.join(self.oVBox.oTstDrv.sScratchPath, 't_res.vmdk');
+ sResFilePathRaw = os.path.join(self.oVBox.oTstDrv.sScratchPath, 't_res-flat.vmdk');
+ oResHd = oSession.createBaseHd(sResFilePath, sFmt='VMDK', cb=oOrigWithDiffHd.logicalSize,
+ tMediumVariant = (vboxcon.MediumVariant_Fixed, ));
+ fRc = oResHd is not None;
+ fRc = fRc and self.cloneMedium(oHd, oResHd);
+
+ uResCrc32 = 0;
+ if fRc:
+ uResCrc32 = crc32_of_file(sResFilePathRaw);
+ if uResCrc32 == uOrigCrc:
+ reporter.log('Snapshot merged successfully. Crc32 is correct');
+ fRc = True;
+ else:
+ reporter.error('Snapshot merging failed. Crc32 is invalid');
+ fRc = False;
+
+                if oResHd is not None:
+                    self.oVBox.deleteHdByMedium(oResHd);
+
+ if oSession is not None:
+ if oHd is not None:
+ oSession.detachHd(sController, iPort = 0, iDevice = 0);
+
+ oSession.saveSettings(fClose = True);
+ if oHd is not None:
+ self.oVBox.deleteHdByMedium(oHd);
+
+ self.deleteVM(oVM);
+ return fRc;
+
+ def test1(self):
+ """
+ Executes test #1 thru the various configurations.
+ """
+ if not self.importVBoxApi():
+ return False;
+
+ sVmName = 'testvm';
+ reporter.testStart(sVmName);
+
+        aoDskFmts = self.oVBoxMgr.getArray(self.oVBox.systemProperties, 'mediumFormats');
+        if aoDskFmts is None or len(aoDskFmts) < 1:
+            reporter.testDone();
+            return False;
+
+ fRc = True;
+ for sStorageCtrl in self.asStorageCtrls:
+ reporter.testStart(sStorageCtrl);
+ if sStorageCtrl == 'AHCI':
+ eStorageCtrl = vboxcon.StorageControllerType_IntelAhci;
+ elif sStorageCtrl == 'IDE':
+ eStorageCtrl = vboxcon.StorageControllerType_PIIX4;
+ elif sStorageCtrl == 'LsiLogicSAS':
+ eStorageCtrl = vboxcon.StorageControllerType_LsiLogicSas;
+ elif sStorageCtrl == 'LsiLogic':
+ eStorageCtrl = vboxcon.StorageControllerType_LsiLogic;
+ elif sStorageCtrl == 'BusLogic':
+ eStorageCtrl = vboxcon.StorageControllerType_BusLogic;
+ else:
+ eStorageCtrl = None;
+
+ for oDskFmt in aoDskFmts:
+ if oDskFmt.id in self.asDiskFormats:
+ reporter.testStart('%s' % (oDskFmt.id));
+ fRc = self.test1OneCfg(eStorageCtrl, oDskFmt);
+ reporter.testDone();
+ if not fRc:
+ break;
+
+ reporter.testDone();
+ if not fRc:
+ break;
+
+ reporter.testDone();
+ return fRc;
+
+if __name__ == '__main__':
+ sys.exit(tdStorageSnapshot().main(sys.argv));
diff --git a/src/VBox/ValidationKit/tests/storage/tdStorageStress1.py b/src/VBox/ValidationKit/tests/storage/tdStorageStress1.py
new file mode 100755
index 00000000..556aeb46
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/tdStorageStress1.py
@@ -0,0 +1,517 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Storage testcase using xfstests.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2012-2019 Oracle Corporation
+
+This file is part of VirtualBox Open Source Edition (OSE), as
+available from http://www.virtualbox.org. This file is free software;
+you can redistribute it and/or modify it under the terms of the GNU
+General Public License (GPL) as published by the Free Software
+Foundation, in version 2 as it comes in the "COPYING" file of the
+VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL) only, as it comes in the "COPYING.CDDL" file of the
+VirtualBox OSE distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+"""
+__version__ = "$Id: tdStorageStress1.py $"
+
+
+# Standard Python imports.
+import os;
+import sys;
+
+# Only the main script needs to modify the path.
+try: __file__
+except: __file__ = sys.argv[0];
+g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
+sys.path.append(g_ksValidationKitDir);
+
+# Validation Kit imports.
+from testdriver import reporter;
+from testdriver import base;
+from testdriver import vbox;
+from testdriver import vboxcon;
+
+def _ControllerTypeToName(eControllerType):
+ """ Translate a controller type to a name. """
+ if eControllerType == vboxcon.StorageControllerType_PIIX3 or eControllerType == vboxcon.StorageControllerType_PIIX4:
+ sType = "IDE Controller";
+ elif eControllerType == vboxcon.StorageControllerType_IntelAhci:
+ sType = "SATA Controller";
+ elif eControllerType == vboxcon.StorageControllerType_LsiLogicSas:
+ sType = "SAS Controller";
+ elif eControllerType == vboxcon.StorageControllerType_LsiLogic or eControllerType == vboxcon.StorageControllerType_BusLogic:
+ sType = "SCSI Controller";
+ else:
+ sType = "Storage Controller";
+ return sType;
+
+class tdStorageStress(vbox.TestDriver): # pylint: disable=R0902
+ """
+ Storage testcase.
+ """
+
+ def __init__(self):
+ vbox.TestDriver.__init__(self);
+ self.asRsrcs = None;
+ self.oGuestToGuestVM = None;
+ self.oGuestToGuestSess = None;
+ self.oGuestToGuestTxs = None;
+ self.asTestVMsDef = ['tst-debian'];
+ self.asTestVMs = self.asTestVMsDef;
+ self.asSkipVMs = [];
+        self.asVirtModesDef = ['hwvirt', 'hwvirt-np', 'raw',];
+        self.asVirtModes = self.asVirtModesDef;
+        self.acCpusDef = [1, 2,];
+ self.acCpus = self.acCpusDef;
+ self.asStorageCtrlsDef = ['AHCI', 'IDE', 'LsiLogicSAS', 'LsiLogic', 'BusLogic'];
+ self.asStorageCtrls = self.asStorageCtrlsDef;
+ self.asDiskFormatsDef = ['VDI', 'VMDK', 'VHD', 'QED', 'Parallels', 'QCOW'];
+ self.asDiskFormats = self.asDiskFormatsDef;
+ self.asTestsDef = ['xfstests'];
+ self.asTests = self.asTestsDef;
+ self.asGuestFs = ['xfs', 'ext4', 'btrfs'];
+ self.asGuestFsDef = self.asGuestFs;
+ self.asIscsiTargetsDef = ['aurora|iqn.2011-03.home.aurora:aurora.storagebench|1'];
+ self.asIscsiTargets = self.asIscsiTargetsDef;
+ self.asDirsDef = ['/run/media/alexander/OWCSSD/alexander', \
+ '/run/media/alexander/CrucialSSD/alexander', \
+ '/run/media/alexander/HardDisk/alexander', \
+ '/home/alexander'];
+ self.asDirs = self.asDirsDef;
+
+ #
+ # Overridden methods.
+ #
+ def showUsage(self):
+ rc = vbox.TestDriver.showUsage(self);
+ reporter.log('');
+        reporter.log('tdStorageStress1 Options:');
+        reporter.log('  --virt-modes    <m1[:m2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asVirtModesDef)));
+        reporter.log('  --cpu-counts    <c1[:c2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(str(c) for c in self.acCpusDef)));
+ reporter.log(' --storage-ctrls <type1[:type2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asStorageCtrls)));
+ reporter.log(' --disk-formats <type1[:type2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asDiskFormats)));
+ reporter.log(' --disk-dirs <path1[:path2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asDirs)));
+ reporter.log(' --iscsi-targets <target1[:target2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asIscsiTargets)));
+ reporter.log(' --tests <test1[:test2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asTests)));
+ reporter.log(' --guest-fs <fs1[:fs2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asGuestFs)));
+ reporter.log(' --test-vms <vm1[:vm2[:...]]>');
+ reporter.log(' Test the specified VMs in the given order. Use this to change');
+ reporter.log(' the execution order or limit the choice of VMs');
+ reporter.log(' Default: %s (all)' % (':'.join(self.asTestVMsDef)));
+ reporter.log(' --skip-vms <vm1[:vm2[:...]]>');
+ reporter.log(' Skip the specified VMs when testing.');
+ return rc;
+
+ def parseOption(self, asArgs, iArg): # pylint: disable=R0912,R0915
+ if asArgs[iArg] == '--virt-modes':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--virt-modes" takes a colon separated list of modes');
+ self.asVirtModes = asArgs[iArg].split(':');
+ for s in self.asVirtModes:
+ if s not in self.asVirtModesDef:
+ raise base.InvalidOption('The "--virt-modes" value "%s" is not valid; valid values are: %s' \
+ % (s, ' '.join(self.asVirtModesDef)));
+ elif asArgs[iArg] == '--cpu-counts':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--cpu-counts" takes a colon separated list of cpu counts');
+ self.acCpus = [];
+ for s in asArgs[iArg].split(':'):
+ try: c = int(s);
+ except: raise base.InvalidOption('The "--cpu-counts" value "%s" is not an integer' % (s,));
+ if c <= 0: raise base.InvalidOption('The "--cpu-counts" value "%s" is zero or negative' % (s,));
+ self.acCpus.append(c);
+ elif asArgs[iArg] == '--storage-ctrls':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--storage-ctrls" takes a colon separated list of Storage controller types');
+ self.asStorageCtrls = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--disk-formats':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--disk-formats" takes a colon separated list of disk formats');
+ self.asDiskFormats = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--disk-dirs':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--disk-dirs" takes a colon separated list of directories');
+ self.asDirs = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--iscsi-targets':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--iscsi-targets" takes a colon separated list of iscsi targets');
+ self.asIscsiTargets = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--tests':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--tests" takes a colon separated list of disk formats');
+ self.asTests = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--guest-fs':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--guest-fs" takes a colon separated list of filesystem identifiers');
+ self.asGuestFs = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--test-vms':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--test-vms" takes colon separated list');
+ self.asTestVMs = asArgs[iArg].split(':');
+ for s in self.asTestVMs:
+ if s not in self.asTestVMsDef:
+ raise base.InvalidOption('The "--test-vms" value "%s" is not valid; valid values are: %s' \
+ % (s, ' '.join(self.asTestVMsDef)));
+ elif asArgs[iArg] == '--skip-vms':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--skip-vms" takes colon separated list');
+ self.asSkipVMs = asArgs[iArg].split(':');
+ for s in self.asSkipVMs:
+ if s not in self.asTestVMsDef:
+                    reporter.log('warning: The "--skip-vms" value "%s" does not specify any of our test VMs.' % (s));
+ else:
+ return vbox.TestDriver.parseOption(self, asArgs, iArg);
+ return iArg + 1;
+
+ def completeOptions(self):
+ # Remove skipped VMs from the test list.
+ for sVM in self.asSkipVMs:
+ try: self.asTestVMs.remove(sVM);
+ except: pass;
+
+ return vbox.TestDriver.completeOptions(self);
+
+ def getResourceSet(self):
+ # Construct the resource list the first time it's queried.
+ if self.asRsrcs is None:
+ self.asRsrcs = [];
+ if 'tst-debian' in self.asTestVMs:
+ self.asRsrcs.append('4.2/storage/debian.vdi');
+
+ return self.asRsrcs;
+
+ def actionConfig(self):
+        # Some trickery to guess the location of the iso. ## fixme - testsuite unzip ++
+        sVBoxValidationKit_iso = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                              '../../VBoxValidationKitStorIo.iso'));
+ if not os.path.isfile(sVBoxValidationKit_iso):
+ sVBoxValidationKit_iso = '/mnt/ramdisk/vbox/svn/trunk/validationkit/VBoxValidationKitStorIo.iso';
+ if not os.path.isfile(sVBoxValidationKit_iso):
+ sVBoxValidationKit_iso = '/mnt/ramdisk/vbox/svn/trunk/testsuite/VBoxTestSuiteStorIo.iso';
+ if not os.path.isfile(sVBoxValidationKit_iso):
+ sCur = os.getcwd();
+ for i in range(0, 10):
+ sVBoxValidationKit_iso = os.path.join(sCur, 'validationkit/VBoxValidationKitStorIo.iso');
+ if os.path.isfile(sVBoxValidationKit_iso):
+ break;
+ sVBoxValidationKit_iso = os.path.join(sCur, 'testsuite/VBoxTestSuiteStorIo.iso');
+ if os.path.isfile(sVBoxValidationKit_iso):
+ break;
+ sCur = os.path.abspath(os.path.join(sCur, '..'));
+ if i is None: pass; # shut up pychecker/pylint.
+ if not os.path.isfile(sVBoxValidationKit_iso):
+ sVBoxValidationKit_iso = '/mnt/VirtualBox/VBoxValidationKitStorIo.iso';
+ if not os.path.isfile(sVBoxValidationKit_iso):
+ sVBoxValidationKit_iso = '/mnt/VirtualBox/VBoxTestSuiteStorIo.iso';
+
+ # Make sure vboxapi has been imported so we can use the constants.
+ if not self.importVBoxApi():
+ return False;
+
+ #
+ # Configure the VMs we're going to use.
+ #
+
+ # Linux VMs
+ if 'tst-debian' in self.asTestVMs:
+ oVM = self.createTestVM('tst-debian', 1, '4.2/storage/debian.vdi', sKind = 'Debian_64', fIoApic = True, \
+ eNic0AttachType = vboxcon.NetworkAttachmentType_NAT, \
+ eNic0Type = vboxcon.NetworkAdapterType_Am79C973, \
+ sDvdImage = sVBoxValidationKit_iso);
+ if oVM is None:
+ return False;
+
+ return True;
+
+ def actionExecute(self):
+ """
+ Execute the testcase.
+ """
+ fRc = self.test1();
+ return fRc;
+
+
+ #
+ # Test execution helpers.
+ #
+
+ def test1RunTestProgs(self, oSession, oTxsSession, fRc, sTestName, sGuestFs):
+ """
+ Runs all the test programs on the test machine.
+ """
+ _ = oSession;
+
+ reporter.testStart(sTestName);
+
+ sMkfsCmd = 'mkfs.' + sGuestFs;
+
+        # Prepare the test disks: create the filesystem directly on each disk, without a partition table.
+ reporter.testStart('Preparation');
+ fRc = fRc and self.txsRunTest(oTxsSession, 'Create FS 1', 60000, \
+ '/sbin/' + sMkfsCmd,
+ (sMkfsCmd, '/dev/sdb'));
+
+ fRc = fRc and self.txsRunTest(oTxsSession, 'Create FS 2', 60000, \
+ '/sbin/' + sMkfsCmd,
+ (sMkfsCmd, '/dev/sdc'));
+
+ # Create test and scratch directory
+ fRc = fRc and self.txsRunTest(oTxsSession, 'Create /mnt/test', 10000, \
+ '/bin/mkdir',
+ ('mkdir', '/mnt/test'));
+
+ fRc = fRc and self.txsRunTest(oTxsSession, 'Create /mnt/scratch', 10000, \
+ '/bin/mkdir',
+ ('mkdir', '/mnt/scratch'));
+
+ # Mount test and scratch directory.
+ fRc = fRc and self.txsRunTest(oTxsSession, 'Mount /mnt/test', 10000, \
+ '/bin/mount',
+ ('mount', '/dev/sdb','/mnt/test'));
+
+ fRc = fRc and self.txsRunTest(oTxsSession, 'Mount /mnt/scratch', 10000, \
+ '/bin/mount',
+ ('mount', '/dev/sdc','/mnt/scratch'));
+
+ fRc = fRc and self.txsRunTest(oTxsSession, 'Copying xfstests', 10000, \
+ '/bin/cp',
+ ('cp', '-r','${CDROM}/${OS.ARCH}/xfstests', '/tmp'));
+
+ reporter.testDone();
+
+        # Run xfstests (the sh + cd indirection is required because the script must be
+        # executed with the cwd set to the root of the xfstests directory).
+ reporter.testStart('xfstests');
+ if fRc and 'xfstests' in self.asTests:
+ fRc = self.txsRunTest(oTxsSession, 'xfstests', 3600000,
+ '/bin/sh',
+ ('sh', '-c', '(cd /tmp/xfstests && ./check -g auto)'),
+ ('TEST_DIR=/mnt/test', 'TEST_DEV=/dev/sdb', 'SCRATCH_MNT=/mnt/scratch', 'SCRATCH_DEV=/dev/sdc',
+ 'FSTYP=' + sGuestFs));
+ reporter.testDone();
+ else:
+ reporter.testDone(fSkipped = True);
+
+ reporter.testDone(not fRc);
+ return fRc;
+
+ # pylint: disable=R0913
+
+ def test1OneCfg(self, sVmName, eStorageController, sDiskFormat, sDiskPath1, sDiskPath2, \
+ sGuestFs, cCpus, fHwVirt, fNestedPaging):
+ """
+ Runs the specified VM thru test #1.
+
+ Returns a success indicator on the general test execution. This is not
+ the actual test result.
+ """
+ oVM = self.getVmByName(sVmName);
+
+ # Reconfigure the VM
+ fRc = True;
+ oSession = self.openSession(oVM);
+ if oSession is not None:
+ # Attach HD
+ fRc = oSession.ensureControllerAttached(_ControllerTypeToName(eStorageController));
+ fRc = fRc and oSession.setStorageControllerType(eStorageController, _ControllerTypeToName(eStorageController));
+
+ if sDiskFormat == "iSCSI":
+ listNames = [];
+                listValues = sDiskPath1.split('|');
+ listNames.append('TargetAddress');
+ listNames.append('TargetName');
+ listNames.append('LUN');
+
+ if self.fpApiVer >= 5.0:
+ oHd = oSession.oVBox.createMedium(sDiskFormat, sDiskPath1, vboxcon.AccessMode_ReadWrite, \
+ vboxcon.DeviceType_HardDisk);
+ else:
+ oHd = oSession.oVBox.createHardDisk(sDiskFormat, sDiskPath1);
+ oHd.type = vboxcon.MediumType_Normal;
+ oHd.setProperties(listNames, listValues);
+
+ # Attach it.
+ if fRc is True:
+ try:
+ if oSession.fpApiVer >= 4.0:
+ oSession.o.machine.attachDevice(_ControllerTypeToName(eStorageController), \
+ 1, 0, vboxcon.DeviceType_HardDisk, oHd);
+ else:
+ oSession.o.machine.attachDevice(_ControllerTypeToName(eStorageController), \
+ 1, 0, vboxcon.DeviceType_HardDisk, oHd.id);
+ except:
+ reporter.errorXcpt('attachDevice("%s",%s,%s,HardDisk,"%s") failed on "%s"' \
+ % (_ControllerTypeToName(eStorageController), 1, 0, oHd.id, oSession.sName) );
+ fRc = False;
+ else:
+ reporter.log('attached "%s" to %s' % (sDiskPath1, oSession.sName));
+ else:
+ fRc = fRc and oSession.createAndAttachHd(sDiskPath1, sDiskFormat, _ControllerTypeToName(eStorageController), \
+ cb = 10*1024*1024*1024, iPort = 1, fImmutable = False);
+ fRc = fRc and oSession.createAndAttachHd(sDiskPath2, sDiskFormat, _ControllerTypeToName(eStorageController), \
+ cb = 10*1024*1024*1024, iPort = 2, fImmutable = False);
+ fRc = fRc and oSession.enableVirtEx(fHwVirt);
+ fRc = fRc and oSession.enableNestedPaging(fNestedPaging);
+ fRc = fRc and oSession.setCpuCount(cCpus);
+ fRc = fRc and oSession.saveSettings();
+ fRc = oSession.close() and fRc and True; # pychecker hack.
+ oSession = None;
+ else:
+ fRc = False;
+
+ # Start up.
+ if fRc is True:
+ self.logVmInfo(oVM);
+ oSession, oTxsSession = self.startVmAndConnectToTxsViaTcp(sVmName, fCdWait = False, fNatForwardingForTxs = True);
+ if oSession is not None:
+ self.addTask(oTxsSession);
+
+ # Fudge factor - Allow the guest to finish starting up.
+ self.sleep(5);
+
+ fRc = self.test1RunTestProgs(oSession, oTxsSession, fRc, 'stress testing', sGuestFs);
+
+ # cleanup.
+ self.removeTask(oTxsSession);
+                self.terminateVmBySession(oSession);
+
+ # Remove disk
+ oSession = self.openSession(oVM);
+ if oSession is not None:
+ try:
+ oSession.o.machine.detachDevice(_ControllerTypeToName(eStorageController), 1, 0);
+ oSession.o.machine.detachDevice(_ControllerTypeToName(eStorageController), 2, 0);
+
+ # Remove storage controller if it is not an IDE controller.
+ if eStorageController is not vboxcon.StorageControllerType_PIIX3 \
+ and eStorageController is not vboxcon.StorageControllerType_PIIX4:
+ oSession.o.machine.removeStorageController(_ControllerTypeToName(eStorageController));
+
+ oSession.saveSettings();
+ oSession.oVBox.deleteHdByLocation(sDiskPath1);
+ oSession.oVBox.deleteHdByLocation(sDiskPath2);
+ oSession.saveSettings();
+ oSession.close();
+ oSession = None;
+ except:
+ reporter.errorXcpt('failed to detach/delete disks %s and %s from storage controller' % \
+ (sDiskPath1, sDiskPath2));
+ else:
+ fRc = False;
+ else:
+ fRc = False;
+ return fRc;
+
+ def test1OneVM(self, sVmName):
+ """
+ Runs one VM thru the various configurations.
+ """
+ reporter.testStart(sVmName);
+ fRc = True;
+ for sStorageCtrl in self.asStorageCtrls:
+ reporter.testStart(sStorageCtrl);
+
+ if sStorageCtrl == 'AHCI':
+ eStorageCtrl = vboxcon.StorageControllerType_IntelAhci;
+ elif sStorageCtrl == 'IDE':
+ eStorageCtrl = vboxcon.StorageControllerType_PIIX4;
+ elif sStorageCtrl == 'LsiLogicSAS':
+ eStorageCtrl = vboxcon.StorageControllerType_LsiLogicSas;
+ elif sStorageCtrl == 'LsiLogic':
+ eStorageCtrl = vboxcon.StorageControllerType_LsiLogic;
+ elif sStorageCtrl == 'BusLogic':
+ eStorageCtrl = vboxcon.StorageControllerType_BusLogic;
+ else:
+ eStorageCtrl = None;
+
+ for sDiskFormat in self.asDiskFormats:
+ reporter.testStart('%s' % (sDiskFormat,));
+
+ asPaths = self.asDirs;
+
+ for sDir in asPaths:
+ reporter.testStart('%s' % (sDir,));
+
+ sPathDisk1 = sDir + "/disk1.disk";
+ sPathDisk2 = sDir + "/disk2.disk";
+
+ for sGuestFs in self.asGuestFs:
+ reporter.testStart('%s' % (sGuestFs,));
+
+ for cCpus in self.acCpus:
+ if cCpus == 1: reporter.testStart('1 cpu');
+ else: reporter.testStart('%u cpus' % (cCpus,));
+
+ for sVirtMode in self.asVirtModes:
+ if sVirtMode == 'raw' and cCpus > 1:
+ continue;
+                            hsVirtModeDesc = { 'raw': 'Raw-mode', 'hwvirt': 'HwVirt', 'hwvirt-np': 'NestedPaging' };
+ reporter.testStart(hsVirtModeDesc[sVirtMode]);
+
+ fHwVirt = sVirtMode != 'raw';
+ fNestedPaging = sVirtMode == 'hwvirt-np';
+ fRc = self.test1OneCfg(sVmName, eStorageCtrl, sDiskFormat, sPathDisk1, sPathDisk2, \
+ sGuestFs, cCpus, fHwVirt, fNestedPaging) and fRc and True;
+ reporter.testDone();
+ reporter.testDone();
+ reporter.testDone();
+ reporter.testDone();
+ reporter.testDone();
+ reporter.testDone();
+ reporter.testDone();
+ return fRc;
+
+ def test1(self):
+ """
+ Executes test #1.
+ """
+
+        # Loop thru the test VMs, remembering any failure.
+        fRc = True;
+        for sVM in self.asTestVMs:
+            fRc = self.test1OneVM(sVM) and fRc;
+
+ return fRc;
+
+
+
+if __name__ == '__main__':
+ sys.exit(tdStorageStress().main(sys.argv));
+