summaryrefslogtreecommitdiffstats
path: root/src/VBox/ValidationKit/tests/storage
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-11 08:17:27 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-11 08:17:27 +0000
commitf215e02bf85f68d3a6106c2a1f4f7f063f819064 (patch)
tree6bb5b92c046312c4e95ac2620b10ddf482d3fa8b /src/VBox/ValidationKit/tests/storage
parentInitial commit. (diff)
downloadvirtualbox-f215e02bf85f68d3a6106c2a1f4f7f063f819064.tar.xz
virtualbox-f215e02bf85f68d3a6106c2a1f4f7f063f819064.zip
Adding upstream version 7.0.14-dfsg.upstream/7.0.14-dfsg
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/ValidationKit/tests/storage')
-rw-r--r--src/VBox/ValidationKit/tests/storage/Makefile.kmk56
-rwxr-xr-xsrc/VBox/ValidationKit/tests/storage/remoteexecutor.py314
-rwxr-xr-xsrc/VBox/ValidationKit/tests/storage/storagecfg.py681
-rwxr-xr-xsrc/VBox/ValidationKit/tests/storage/tdStorageBenchmark1.py1469
-rwxr-xr-xsrc/VBox/ValidationKit/tests/storage/tdStorageRawDrive1.py1692
-rwxr-xr-xsrc/VBox/ValidationKit/tests/storage/tdStorageSnapshotMerging1.py414
-rwxr-xr-xsrc/VBox/ValidationKit/tests/storage/tdStorageStress1.py513
7 files changed, 5139 insertions, 0 deletions
diff --git a/src/VBox/ValidationKit/tests/storage/Makefile.kmk b/src/VBox/ValidationKit/tests/storage/Makefile.kmk
new file mode 100644
index 00000000..bcb88ec8
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/Makefile.kmk
@@ -0,0 +1,56 @@
# $Id: Makefile.kmk $
## @file
# VirtualBox Validation Kit - Storage Tests.
#

#
# Copyright (C) 2012-2023 Oracle and/or its affiliates.
#
# This file is part of VirtualBox base platform packages, as
# available from https://www.virtualbox.org.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, in version 3 of the
# License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://www.gnu.org/licenses>.
#
# The contents of this file may alternatively be used under the terms
# of the Common Development and Distribution License Version 1.0
# (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
# in the VirtualBox distribution, in which case the provisions of the
# CDDL are applicable instead of those of the GPL.
#
# You may elect to license modified versions of this file under the
# terms and conditions of either the GPL or the CDDL or both.
#
# SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
#

SUB_DEPTH = ../../../../..
include $(KBUILD_PATH)/subheader.kmk


# Install the storage test drivers (td*) and their helper modules into the
# Validation Kit's tests/storage/ directory, marked executable.
INSTALLS += ValidationKitTestsStorage
ValidationKitTestsStorage_TEMPLATE = VBoxValidationKitR3
ValidationKitTestsStorage_INST = $(INST_VALIDATIONKIT)tests/storage/
ValidationKitTestsStorage_EXEC_SOURCES := \
	$(PATH_SUB_CURRENT)/tdStorageBenchmark1.py \
	$(PATH_SUB_CURRENT)/tdStorageSnapshotMerging1.py \
	$(PATH_SUB_CURRENT)/tdStorageStress1.py \
	$(PATH_SUB_CURRENT)/tdStorageRawDrive1.py \
	$(PATH_SUB_CURRENT)/remoteexecutor.py \
	$(PATH_SUB_CURRENT)/storagecfg.py

# Register the sources for python syntax checking / compilation.
VBOX_VALIDATIONKIT_PYTHON_SOURCES += $(ValidationKitTestsStorage_EXEC_SOURCES)

$(evalcall def_vbox_validationkit_process_python_sources)
include $(FILE_KBUILD_SUB_FOOTER)

diff --git a/src/VBox/ValidationKit/tests/storage/remoteexecutor.py b/src/VBox/ValidationKit/tests/storage/remoteexecutor.py
new file mode 100755
index 00000000..7f9b1532
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/remoteexecutor.py
@@ -0,0 +1,314 @@
+# -*- coding: utf-8 -*-
+# $Id: remoteexecutor.py $
+
+"""
+VirtualBox Validation Kit - Storage benchmark, test execution helpers.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2016-2023 Oracle and/or its affiliates.
+
+This file is part of VirtualBox base platform packages, as
+available from https://www.virtualbox.org.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation, in version 3 of the
+License.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <https://www.gnu.org/licenses>.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+in the VirtualBox distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+
+SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+"""
+__version__ = "$Revision: 155244 $"
+
+
+# Standard Python imports.
+import array;
+import os;
+import shutil;
+import sys;
+if sys.version_info[0] >= 3:
+ from io import StringIO as StringIO; # pylint: disable=import-error,no-name-in-module,useless-import-alias
+else:
+ from StringIO import StringIO as StringIO; # pylint: disable=import-error,no-name-in-module,useless-import-alias
+import subprocess;
+
+# Validation Kit imports.
+from common import utils;
+from testdriver import reporter;
+
+
+
class StdInOutBuffer(object):
    """
    In-memory stand-in for stdin/stdout file objects, used when wiring
    process I/O through TXS.
    """

    def __init__(self, sInput = None):
        """
        Initializes the buffer; sInput (string, bytes or array) seeds the
        readable side, the writable side starts out empty.
        """
        self.sInput = StringIO();
        if sInput is not None:
            self.sInput.write(self._toString(sInput));
            self.sInput.seek(0);
        self.sOutput = '';

    def _toString(self, sText):
        """
        Best effort conversion of bytes or array input to a string; anything
        else (including a failed array conversion) is returned as-is.
        """
        if isinstance(sText, bytes):
            return sText.decode('utf-8');
        if isinstance(sText, array.array):
            try:
                # array.tostring() was removed in Python 3.9 in favour of tobytes().
                if sys.version_info >= (3, 9, 0):
                    return str(sText.tobytes());
                return str(sText.tostring()); # pylint: disable=no-member
            except:
                pass;
        return sText;

    def read(self, cb):
        """file.read: return up to cb characters from the input side."""
        return self.sInput.read(cb);

    def write(self, sText):
        """file.write: append the (converted) text to the output side."""
        self.sOutput += self._toString(sText);
        return None;

    def getOutput(self):
        """
        Returns everything written to the buffer so far.
        """
        return self.sOutput;

    def close(self):
        """ file.close: no resources to release. """
        return;
+
class RemoteExecutor(object):
    """
    Helper for executing tests remotely through TXS or locally.

    With a TXS session, execution and file operations are forwarded to the
    remote test box; without one they are performed locally, with process
    execution going through sudo.
    """

    def __init__(self, oTxsSession = None, asBinaryPaths = None, sScratchPath = None):
        # oTxsSession:   TXS session for remote execution, None for local execution.
        # asBinaryPaths: directories searched when resolving binaries, see _getBinaryPath().
        # sScratchPath:  local scratch directory used by copyFile()/copyString().
        self.oTxsSession = oTxsSession;
        self.asPaths = asBinaryPaths;
        self.sScratchPath = sScratchPath;
        if self.asPaths is None:
            self.asPaths = [ ];

    def _getBinaryPath(self, sBinary):
        """
        Returns the complete path of the given binary if found in one of the
        configured search paths, otherwise returns sBinary unchanged (leaving
        resolution to the executing side's PATH).
        """
        for sPath in self.asPaths:
            sFile = sPath + '/' + sBinary;
            if self.isFile(sFile):
                return sFile;
        return sBinary;

    def _sudoExecuteSync(self, asArgs, sInput):
        """
        Executes a sudo child process synchronously, feeding sInput to stdin.
        Returns a (fRc, sStdOut, sStdErr) tuple; fRc is True if the process
        executed successfully and returned 0, False otherwise.
        """
        reporter.log('Executing [sudo]: %s' % (asArgs, ));
        reporter.flushall();
        fRc = True;
        sOutput = '';
        sError = '';
        try:
            oProcess = utils.sudoProcessPopen(asArgs, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
                                              stderr=subprocess.PIPE, shell = False, close_fds = False);

            sOutput, sError = oProcess.communicate(sInput);
            iExitCode = oProcess.poll();

            if iExitCode != 0:
                fRc = False;
        except:
            # Catch-all: failure to launch (missing binary, etc) is reported, not raised.
            reporter.errorXcpt();
            fRc = False;
        reporter.log('Exit code [sudo]: %s (%s)' % (fRc, asArgs));
        return (fRc, str(sOutput), str(sError));

    def _execLocallyOrThroughTxs(self, sExec, asArgs, sInput, cMsTimeout):
        """
        Executes the given program locally or through TXS based on the
        current config.  asArgs must be a tuple (the remote path prepends
        sExec via tuple concatenation).
        Returns a (fRc, sStdOut, sStdErr) tuple.
        """
        fRc = False;
        sOutput = None;
        if self.oTxsSession is not None:
            reporter.log('Executing [remote]: %s %s %s' % (sExec, asArgs, sInput));
            reporter.flushall();
            oStdOut = StdInOutBuffer();
            oStdErr = StdInOutBuffer();
            oTestPipe = reporter.FileWrapperTestPipe();
            oStdIn = None;
            if sInput is not None:
                oStdIn = StdInOutBuffer(sInput);
            else:
                # No input: hand TXS the null device instead of a buffer.
                oStdIn = '/dev/null'; # pylint: disable=redefined-variable-type
            fRc = self.oTxsSession.syncExecEx(sExec, (sExec,) + asArgs,
                                              oStdIn = oStdIn, oStdOut = oStdOut,
                                              oStdErr = oStdErr, oTestPipe = oTestPipe,
                                              cMsTimeout = cMsTimeout);
            sOutput = oStdOut.getOutput();
            sError = oStdErr.getOutput();
            if fRc is False:
                reporter.log('Exit code [remote]: %s (stdout: %s stderr: %s)' % (fRc, sOutput, sError));
            else:
                reporter.log('Exit code [remote]: %s' % (fRc,));
        else:
            fRc, sOutput, sError = self._sudoExecuteSync([sExec, ] + list(asArgs), sInput);
        return (fRc, sOutput, sError);

    def execBinary(self, sExec, asArgs, sInput = None, cMsTimeout = 3600000):
        """
        Executes the given binary with the given arguments
        providing some optional input through stdin and
        returning whether the process exited successfully and the output
        in a string.
        Returns a (fRc, sStdOut, sStdErr) tuple.
        """

        fRc = True;
        sOutput = None;
        sError = None;
        sBinary = self._getBinaryPath(sExec);
        # NOTE(review): _getBinaryPath() never returns None (it falls back to
        # sExec), so the else branch below looks unreachable — confirm.
        if sBinary is not None:
            fRc, sOutput, sError = self._execLocallyOrThroughTxs(sBinary, asArgs, sInput, cMsTimeout);
        else:
            fRc = False;
        return (fRc, sOutput, sError);

    def execBinaryNoStdOut(self, sExec, asArgs, sInput = None):
        """
        Executes the given binary with the given arguments
        providing some optional input through stdin and
        returning whether the process exited successfully.
        """
        fRc, _, _ = self.execBinary(sExec, asArgs, sInput);
        return fRc;

    def copyFile(self, sLocalFile, sFilename, cMsTimeout = 30000):
        """
        Copies the local file to the remote destination
        if configured.  cMsTimeout only applies to the TXS upload path.

        Returns a file ID which can be used as an input parameter
        to execBinary() resolving to the real filepath on the remote side
        or locally, or None on failure.
        """
        sFileId = None;
        if self.oTxsSession is not None:
            sFileId = '${SCRATCH}/' + sFilename;
            fRc = self.oTxsSession.syncUploadFile(sLocalFile, sFileId, cMsTimeout);
            if not fRc:
                sFileId = None;
        else:
            sFileId = self.sScratchPath + '/' + sFilename;
            try:
                shutil.copy(sLocalFile, sFileId);
            except:
                sFileId = None;

        return sFileId;

    def copyString(self, sContent, sFilename, cMsTimeout = 30000):
        """
        Creates a file remotely or locally with the given content.

        Returns a file ID which can be used as an input parameter
        to execBinary() resolving to the real filepath on the remote side
        or locally, or None on failure.
        """
        sFileId = None;
        if self.oTxsSession is not None:
            sFileId = '${SCRATCH}/' + sFilename;
            fRc = self.oTxsSession.syncUploadString(sContent, sFileId, cMsTimeout);
            if not fRc:
                sFileId = None;
        else:
            sFileId = self.sScratchPath + '/' + sFilename;
            try:
                # NOTE(review): the file is opened in binary mode, so a str
                # sContent raises on Python 3 — confirm callers pass bytes
                # here or only use the TXS path.
                with open(sFileId, 'wb') as oFile:
                    oFile.write(sContent);
            except:
                sFileId = None;

        return sFileId;

    def mkDir(self, sDir, fMode = 0o700, cMsTimeout = 30000):
        """
        Creates a new directory at the given location.
        """
        fRc = True;
        if self.oTxsSession is not None:
            fRc = self.oTxsSession.syncMkDir(sDir, fMode, cMsTimeout);
        elif not os.path.isdir(sDir):
            # NOTE(review): os.mkdir() returns None, so local success yields
            # None rather than True; callers testing the result's truthiness
            # will misread success as failure — confirm intended.
            fRc = os.mkdir(sDir, fMode);

        return fRc;

    def rmDir(self, sDir, cMsTimeout = 30000):
        """
        Removes the given directory.  cMsTimeout only applies to the TXS path.
        """
        fRc = True;
        if self.oTxsSession is not None:
            fRc = self.oTxsSession.syncRmDir(sDir, cMsTimeout);
        else:
            fRc = self.execBinaryNoStdOut('rmdir', (sDir,));

        return fRc;

    def rmTree(self, sDir, cMsTimeout = 30000):
        """
        Recursively removes all files and sub directories including the given directory.
        cMsTimeout only applies to the TXS path.
        """
        fRc = True;
        if self.oTxsSession is not None:
            fRc = self.oTxsSession.syncRmTree(sDir, cMsTimeout);
        else:
            try:
                shutil.rmtree(sDir, ignore_errors=True);
            except:
                fRc = False;

        return fRc;

    def isFile(self, sPath, cMsTimeout = 30000):
        """
        Checks that the given file exists.  cMsTimeout only applies to the TXS path.
        """
        fRc = True;
        if self.oTxsSession is not None:
            fRc = self.oTxsSession.syncIsFile(sPath, cMsTimeout);
        else:
            try:
                fRc = os.path.isfile(sPath);
            except:
                fRc = False;

        return fRc;
diff --git a/src/VBox/ValidationKit/tests/storage/storagecfg.py b/src/VBox/ValidationKit/tests/storage/storagecfg.py
new file mode 100755
index 00000000..c6bb2266
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/storagecfg.py
@@ -0,0 +1,681 @@
+# -*- coding: utf-8 -*-
+# $Id: storagecfg.py $
+
+"""
+VirtualBox Validation Kit - Storage test configuration API.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2016-2023 Oracle and/or its affiliates.
+
+This file is part of VirtualBox base platform packages, as
+available from https://www.virtualbox.org.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation, in version 3 of the
+License.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <https://www.gnu.org/licenses>.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+in the VirtualBox distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+
+SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+"""
+__version__ = "$Revision: 155244 $"
+
+# Standard Python imports.
+import os;
+import re;
+
+
class StorageDisk(object):
    """
    A single disk usable for testing, optionally RAM backed, with a simple
    claimed/unclaimed flag.
    """

    def __init__(self, sPath, fRamDisk = False):
        self.sPath    = sPath;      # Device path of the disk.
        self.fUsed    = False;      # Whether the disk is currently claimed.
        self.fRamDisk = fRamDisk;   # Whether the disk has a RAM backing.

    def getPath(self):
        """
        Returns the device path of this disk.
        """
        return self.sPath;

    def isUsed(self):
        """
        Returns whether the disk is currently in use.
        """
        return self.fUsed;

    def isRamDisk(self):
        """
        Returns whether the disk object has a RAM backing.
        """
        return self.fRamDisk;

    def setUsed(self, fUsed):
        """
        Updates the used flag for the disk; claiming an already claimed disk
        fails.  Returns True on success, False otherwise.
        """
        if not fUsed:
            self.fUsed = fUsed;
            return True;
        if self.fUsed:
            return False;
        self.fUsed = True;
        return True;
+
class StorageConfigOs(object):
    """
    Base class for a single host OS storage configuration.
    """

    def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
        """
        Returns a list of StorageDisk objects for the entries of sPath whose
        base name matches the given regular expression and which still exist.
        """
        oRegExp = re.compile(sRegExp);
        return [ StorageDisk(sPath + '/' + sFile)
                 for sFile in os.listdir(sPath)
                 if     oRegExp.match(os.path.basename(sFile))
                    and os.path.exists(sPath + '/' + sFile) ];
+
class StorageConfigOsSolaris(StorageConfigOs):
    """
    Class implementing the Solaris specifics for a storage configuration,
    using ZFS pools/volumes and ramdiskadm backed RAM disks.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        # Counter used to generate unique RAM disk names.
        self.idxRamDisk = 0;

    def _getActivePoolsStartingWith(self, oExec, sPoolIdStart):
        """
        Returns a list of pools starting with the given ID or None on failure.
        Parses 'zpool list -H' output (tab separated, pool name first).
        """
        lstPools = None;
        fRc, sOutput, _ = oExec.execBinary('zpool', ('list', '-H'));
        if fRc:
            lstPools = [];
            asPools = sOutput.splitlines();
            for sPool in asPools:
                if sPool.startswith(sPoolIdStart):
                    # Extract the whole name and add it to the list.
                    asItems = sPool.split('\t');
                    lstPools.append(asItems[0]);
        return lstPools;

    def _getActiveVolumesInPoolStartingWith(self, oExec, sPool, sVolumeIdStart):
        """
        Returns a list of active volumes for the given pool starting with the given
        identifier or None on failure.
        Parses 'zfs list -H' output (tab separated, dataset name first).
        """
        lstVolumes = None;
        fRc, sOutput, _ = oExec.execBinary('zfs', ('list', '-H'));
        if fRc:
            lstVolumes = [];
            asVolumes = sOutput.splitlines();
            for sVolume in asVolumes:
                if sVolume.startswith(sPool + '/' + sVolumeIdStart):
                    # Extract the whole name and add it to the list.
                    asItems = sVolume.split('\t');
                    lstVolumes.append(asItems[0]);
        return lstVolumes;

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        Solaris disk device nodes live under /dev/dsk.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/pools';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        Multiple disks with 'raid5' (or no level) become a raidz pool; anything
        else is a plain striped pool.
        """
        sZPoolRaid = None;
        if len(asDisks) > 1 and (sRaidLvl == 'raid5' or sRaidLvl is None):
            sZPoolRaid = 'raidz';

        fRc = True;
        if sZPoolRaid is not None:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool, sZPoolRaid,) + tuple(asDisks));
        else:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool,) + tuple(asDisks));

        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        # NOTE(review): cbVol is handed to zfs unconverted; confirm callers
        # pass a string zfs accepts (e.g. '100g') rather than an integer.
        fRc = True;
        if cbVol is not None:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, '-V', cbVol, sPool + '/' + sVol));
        else:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, sPool + '/' + sVol));

        # @todo Add proper parameters to set proper owner:group ownership, the testcase broke in r133060 for Solaris
        #       because creating directories is now done using the python mkdir API instead of calling 'sudo mkdir...'.
        #       No one noticed though because testboxstor1 went out of action before...
        #       Will get fixed as soon as I'm back home.
        if fRc:
            fRc = oExec.execBinaryNoStdOut('chmod', ('777', sMountPoint));

        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        fRc = oExec.execBinaryNoStdOut('zfs', ('destroy', sPool + '/' + sVol));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = oExec.execBinaryNoStdOut('zpool', ('destroy', sPool));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.  Returns False if any enumeration or destroy step failed.
        """
        fRc = True;
        lstPools = self._getActivePoolsStartingWith(oExec, sPoolIdStart);
        if lstPools is not None:
            for sPool in lstPools:
                lstVolumes = self._getActiveVolumesInPoolStartingWith(oExec, sPool, sVolIdStart);
                if lstVolumes is not None:
                    # Destroy all the volumes first
                    for sVolume in lstVolumes:
                        fRc2 = oExec.execBinaryNoStdOut('zfs', ('destroy', sVolume));
                        if not fRc2:
                            fRc = fRc2;

                    # Destroy the pool
                    fRc2 = self.destroyPool(oExec, sPool);
                    if not fRc2:
                        fRc = fRc2;
                else:
                    fRc = False;
        else:
            fRc = False;

        return fRc;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size (via ramdiskadm).
        Returns a StorageDisk object or None on failure.
        """
        oDisk = None;
        sRamDiskName = 'ramdisk%u' % (self.idxRamDisk,);
        fRc, _ , _ = oExec.execBinary('ramdiskadm', ('-a', sRamDiskName, str(cbRamDisk)));
        if fRc:
            self.idxRamDisk += 1;
            oDisk = StorageDisk('/dev/ramdisk/%s' % (sRamDiskName, ), True);

        return oDisk;

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        sRamDiskName = os.path.basename(oDisk.getPath());
        return oExec.execBinaryNoStdOut('ramdiskadm', ('-d', sRamDiskName));
+
class StorageConfigOsLinux(StorageConfigOs):
    """
    Class implementing the Linux specifics for a storage configuration.

    Single disk pools without a RAID level are handled without LVM by
    partitioning the disk directly; everything else goes through mdadm
    (for RAID) plus LVM.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.dSimplePools = { }; # Simple storage pools which don't use lvm (just one partition)
        self.dMounts = { }; # Pool/Volume to mountpoint mapping.

    def _getDmRaidLevelFromLvl(self, sRaidLvl):
        """
        Converts our raid level indicators to something mdadm can understand.
        Unknown levels fall back to 'stripe'.
        """
        if sRaidLvl is None or sRaidLvl == 'raid0':
            return 'stripe';
        if sRaidLvl == 'raid5':
            return '5';
        if sRaidLvl == 'raid1':
            return 'mirror';
        return 'stripe';

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/mnt';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        fRc = True;
        if len(asDisks) == 1 and sRaidLvl is None:
            # Doesn't require LVM, put into the simple pools dictionary so we can
            # use it when creating a volume later.
            self.dSimplePools[sPool] = asDisks[0];
        else:
            # If a RAID is required use dm-raid first to create one.
            # NOTE(review): the md device name is hard-coded to /dev/md0, so only
            # one RAID backed pool can exist at a time — confirm this is intended.
            asLvmPvDisks = asDisks;
            fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
                                                     '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
                                                     '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
            if fRc:
                # /dev/md0 is the only block device to use for our volume group.
                asLvmPvDisks = [ '/dev/md0' ];

            # Create a physical volume on every disk first.
            for sLvmPvDisk in asLvmPvDisks:
                fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
                if not fRc:
                    break;

            if fRc:
                # Create volume group with all physical volumes included
                fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts an ext4 filesystem at the given mountpoint using the
        given pool and volume IDs.  cbVol is the volume size in bytes.
        """
        fRc = True;
        sBlkDev = None;
        if sPool in self.dSimplePools:
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') != -1:
                # zram devices have a fixed size and can't be partitioned; use as-is.
                sBlkDev = sDiskPath;
            else:
                # Create a partition with the requested size
                sFdiskScript = ';\n'; # Single partition filling everything
                if cbVol is not None:
                    sFdiskScript = ',' + str(cbVol // 512) + '\n'; # Get number of sectors
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), \
                                               sFdiskScript);
                if fRc:
                    # NVMe partitions carry a 'p' separator (nvme0n1p1), others don't (sda1).
                    if sDiskPath.find('nvme') != -1:
                        sBlkDev = sDiskPath + 'p1';
                    else:
                        sBlkDev = sDiskPath + '1';
        else:
            if cbVol is None:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
            else:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
            if fRc:
                # Fix: LVM exposes logical volumes as /dev/mapper/<vg>-<lv>;
                # the path previously lacked the slash after 'mapper'.
                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol;

        if fRc is True and sBlkDev is not None:
            # Create a filesystem and mount it
            fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
            fRc = fRc and oExec.mkDir(sMountPoint);
            fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
            if fRc:
                self.dMounts[sPool + '/' + sVol] = sMountPoint;
        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume: unmounts it, removes the mountpoint and
        deletes the partition table entry or logical volume.
        """
        # Unmount first
        sMountPoint = self.dMounts[sPool + '/' + sVol];
        fRc = oExec.execBinaryNoStdOut('umount', (sMountPoint,));
        self.dMounts.pop(sPool + '/' + sVol);
        oExec.rmDir(sMountPoint);
        if sPool in self.dSimplePools:
            # Wipe partition table
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') == -1:
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', '--delete', \
                                                          sDiskPath));
        else:
            fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool (removes the volume group, or simply
        forgets about a simple single-disk pool).
        """
        fRc = True;
        if sPool in self.dSimplePools:
            self.dSimplePools.pop(sPool);
        else:
            fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        # @todo: Needs implementation, for LVM based configs a similar approach can be used
        #        as for Solaris.
        _ = oExec;
        _ = sPoolIdStart;
        _ = sVolIdStart;
        return True;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size using zram.
        Returns a StorageDisk object or None on failure.
        """
        # Make sure the ZRAM module is loaded.
        oDisk = None;
        fRc = oExec.execBinaryNoStdOut('modprobe', ('zram',));
        if fRc:
            # zramctl prints the allocated device node (e.g. /dev/zram0) on stdout.
            fRc, sOut, _ = oExec.execBinary('zramctl', ('--raw', '-f', '-s', str(cbRamDisk)));
            if fRc:
                oDisk = StorageDisk(sOut.rstrip(), True);

        return oDisk;

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        return oExec.execBinaryNoStdOut('zramctl', ('-r', oDisk.getPath()));
+
## @name Host disk config types.
## @{
g_ksDiskCfgStatic = 'StaticDir';
g_ksDiskCfgRegExp = 'RegExp';
g_ksDiskCfgList   = 'DiskList';
## @}

class DiskCfg(object):
    """
    Host disk configuration, pairing the target OS with one of the three
    disk config types (static directory, regular expression or disk list).
    """

    def __init__(self, sTargetOs, sCfgType, oDisks):
        self.sTargetOs = sTargetOs; # Host OS identifier ('solaris', 'linux', ...).
        self.sCfgType  = sCfgType;  # One of the g_ksDiskCfg* constants.
        self.oDisks    = oDisks;    # Config type dependent disk description.

    def getTargetOs(self):
        """ Returns the target OS identifier. """
        return self.sTargetOs;

    def getCfgType(self):
        """ Returns the disk config type. """
        return self.sCfgType;

    def isCfgStaticDir(self):
        """ Checks for the static directory config type. """
        return self.sCfgType == g_ksDiskCfgStatic;

    def isCfgRegExp(self):
        """ Checks for the regular expression config type. """
        return self.sCfgType == g_ksDiskCfgRegExp;

    def isCfgList(self):
        """ Checks for the disk list config type. """
        return self.sCfgType == g_ksDiskCfgList;

    def getDisks(self):
        """ Returns the config type dependent disk description. """
        return self.oDisks;
+
class StorageCfg(object):
    """
    Storage configuration helper class taking care of the different host OS.

    Wraps a DiskCfg and an OS specific StorageConfigOs* backend, tracking
    created pools and volumes so they can be torn down again.
    """

    def __init__(self, oExec, oDiskCfg):
        # oExec: RemoteExecutor style object used to run configuration commands.
        # oDiskCfg: DiskCfg describing the host disk configuration.
        self.oExec = oExec;
        self.lstDisks = [ ]; # List of disks present in the system.
        self.dPools = { }; # Dictionary of storage pools.
        self.dVols = { }; # Dictionary of volumes.
        self.iPoolId = 0;
        self.iVolId = 0;
        self.oDiskCfg = oDiskCfg;

        fRc = True;
        oStorOs = None;
        if oDiskCfg.getTargetOs() == 'solaris':
            oStorOs = StorageConfigOsSolaris();
        elif oDiskCfg.getTargetOs() == 'linux':
            oStorOs = StorageConfigOsLinux(); # pylint: disable=redefined-variable-type
        elif not oDiskCfg.isCfgStaticDir():
            # For unknown hosts only allow a static testing directory we don't care about setting up
            fRc = False;

        # NOTE(review): when fRc is False here self.oStorOs is never assigned,
        # so later use (e.g. cleanupLeftovers) would raise AttributeError —
        # confirm callers discard the object after a failed construction.
        if fRc:
            self.oStorOs = oStorOs;
            if oDiskCfg.isCfgRegExp():
                self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg.getDisks());
            elif oDiskCfg.isCfgList():
                # Assume a list of disks and add.
                for sDisk in oDiskCfg.getDisks():
                    self.lstDisks.append(StorageDisk(sDisk));
            elif oDiskCfg.isCfgStaticDir():
                if not os.path.exists(oDiskCfg.getDisks()):
                    self.oExec.mkDir(oDiskCfg.getDisks(), 0o700);

    def __del__(self):
        # Best effort teardown when the object is garbage collected.
        self.cleanup();
        self.oDiskCfg = None;

    def cleanup(self):
        """
        Cleans up any created storage configs.
        """

        if not self.oDiskCfg.isCfgStaticDir():
            # Destroy all volumes first.
            for sMountPoint in list(self.dVols.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyVolume(sMountPoint);

            # Destroy all pools.
            for sPool in list(self.dPools.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyStoragePool(sPool);

        self.dVols.clear();
        self.dPools.clear();
        self.iPoolId = 0;
        self.iVolId = 0;

    def getRawDisk(self):
        """
        Returns a raw disk device from the list of free devices for use,
        marking it used.  Returns None if no disk is free.
        """

        for oDisk in self.lstDisks:
            if oDisk.isUsed() is False:
                oDisk.setUsed(True);
                return oDisk.getPath();

        return None;

    def getUnusedDiskCount(self):
        """
        Returns the number of unused disks.
        """

        cDisksUnused = 0;
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                cDisksUnused += 1;

        return cDisksUnused;

    def createStoragePool(self, cDisks = 0, sRaidLvl = None,
                          cbPool = None, fRamDisk = False):
        """
        Create a new storage pool.  cDisks == 0 means use all unused disks.
        Returns an (fRc, sPool) tuple; sPool is the new pool's name.
        """
        lstDisks = [ ];
        fRc = True;
        sPool = None;

        if not self.oDiskCfg.isCfgStaticDir():
            if fRamDisk:
                oDisk = self.oStorOs.createRamDisk(self.oExec, cbPool);
                if oDisk is not None:
                    lstDisks.append(oDisk);
                    cDisks = 1;
            else:
                if cDisks == 0:
                    cDisks = self.getUnusedDiskCount();

                for oDisk in self.lstDisks:
                    if not oDisk.isUsed():
                        oDisk.setUsed(True);
                        lstDisks.append(oDisk);
                        if len(lstDisks) == cDisks:
                            break;

            # Enough drives to satisfy the request?
            if len(lstDisks) == cDisks:
                # Create a list of all device paths
                lstDiskPaths = [ ];
                for oDisk in lstDisks:
                    lstDiskPaths.append(oDisk.getPath());

                # Find a name for the pool
                sPool = 'pool' + str(self.iPoolId);
                self.iPoolId += 1;

                fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
                if fRc:
                    self.dPools[sPool] = lstDisks;
                else:
                    # Roll the pool ID back on failure so names stay dense.
                    self.iPoolId -= 1;
            else:
                fRc = False;

            # Cleanup in case of error.
            if not fRc:
                for oDisk in lstDisks:
                    oDisk.setUsed(False);
                    if oDisk.isRamDisk():
                        self.oStorOs.destroyRamDisk(self.oExec, oDisk);
        else:
            sPool = 'StaticDummy';

        return fRc, sPool;

    def destroyStoragePool(self, sPool):
        """
        Destroys the storage pool with the given ID, releasing its disks.
        """

        fRc = True;

        if not self.oDiskCfg.isCfgStaticDir():
            lstDisks = self.dPools.get(sPool);
            if lstDisks is not None:
                fRc = self.oStorOs.destroyPool(self.oExec, sPool);
                if fRc:
                    # Mark disks as unused
                    self.dPools.pop(sPool);
                    for oDisk in lstDisks:
                        oDisk.setUsed(False);
                        if oDisk.isRamDisk():
                            self.oStorOs.destroyRamDisk(self.oExec, oDisk);
            else:
                fRc = False;

        return fRc;

    def createVolume(self, sPool, cbVol = None):
        """
        Creates a new volume from the given pool returning the mountpoint.
        Returns an (fRc, sMountPoint) tuple.
        """

        fRc = True;
        sMountPoint = None;
        if not self.oDiskCfg.isCfgStaticDir():
            if sPool in self.dPools:
                sVol = 'vol' + str(self.iVolId);
                sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
                self.iVolId += 1;
                fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
                if fRc:
                    self.dVols[sMountPoint] = (sVol, sPool);
                else:
                    # Roll the volume ID back on failure so names stay dense.
                    self.iVolId -= 1;
            else:
                fRc = False;
        else:
            sMountPoint = self.oDiskCfg.getDisks();

        return fRc, sMountPoint;

    def destroyVolume(self, sMountPoint):
        """
        Destroy the volume at the given mount point.
        """

        fRc = True;
        if not self.oDiskCfg.isCfgStaticDir():
            # NOTE(review): dVols.get() returns None for an unknown mount
            # point, which would raise TypeError on unpacking — confirm
            # callers only pass mount points previously handed out.
            sVol, sPool = self.dVols.get(sMountPoint);
            if sVol is not None:
                fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
                if fRc:
                    self.dVols.pop(sMountPoint);
            else:
                fRc = False;

        return fRc;

    def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0o700):
        """
        Creates a new directory on the volume pointed to by the given mount point.
        """
        return self.oExec.mkDir(sMountPoint + '/' + sDir, fMode);

    def cleanupLeftovers(self):
        """
        Tries to cleanup any leftover pools and volumes from a failed previous run.
        """
        if not self.oDiskCfg.isCfgStaticDir():
            return self.oStorOs.cleanupPoolsAndVolumes(self.oExec, 'pool', 'vol');

        # Static directory config: just wipe everything inside the directory.
        fRc = True;
        if os.path.exists(self.oDiskCfg.getDisks()):
            for sEntry in os.listdir(self.oDiskCfg.getDisks()):
                fRc = fRc and self.oExec.rmTree(os.path.join(self.oDiskCfg.getDisks(), sEntry));

        return fRc;
diff --git a/src/VBox/ValidationKit/tests/storage/tdStorageBenchmark1.py b/src/VBox/ValidationKit/tests/storage/tdStorageBenchmark1.py
new file mode 100755
index 00000000..17dc09b2
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/tdStorageBenchmark1.py
@@ -0,0 +1,1469 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# $Id: tdStorageBenchmark1.py $
+
+"""
+VirtualBox Validation Kit - Storage benchmark.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2012-2023 Oracle and/or its affiliates.
+
+This file is part of VirtualBox base platform packages, as
+available from https://www.virtualbox.org.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation, in version 3 of the
+License.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <https://www.gnu.org/licenses>.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+in the VirtualBox distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+
+SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+"""
+__version__ = "$Revision: 155244 $"
+
+
+# Standard Python imports.
+import os;
+import socket;
+import sys;
+if sys.version_info[0] >= 3:
+ from io import StringIO as StringIO; # pylint: disable=import-error,no-name-in-module,useless-import-alias
+else:
+ from StringIO import StringIO as StringIO; # pylint: disable=import-error,no-name-in-module,useless-import-alias
+
+# Only the main script needs to modify the path.
+try: __file__
+except: __file__ = sys.argv[0];
+g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
+sys.path.append(g_ksValidationKitDir);
+
+# Validation Kit imports.
+from common import constants;
+from common import utils;
+from testdriver import reporter;
+from testdriver import base;
+from testdriver import vbox;
+from testdriver import vboxcon;
+from testdriver import vboxwrappers;
+
+import remoteexecutor;
+import storagecfg;
+
+
class FioTest(object):
    """
    Flexible I/O tester (fio) testcase.

    Builds a fio job file exercising sequential and random reads and writes,
    uploads it through the given executor and runs fio with it.
    """

    # Maps a target OS to the fio I/O engine to use and whether direct
    # (unbuffered) I/O is supported there.
    kdHostIoEngine = {
        'solaris': ('solarisaio', False),
        'linux': ('libaio', True)
    };

    def __init__(self, oExecutor, dCfg = None):
        self.oExecutor  = oExecutor;
        self.sCfgFileId = None;  # Remote ID of the uploaded job file.
        self.dCfg       = dCfg;
        self.sError     = None;  # Error report of the last failed run().
        self.sResult    = None;  # Output of the last successful run().

    def prepare(self, cMsTimeout = 30000):
        """
        Prepares the testcase: builds the fio job file and uploads it.
        Returns True on success, False otherwise.
        """
        reporter.testStart('Fio');

        sTargetOs = self.dCfg.get('TargetOs', 'linux');
        # Use a default tuple so an unsupported target OS fails cleanly
        # instead of raising TypeError when unpacking None.
        sIoEngine, fDirectIo = self.kdHostIoEngine.get(sTargetOs, (None, False));
        if sIoEngine is None:
            return False;

        cfgBuf = StringIO();
        cfgBuf.write('[global]\n');
        cfgBuf.write('bs=' + str(self.dCfg.get('RecordSize', 4096)) + '\n');
        cfgBuf.write('ioengine=' + sIoEngine + '\n');
        cfgBuf.write('iodepth=' + str(self.dCfg.get('QueueDepth', 32)) + '\n');
        cfgBuf.write('size=' + str(self.dCfg.get('TestsetSize', 2147483648)) + '\n');
        if fDirectIo:
            cfgBuf.write('direct=1\n');
        else:
            cfgBuf.write('direct=0\n');
        cfgBuf.write('directory=' + self.dCfg.get('FilePath', '/mnt') + '\n');
        # The trailing newline was missing here, which fused the '[seq-write]'
        # section header onto the filename= line and corrupted the job file.
        cfgBuf.write('filename=fio.test.file\n');

        cfgBuf.write('[seq-write]\n');
        cfgBuf.write('rw=write\n');
        cfgBuf.write('stonewall\n');

        cfgBuf.write('[rand-write]\n');
        cfgBuf.write('rw=randwrite\n');
        cfgBuf.write('stonewall\n');

        cfgBuf.write('[seq-read]\n');
        cfgBuf.write('rw=read\n');
        cfgBuf.write('stonewall\n');

        cfgBuf.write('[rand-read]\n');
        cfgBuf.write('rw=randread\n');
        cfgBuf.write('stonewall\n');

        self.sCfgFileId = self.oExecutor.copyString(cfgBuf.getvalue(), 'aio-test', cMsTimeout);
        return self.sCfgFileId is not None;

    def run(self, cMsTimeout = 30000):
        """ Runs fio with the uploaded job file; returns True on success. """
        fRc, sOutput, sError = self.oExecutor.execBinary('fio', (self.sCfgFileId,), cMsTimeout = cMsTimeout);
        if fRc:
            self.sResult = sOutput;
        else:
            self.sError = ('Binary: fio\n' +
                           '\nOutput:\n\n' +
                           sOutput +
                           '\nError:\n\n' +
                           sError);
        return fRc;

    def cleanup(self):
        """ Cleans up any leftovers from the testcase. """
        reporter.testDone();
        return True;

    def reportResult(self):
        """
        Reports the test results to the test manager.
        Nothing to report here at the moment.
        """
        return True;

    def getErrorReport(self):
        """
        Returns the error report in case the testcase failed.
        """
        return self.sError;
+
class IozoneTest(object):
    """
    I/O zone (iozone) testcase: runs iozone and extracts the throughput
    values from its summary output.
    """
    def __init__(self, oExecutor, dCfg = None):
        self.oExecutor = oExecutor;
        self.sResult = None;  # Output of the last successful run().
        self.sError = None;   # Error report of the last failed run().
        # (needle in the iozone summary line, value name for the test manager)
        self.lstTests = [ ('initial writers', 'FirstWrite'),
                          ('rewriters', 'Rewrite'),
                          ('re-readers', 'ReRead'),
                          ('stride readers', 'StrideRead'),
                          ('reverse readers', 'ReverseRead'),
                          ('random readers', 'RandomRead'),
                          ('mixed workload', 'MixedWorkload'),
                          ('random writers', 'RandomWrite'),
                          ('pwrite writers', 'PWrite'),
                          ('pread readers', 'PRead'),
                          ('fwriters', 'FWrite'),
                          ('freaders', 'FRead'),
                          ('readers', 'FirstRead')];
        # iozone takes the record and test set sizes in KiB.
        self.sRecordSize = str(int(dCfg.get('RecordSize', 4096) / 1024));
        self.sTestsetSize = str(int(dCfg.get('TestsetSize', 2147483648) / 1024));
        self.sQueueDepth = str(int(dCfg.get('QueueDepth', 32)));
        self.sFilePath = dCfg.get('FilePath', '/mnt/iozone');
        self.fDirectIo = True;

        sTargetOs = dCfg.get('TargetOs');
        if sTargetOs == 'solaris':
            self.fDirectIo = False;  # No direct I/O (-I) on Solaris here.

    def prepare(self, cMsTimeout = 30000):
        """ Prepares the testcase """
        reporter.testStart('IoZone');
        _ = cMsTimeout;
        return True; # Nothing to do.

    def run(self, cMsTimeout = 30000):
        """ Runs iozone; returns True on success. """
        tupArgs = ('-r', self.sRecordSize, '-s', self.sTestsetSize, \
                   '-t', '1', '-T', '-F', self.sFilePath + '/iozone.tmp');
        if self.fDirectIo:
            tupArgs += ('-I',);
        fRc, sOutput, sError = self.oExecutor.execBinary('iozone', tupArgs, cMsTimeout = cMsTimeout);
        if fRc:
            self.sResult = sOutput;
        else:
            self.sError = ('Binary: iozone\n' +
                           '\nOutput:\n\n' +
                           sOutput +
                           '\nError:\n\n' +
                           sError);
        return fRc;

    def cleanup(self):
        """ Cleans up any leftovers from the testcase. """
        reporter.testDone();
        return True;

    def reportResult(self):
        """
        Reports the test results to the test manager.

        Parses the 'Children ...' summary lines of the iozone output and
        submits one value per known sub test.  Returns True on success,
        False if there is no result or parsing failed.
        """

        fRc = True;
        if self.sResult is not None:
            try:
                asLines = self.sResult.splitlines();
                for sLine in asLines:
                    sLine = sLine.strip();
                    if sLine.startswith('Children'):
                        # Extract the value following the '=' sign.
                        idxValue = sLine.rfind('=');
                        if idxValue == -1:
                            raise Exception('IozoneTest: Invalid state');

                        idxValue += 1;
                        while sLine[idxValue] == ' ':
                            idxValue += 1;

                        # Get the reported value, cut off after the decimal point
                        # it is not supported by the testmanager yet and is not really
                        # relevant anyway.  Guard against digits running to the
                        # end of the line to avoid an IndexError.
                        idxValueEnd = idxValue;
                        while idxValueEnd < len(sLine) and sLine[idxValueEnd].isdigit():
                            idxValueEnd += 1;

                        for sNeedle, sTestVal in self.lstTests:
                            if sLine.rfind(sNeedle) != -1:
                                reporter.testValue(sTestVal, sLine[idxValue:idxValueEnd],
                                                   constants.valueunit.g_asNames[constants.valueunit.KILOBYTES_PER_SEC]);
                                break;
            except Exception:  # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
                fRc = False;
        else:
            fRc = False;

        return fRc;

    def getErrorReport(self):
        """
        Returns the error report in case the testcase failed.
        """
        return self.sError;
+
class IoPerfTest(object):
    """
    IoPerf testcase: runs the Validation Kit's own IoPerf binary from the
    guest additions/validation kit CD-ROM.
    """
    def __init__(self, oExecutor, dCfg = None):
        self.oExecutor = oExecutor;
        self.sResult = None;  # Output of the last successful run().
        self.sError = None;   # Error report of the last failed run().
        # Default block size is 4096 bytes.  This was '4094' before, an
        # obvious typo: every other testcase and test set uses the 4 KiB
        # power of two.
        self.sRecordSize = str(dCfg.get('RecordSize', 4096));
        self.sTestsetSize = str(dCfg.get('TestsetSize', 2147483648));
        self.sQueueDepth = str(dCfg.get('QueueDepth', 32));
        self.sFilePath = dCfg.get('FilePath', '/mnt');
        self.fDirectIo = True;
        # Candidate locations of the IoPerf binary on the guest.
        self.asGstIoPerfPaths = [
            '${CDROM}/vboxvalidationkit/${OS/ARCH}/IoPerf${EXESUFF}',
            '${CDROM}/${OS/ARCH}/IoPerf${EXESUFF}',
        ];

        sTargetOs = dCfg.get('TargetOs');
        if sTargetOs == 'solaris':
            self.fDirectIo = False;  # No unbuffered I/O mode there.

    def _locateGstIoPerf(self):
        """
        Returns guest side path to IoPerf, falling back to the first
        candidate path if none of them exists.
        """
        for sIoPerfPath in self.asGstIoPerfPaths:
            if self.oExecutor.isFile(sIoPerfPath):
                return sIoPerfPath;
        # Message used to say 'FsPerf'; this testcase locates IoPerf.
        reporter.log('Unable to find guest IoPerf in any of these places: %s' % ('\n '.join(self.asGstIoPerfPaths),));
        return self.asGstIoPerfPaths[0];

    def prepare(self, cMsTimeout = 30000):
        """ Prepares the testcase """
        _ = cMsTimeout;
        return True; # Nothing to do.

    def run(self, cMsTimeout = 30000):
        """ Runs IoPerf; returns True on success. """
        tupArgs = ('--block-size', self.sRecordSize, '--test-set-size', self.sTestsetSize, \
                   '--maximum-requests', self.sQueueDepth, '--dir', self.sFilePath + '/ioperfdir-1');
        if self.fDirectIo:
            tupArgs += ('--use-cache', 'off');
        fRc, sOutput, sError = self.oExecutor.execBinary(self._locateGstIoPerf(), tupArgs, cMsTimeout = cMsTimeout);
        if fRc:
            self.sResult = sOutput;
        else:
            if sError is None:
                sError = '';
            if sOutput is None:
                sOutput = '';
            self.sError = ('Binary: IoPerf\n' +
                           '\nOutput:\n\n' +
                           sOutput +
                           '\nError:\n\n' +
                           sError);
        return fRc;

    def cleanup(self):
        """ Cleans up any leftovers from the testcase. """
        return True;

    def reportResult(self):
        """
        Reports the test results to the test manager.
        """
        # Should be done using the test pipe already.
        return True;

    def getErrorReport(self):
        """
        Returns the error report in case the testcase failed.
        """
        return self.sError;
+
class StorTestCfgMgr(object):
    """
    Manages the different testcases.

    The test configuration is a multi dimensional matrix, one dimension per
    test level.  This class iterates over every combination - odometer style,
    with level 0 (the reversed last level) moving fastest - skipping
    blacklisted/unsupported combinations and keeping the test manager's
    sub test nesting in sync.
    """

    def __init__(self, aasTestLvls, aasTestsBlacklist, fnIsCfgSupported = None):
        self.aasTestsBlacklist = aasTestsBlacklist;
        self.at4TestLvls = [];
        self.iTestLvl = 0;
        self.fnIsCfgSupported = fnIsCfgSupported;
        # Each level becomes a 4-tuple: (current index, start sub test
        # automatically, optional test-name formatter, list of values).
        for asTestLvl in aasTestLvls:
            if isinstance(asTestLvl, tuple):
                asTestLvl, fSubTestStartAuto, fnTestFmt = asTestLvl;
                self.at4TestLvls.append((0, fSubTestStartAuto, fnTestFmt, asTestLvl));
            else:
                self.at4TestLvls.append((0, True, None, asTestLvl));

        # Reversed so that index 0 is the fastest moving (innermost) level.
        self.at4TestLvls.reverse();

        # Get the first non blacklisted test.
        asTestCfg = self.getCurrentTestCfg();
        while asTestCfg and self.isTestCfgBlacklisted(asTestCfg):
            asTestCfg = self.advanceTestCfg();

        # Open the sub test for each auto-start level of the first config.
        iLvl = 0;
        for sCfg in asTestCfg:
            sSubTest = self.getTestIdString(sCfg, iLvl);
            if sSubTest is not None:
                reporter.testStart('%s' % (sSubTest,));
            iLvl += 1;

    def __del__(self):
        # Make sure the tests are marked as done.
        while self.iTestLvl < len(self.at4TestLvls):
            reporter.testDone();
            self.iTestLvl += 1;

    def getTestIdString(self, oCfg, iLvl):
        """
        Returns a potentially formatted string for the test name, or None
        when the sub test for this level is not started automatically.
        """

        # The order of the test levels is reversed so get the level starting
        # from the end.
        _, fSubTestStartAuto, fnTestFmt, _ = self.at4TestLvls[len(self.at4TestLvls) - 1 - iLvl];
        if not fSubTestStartAuto:
            return None;
        if fnTestFmt is not None:
            return fnTestFmt(oCfg);
        return oCfg;

    def isTestCfgBlacklisted(self, asTestCfg):
        """
        Returns whether the given test config is black listed.

        A blacklist entry matches when each of its items equals the
        corresponding config item or is the '*' wildcard.
        """
        fBlacklisted = False;

        for asTestBlacklist in self.aasTestsBlacklist:
            iLvl = 0;
            fBlacklisted = True;
            while iLvl < len(asTestBlacklist) and iLvl < len(asTestCfg):
                if asTestBlacklist[iLvl] != asTestCfg[iLvl] and asTestBlacklist[iLvl] != '*':
                    fBlacklisted = False;
                    break;

                iLvl += 1;

            # Stop at the first matching entry.  Previously the loop went on
            # and a later non-matching entry reset fBlacklisted to False,
            # silently un-blacklisting configs matched by earlier entries.
            if fBlacklisted:
                break;

        if not fBlacklisted and self.fnIsCfgSupported is not None:
            fBlacklisted = not self.fnIsCfgSupported(asTestCfg);

        return fBlacklisted;

    def advanceTestCfg(self):
        """
        Advances to the next test config and returns it as an
        array of strings or an empty config if there is no test left anymore.
        """
        iTestCfg, fSubTestStartAuto, fnTestFmt, asTestCfg = self.at4TestLvls[self.iTestLvl];
        iTestCfg += 1;
        self.at4TestLvls[self.iTestLvl] = (iTestCfg, fSubTestStartAuto, fnTestFmt, asTestCfg);
        # Odometer carry: when a level wraps, reset it and bump the next one.
        while iTestCfg == len(asTestCfg) and self.iTestLvl < len(self.at4TestLvls):
            self.at4TestLvls[self.iTestLvl] = (0, fSubTestStartAuto, fnTestFmt, asTestCfg);
            self.iTestLvl += 1;
            if self.iTestLvl < len(self.at4TestLvls):
                iTestCfg, fSubTestStartAuto, fnTestFmt, asTestCfg = self.at4TestLvls[self.iTestLvl];
                iTestCfg += 1;
                self.at4TestLvls[self.iTestLvl] = (iTestCfg, fSubTestStartAuto, fnTestFmt, asTestCfg);
                if iTestCfg < len(asTestCfg):
                    self.iTestLvl = 0;
                    break;
            else:
                break; # We reached the end of our tests.

        return self.getCurrentTestCfg();

    def getCurrentTestCfg(self):
        """
        Returns the current not black listed test config as an array of strings.
        """
        asTestCfg = [];

        if self.iTestLvl < len(self.at4TestLvls):
            for t4TestLvl in self.at4TestLvls:
                iTestCfg, _, _, asTestLvl = t4TestLvl;
                asTestCfg.append(asTestLvl[iTestCfg]);

            # Undo the internal level reversal for the caller.
            asTestCfg.reverse()

        return asTestCfg;

    def getNextTestCfg(self):
        """
        Returns the next not blacklisted test config or an empty list if
        there is no test left.
        """
        asTestCfgCur = self.getCurrentTestCfg();

        asTestCfg = self.advanceTestCfg();
        while asTestCfg and self.isTestCfgBlacklisted(asTestCfg):
            asTestCfg = self.advanceTestCfg();

        # Compare the current and next config and close the approriate test
        # categories.
        #reporter.testDone(fSkippedLast);
        if asTestCfg:
            idxSame = 0;
            while asTestCfgCur[idxSame] == asTestCfg[idxSame]:
                idxSame += 1;

            for i in range(idxSame, len(asTestCfg) - 1):
                reporter.testDone();

            for i in range(idxSame, len(asTestCfg)):
                sSubTest = self.getTestIdString(asTestCfg[i], i);
                if sSubTest is not None:
                    reporter.testStart('%s' % (sSubTest,));

        else:
            # No more tests, mark all tests as done
            for i in range(0, len(asTestCfgCur) - 1):
                reporter.testDone();

        return asTestCfg;
+
+class tdStorageBenchmark(vbox.TestDriver): # pylint: disable=too-many-instance-attributes
+ """
+ Storage benchmark.
+ """
+
    # Global storage configs for the testbox, keyed by the host name as
    # returned by socket.getfqdn().
    kdStorageCfgs = {
        # Testbox configs (Flag whether to test raw mode on the testbox, disk configuration)
        'testboxstor1.de.oracle.com': (True, storagecfg.DiskCfg('solaris', storagecfg.g_ksDiskCfgRegExp, r'c[3-9]t\dd0\Z')),
        # Windows testbox doesn't return testboxstor2.de.oracle.com from socket.getfqdn()
        'testboxstor2': (False, storagecfg.DiskCfg('win', storagecfg.g_ksDiskCfgStatic, 'D:\\StorageTest')),

        # Local test configs for the testcase developer
        'adaris': (True, storagecfg.DiskCfg('linux', storagecfg.g_ksDiskCfgStatic, \
                                     '/home/alexander/StorageScratch')),
        'daedalus': (True, storagecfg.DiskCfg('darwin', storagecfg.g_ksDiskCfgStatic, \
                                       '/Volumes/VirtualBox/Testsuite/StorageScratch')),
        'windows10': (True, storagecfg.DiskCfg('win', storagecfg.g_ksDiskCfgStatic, \
                                         'L:\\Testsuite\\StorageTest')),
    };

    # Available test sets.  Sizes are in bytes; DiskSizeGb is the size of the
    # disk image to create for the set.
    kdTestSets = {
        # Mostly for developing and debugging the testcase.
        'Fast': {
            'RecordSize': 65536,
            'TestsetSize': 104857600, # 100 MiB
            'QueueDepth': 32,
            'DiskSizeGb': 2
        },
        # For quick functionality tests where benchmark results are not required.
        'Functionality': {
            'RecordSize': 65536,
            'TestsetSize': 2147483648, # 2 GiB
            'QueueDepth': 32,
            'DiskSizeGb': 10
        },
        # For benchmarking the I/O stack.
        'Benchmark': {
            'RecordSize': 65536,
            'TestsetSize': 21474836480, # 20 GiB
            'QueueDepth': 32,
            'DiskSizeGb': 30
        },
        # For stress testing which takes a lot of time.
        'Stress': {
            'RecordSize': 65536,
            'TestsetSize': 2199023255552, # 2 TiB
            'QueueDepth': 32,
            'DiskSizeGb': 10000
        },
    };

    # Dictionary mapping the virtualization mode mnemonics to a little less cryptic
    # strings used in test descriptions.
    kdVirtModeDescs = {
        'raw' : 'Raw-mode',
        'hwvirt' : 'HwVirt',
        'hwvirt-np' : 'NestedPaging'
    };

    # Dictionary mapping the host I/O cache settings to test description strings.
    kdHostIoCacheDescs = {
        'default' : 'HostCacheDef',
        'hostiocache' : 'HostCacheOn',
        'no-hostiocache' : 'HostCacheOff'
    };

    # Password ID for encryption.
    ksPwId = 'EncPwId';

    # Array indexes for the test configs (the asTestCfg arrays handed out by
    # StorTestCfgMgr, e.g. in isTestCfgSupported()).
    kiVmName = 0;
    kiStorageCtrl = 1;
    kiHostIoCache = 2;
    kiDiskFmt = 3;
    kiDiskVar = 4;
    kiCpuCount = 5;
    kiVirtMode = 6;
    kiTestSet = 7;
    kiIoTest = 8;
+
    def __init__(self):
        """
        Initializes the test driver with the default test matrix and options.
        For each option the *Def member holds the default and the sibling
        member the currently selected value.
        """
        vbox.TestDriver.__init__(self);
        self.asRsrcs = None;
        self.asTestVMsDef = ['tst-storage', 'tst-storage32'];
        self.asTestVMs = self.asTestVMsDef;
        self.asSkipVMs = [];
        self.asVirtModesDef = ['hwvirt', 'hwvirt-np', 'raw',]
        self.asVirtModes = self.asVirtModesDef;
        self.acCpusDef = [1, 2];
        self.acCpus = self.acCpusDef;
        self.asStorageCtrlsDef = ['AHCI', 'IDE', 'LsiLogicSAS', 'LsiLogic', 'BusLogic', 'NVMe', 'VirtIoScsi'];
        self.asStorageCtrls = self.asStorageCtrlsDef;
        self.asHostIoCacheDef = ['default', 'hostiocache', 'no-hostiocache'];
        self.asHostIoCache = self.asHostIoCacheDef;
        self.asDiskFormatsDef = ['VDI', 'VMDK', 'VHD', 'QED', 'Parallels', 'QCOW', 'iSCSI'];
        self.asDiskFormats = self.asDiskFormatsDef;
        self.asDiskVariantsDef = ['Dynamic', 'Fixed', 'DynamicSplit2G', 'FixedSplit2G', 'Network'];
        self.asDiskVariants = self.asDiskVariantsDef;
        self.asTestsDef = ['ioperf'];
        self.asTests = self.asTestsDef;
        self.asTestSetsDef = ['Fast', 'Functionality', 'Benchmark', 'Stress'];
        self.asTestSets = self.asTestSetsDef;
        self.asIscsiTargetsDef = [ ]; # @todo: Configure one target for basic iSCSI testing
        self.asIscsiTargets = self.asIscsiTargetsDef;
        self.cDiffLvlsDef = 0;
        self.cDiffLvls = self.cDiffLvlsDef;
        # Behavior switches (see showUsage for the corresponding options).
        self.fTestHost = False;
        self.fUseScratch = False;
        self.fRecreateStorCfg = True;
        self.fReportBenchmarkResults = True;
        self.fTestRawMode = False;
        self.oStorCfg = None;
        # I/O logging and disk encryption settings.
        self.sIoLogPathDef = self.sScratchPath;
        self.sIoLogPath = self.sIoLogPathDef;
        self.fIoLog = False;
        self.fUseRamDiskDef = False;
        self.fUseRamDisk = self.fUseRamDiskDef;
        self.fEncryptDiskDef = False;
        self.fEncryptDisk = self.fEncryptDiskDef;
        self.sEncryptPwDef = 'TestTestTest';
        self.sEncryptPw = self.sEncryptPwDef;
        self.sEncryptAlgoDef = 'AES-XTS256-PLAIN64';
        self.sEncryptAlgo = self.sEncryptAlgoDef;
+
+ #
+ # Overridden methods.
+ #
    def showUsage(self):
        """
        Shows the testdriver usage (overridden to append this testcase's
        options to the base driver's output).
        """
        rc = vbox.TestDriver.showUsage(self);
        reporter.log('');
        reporter.log('tdStorageBenchmark1 Options:');
        reporter.log('  --virt-modes    <m1[:m2[:]]');
        reporter.log('      Default: %s' % (':'.join(self.asVirtModesDef)));
        reporter.log('  --cpu-counts    <c1[:c2[:]]');
        reporter.log('      Default: %s' % (':'.join(str(c) for c in self.acCpusDef)));
        reporter.log('  --storage-ctrls <type1[:type2[:...]]>');
        reporter.log('      Default: %s' % (':'.join(self.asStorageCtrlsDef)));
        reporter.log('  --host-io-cache <setting1[:setting2[:...]]>');
        reporter.log('      Default: %s' % (':'.join(self.asHostIoCacheDef)));
        reporter.log('  --disk-formats  <type1[:type2[:...]]>');
        reporter.log('      Default: %s' % (':'.join(self.asDiskFormatsDef)));
        reporter.log('  --disk-variants <variant1[:variant2[:...]]>');
        reporter.log('      Default: %s' % (':'.join(self.asDiskVariantsDef)));
        reporter.log('  --iscsi-targets <target1[:target2[:...]]>');
        reporter.log('      Default: %s' % (':'.join(self.asIscsiTargetsDef)));
        reporter.log('  --tests         <test1[:test2[:...]]>');
        reporter.log('      Default: %s' % (':'.join(self.asTestsDef)));
        reporter.log('  --test-sets     <set1[:set2[:...]]>');
        reporter.log('      Default: %s' % (':'.join(self.asTestSetsDef)));
        reporter.log('  --diff-levels   <number of diffs>');
        reporter.log('      Default: %s' % (self.cDiffLvlsDef));
        reporter.log('  --test-vms      <vm1[:vm2[:...]]>');
        reporter.log('      Test the specified VMs in the given order. Use this to change');
        reporter.log('      the execution order or limit the choice of VMs');
        reporter.log('      Default: %s  (all)' % (':'.join(self.asTestVMsDef)));
        reporter.log('  --skip-vms      <vm1[:vm2[:...]]>');
        reporter.log('      Skip the specified VMs when testing.');
        reporter.log('  --test-host');
        reporter.log('      Do all configured tests on the host first and report the results');
        reporter.log('      to get a baseline');
        reporter.log('  --use-scratch');
        reporter.log('      Use the scratch directory for testing instead of setting up');
        reporter.log('      fresh volumes on dedicated disks (for development)');
        reporter.log('  --always-wipe-storage-cfg');
        reporter.log('      Recreate the host storage config before each test');
        reporter.log('  --dont-wipe-storage-cfg');
        reporter.log('      Don\'t recreate the host storage config before each test');
        reporter.log('  --report-benchmark-results');
        reporter.log('      Report all benchmark results');
        reporter.log('  --dont-report-benchmark-results');
        reporter.log('      Don\'t report any benchmark results');
        reporter.log('  --io-log-path <path>');
        reporter.log('      Default: %s' % (self.sIoLogPathDef));
        reporter.log('  --enable-io-log');
        reporter.log('      Whether to enable I/O logging for each test');
        reporter.log('  --use-ramdisk');
        reporter.log('      Default: %s' % (self.fUseRamDiskDef));
        reporter.log('  --encrypt-disk');
        reporter.log('      Default: %s' % (self.fEncryptDiskDef));
        reporter.log('  --encrypt-password');
        reporter.log('      Default: %s' % (self.sEncryptPwDef));
        reporter.log('  --encrypt-algorithm');
        reporter.log('      Default: %s' % (self.sEncryptAlgoDef));
        return rc;
+
    def parseOption(self, asArgs, iArg): # pylint: disable=too-many-branches,too-many-statements
        """
        Parses the options this testcase recognizes (overridden); unknown
        options are forwarded to the base driver.  Returns the index of the
        next argument to process; raises base.InvalidOption on bad input.
        """
        if asArgs[iArg] == '--virt-modes':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--virt-modes" takes a colon separated list of modes');
            self.asVirtModes = asArgs[iArg].split(':');
            for s in self.asVirtModes:
                if s not in self.asVirtModesDef:
                    raise base.InvalidOption('The "--virt-modes" value "%s" is not valid; valid values are: %s' \
                        % (s, ' '.join(self.asVirtModesDef)));
        elif asArgs[iArg] == '--cpu-counts':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--cpu-counts" takes a colon separated list of cpu counts');
            self.acCpus = [];
            for s in asArgs[iArg].split(':'):
                try: c = int(s);
                except: raise base.InvalidOption('The "--cpu-counts" value "%s" is not an integer' % (s,));
                if c <= 0:  raise base.InvalidOption('The "--cpu-counts" value "%s" is zero or negative' % (s,));
                self.acCpus.append(c);
        elif asArgs[iArg] == '--storage-ctrls':
            iArg += 1;
            if iArg >= len(asArgs):
                raise base.InvalidOption('The "--storage-ctrls" takes a colon separated list of Storage controller types');
            self.asStorageCtrls = asArgs[iArg].split(':');
        elif asArgs[iArg] == '--host-io-cache':
            iArg += 1;
            if iArg >= len(asArgs):
                raise base.InvalidOption('The "--host-io-cache" takes a colon separated list of I/O cache settings');
            self.asHostIoCache = asArgs[iArg].split(':');
        elif asArgs[iArg] == '--disk-formats':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--disk-formats" takes a colon separated list of disk formats');
            self.asDiskFormats = asArgs[iArg].split(':');
        elif asArgs[iArg] == '--disk-variants':
            iArg += 1;
            if iArg >= len(asArgs):
                raise base.InvalidOption('The "--disk-variants" takes a colon separated list of disk variants');
            self.asDiskVariants = asArgs[iArg].split(':');
        elif asArgs[iArg] == '--iscsi-targets':
            iArg += 1;
            if iArg >= len(asArgs):
                raise base.InvalidOption('The "--iscsi-targets" takes a colon separated list of iscsi targets');
            self.asIscsiTargets = asArgs[iArg].split(':');
        elif asArgs[iArg] == '--tests':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--tests" takes a colon separated list of tests to run');
            self.asTests = asArgs[iArg].split(':');
        elif asArgs[iArg] == '--test-sets':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--test-sets" takes a colon separated list of test sets');
            self.asTestSets = asArgs[iArg].split(':');
        elif asArgs[iArg] == '--diff-levels':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--diff-levels" takes an integer');
            try: self.cDiffLvls = int(asArgs[iArg]);
            except: raise base.InvalidOption('The "--diff-levels" value "%s" is not an integer' % (asArgs[iArg],));
        elif asArgs[iArg] == '--test-vms':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--test-vms" takes colon separated list');
            self.asTestVMs = asArgs[iArg].split(':');
            for s in self.asTestVMs:
                if s not in self.asTestVMsDef:
                    raise base.InvalidOption('The "--test-vms" value "%s" is not valid; valid values are: %s' \
                        % (s, ' '.join(self.asTestVMsDef)));
        elif asArgs[iArg] == '--skip-vms':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--skip-vms" takes colon separated list');
            self.asSkipVMs = asArgs[iArg].split(':');
            for s in self.asSkipVMs:
                if s not in self.asTestVMsDef:
                    reporter.log('warning: The "--test-vms" value "%s" does not specify any of our test VMs.' % (s));
        elif asArgs[iArg] == '--test-host':
            self.fTestHost = True;
        elif asArgs[iArg] == '--use-scratch':
            self.fUseScratch = True;
        elif asArgs[iArg] == '--always-wipe-storage-cfg':
            self.fRecreateStorCfg = True;
        elif asArgs[iArg] == '--dont-wipe-storage-cfg':
            self.fRecreateStorCfg = False;
        elif asArgs[iArg] == '--report-benchmark-results':
            self.fReportBenchmarkResults = True;
        elif asArgs[iArg] == '--dont-report-benchmark-results':
            self.fReportBenchmarkResults = False;
        elif asArgs[iArg] == '--io-log-path':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--io-log-path" takes a path argument');
            self.sIoLogPath = asArgs[iArg];
        elif asArgs[iArg] == '--enable-io-log':
            self.fIoLog = True;
        elif asArgs[iArg] == '--use-ramdisk':
            self.fUseRamDisk = True;
        elif asArgs[iArg] == '--encrypt-disk':
            self.fEncryptDisk = True;
        elif asArgs[iArg] == '--encrypt-password':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--encrypt-password" takes a string');
            self.sEncryptPw = asArgs[iArg];
        elif asArgs[iArg] == '--encrypt-algorithm':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--encrypt-algorithm" takes a string');
            self.sEncryptAlgo = asArgs[iArg];
        else:
            return vbox.TestDriver.parseOption(self, asArgs, iArg);
        return iArg + 1;
+
+ def completeOptions(self):
+ # Remove skipped VMs from the test list.
+ for sVM in self.asSkipVMs:
+ try: self.asTestVMs.remove(sVM);
+ except: pass;
+
+ return vbox.TestDriver.completeOptions(self);
+
+ def getResourceSet(self):
+ # Construct the resource list the first time it's queried.
+ if self.asRsrcs is None:
+ self.asRsrcs = [];
+ if 'tst-storage' in self.asTestVMs:
+ self.asRsrcs.append('5.0/storage/tst-storage.vdi');
+ if 'tst-storage32' in self.asTestVMs:
+ self.asRsrcs.append('5.0/storage/tst-storage32.vdi');
+
+ return self.asRsrcs;
+
+ def actionConfig(self):
+
+ # Make sure vboxapi has been imported so we can use the constants.
+ if not self.importVBoxApi():
+ return False;
+
+ #
+ # Configure the VMs we're going to use.
+ #
+
+ # Linux VMs
+ if 'tst-storage' in self.asTestVMs:
+ oVM = self.createTestVM('tst-storage', 1, '5.0/storage/tst-storage.vdi', sKind = 'ArchLinux_64', fIoApic = True, \
+ eNic0AttachType = vboxcon.NetworkAttachmentType_NAT, \
+ eNic0Type = vboxcon.NetworkAdapterType_Am79C973, \
+ sDvdImage = self.sVBoxValidationKitIso);
+ if oVM is None:
+ return False;
+
+ if 'tst-storage32' in self.asTestVMs:
+ oVM = self.createTestVM('tst-storage32', 1, '5.0/storage/tst-storage32.vdi', sKind = 'ArchLinux', fIoApic = True, \
+ eNic0AttachType = vboxcon.NetworkAttachmentType_NAT, \
+ eNic0Type = vboxcon.NetworkAdapterType_Am79C973, \
+ sDvdImage = self.sVBoxValidationKitIso);
+ if oVM is None:
+ return False;
+
+ return True;
+
+ def actionExecute(self):
+ """
+ Execute the testcase.
+ """
+ fRc = self.test1();
+ return fRc;
+
+
+ #
+ # Test execution helpers.
+ #
+
+ def prepareStorage(self, oStorCfg, fRamDisk = False, cbPool = None):
+ """
+ Prepares the host storage for disk images or direct testing on the host.
+ """
+ # Create a basic pool with the default configuration.
+ sMountPoint = None;
+ fRc, sPoolId = oStorCfg.createStoragePool(cbPool = cbPool, fRamDisk = fRamDisk);
+ if fRc:
+ fRc, sMountPoint = oStorCfg.createVolume(sPoolId);
+ if not fRc:
+ sMountPoint = None;
+ oStorCfg.cleanup();
+
+ return sMountPoint;
+
+ def cleanupStorage(self, oStorCfg):
+ """
+ Cleans up any created storage space for a test.
+ """
+ return oStorCfg.cleanup();
+
+ def getGuestDisk(self, oSession, oTxsSession, eStorageController):
+ """
+ Gets the path of the disk in the guest to use for testing.
+ """
+ lstDisks = None;
+
+ # The naming scheme for NVMe is different and we don't have
+ # to query the guest for unformatted disks here because the disk with the OS
+ # is not attached to a NVMe controller.
+ if eStorageController == vboxcon.StorageControllerType_NVMe:
+ lstDisks = [ '/dev/nvme0n1' ];
+ else:
+ # Find a unformatted disk (no partition).
+ # @todo: This is a hack because LIST and STAT are not yet implemented
+ # in TXS (get to this eventually)
+ lstBlkDev = [ '/dev/sda', '/dev/sdb' ];
+ for sBlkDev in lstBlkDev:
+ fRc = oTxsSession.syncExec('/usr/bin/ls', ('ls', sBlkDev + '1'));
+ if not fRc:
+ lstDisks = [ sBlkDev ];
+ break;
+
+ _ = oSession;
+ return lstDisks;
+
+ def mountValidationKitIso(self, oVmExec):
+ """
+ Hack to get the vlaidation kit ISO mounted in the guest as it was left out
+ originally and I don't feel like respinning the disk image.
+ """
+ fRc = oVmExec.mkDir('/media');
+ if fRc:
+ fRc = oVmExec.mkDir('/media/cdrom');
+ if fRc:
+ fRc = oVmExec.execBinaryNoStdOut('mount', ('/dev/sr0', '/media/cdrom'));
+
+ return fRc;
+
+ def getDiskFormatVariantsForTesting(self, sDiskFmt, asVariants):
+ """
+ Returns a list of disk variants for testing supported by the given
+ disk format and selected for testing.
+ """
+ lstDskFmts = self.oVBoxMgr.getArray(self.oVBox.systemProperties, 'mediumFormats');
+ for oDskFmt in lstDskFmts:
+ if oDskFmt.id == sDiskFmt:
+ lstDskVariants = [];
+ lstCaps = self.oVBoxMgr.getArray(oDskFmt, 'capabilities');
+
+ if vboxcon.MediumFormatCapabilities_CreateDynamic in lstCaps \
+ and 'Dynamic' in asVariants:
+ lstDskVariants.append('Dynamic');
+
+ if vboxcon.MediumFormatCapabilities_CreateFixed in lstCaps \
+ and 'Fixed' in asVariants:
+ lstDskVariants.append('Fixed');
+
+ if vboxcon.MediumFormatCapabilities_CreateSplit2G in lstCaps \
+ and vboxcon.MediumFormatCapabilities_CreateDynamic in lstCaps \
+ and 'DynamicSplit2G' in asVariants:
+ lstDskVariants.append('DynamicSplit2G');
+
+ if vboxcon.MediumFormatCapabilities_CreateSplit2G in lstCaps \
+ and vboxcon.MediumFormatCapabilities_CreateFixed in lstCaps \
+ and 'FixedSplit2G' in asVariants:
+ lstDskVariants.append('FixedSplit2G');
+
+ if vboxcon.MediumFormatCapabilities_TcpNetworking in lstCaps \
+ and 'Network' in asVariants:
+ lstDskVariants.append('Network'); # Solely for iSCSI to get a non empty list
+
+ return lstDskVariants;
+
+ return [];
+
+ def convDiskToMediumVariant(self, sDiskVariant):
+ """
+ Returns a tuple of medium variant flags matching the given disk variant.
+ """
+ tMediumVariant = None;
+ if sDiskVariant == 'Dynamic':
+ tMediumVariant = (vboxcon.MediumVariant_Standard, );
+ elif sDiskVariant == 'Fixed':
+ tMediumVariant = (vboxcon.MediumVariant_Fixed, );
+ elif sDiskVariant == 'DynamicSplit2G':
+ tMediumVariant = (vboxcon.MediumVariant_Standard, vboxcon.MediumVariant_VmdkSplit2G);
+ elif sDiskVariant == 'FixedSplit2G':
+ tMediumVariant = (vboxcon.MediumVariant_Fixed, vboxcon.MediumVariant_VmdkSplit2G);
+
+ return tMediumVariant;
+
+ def getStorageCtrlFromName(self, sStorageCtrl):
+ """
+ Resolves the storage controller string to the matching constant.
+ """
+ eStorageCtrl = None;
+
+ if sStorageCtrl == 'AHCI':
+ eStorageCtrl = vboxcon.StorageControllerType_IntelAhci;
+ elif sStorageCtrl == 'IDE':
+ eStorageCtrl = vboxcon.StorageControllerType_PIIX4;
+ elif sStorageCtrl == 'LsiLogicSAS':
+ eStorageCtrl = vboxcon.StorageControllerType_LsiLogicSas;
+ elif sStorageCtrl == 'LsiLogic':
+ eStorageCtrl = vboxcon.StorageControllerType_LsiLogic;
+ elif sStorageCtrl == 'BusLogic':
+ eStorageCtrl = vboxcon.StorageControllerType_BusLogic;
+ elif sStorageCtrl == 'NVMe':
+ eStorageCtrl = vboxcon.StorageControllerType_NVMe;
+ elif sStorageCtrl == 'VirtIoScsi':
+ eStorageCtrl = vboxcon.StorageControllerType_VirtioSCSI;
+
+ return eStorageCtrl;
+
+ def getStorageDriverFromEnum(self, eStorageCtrl, fHardDisk):
+ """
+ Returns the appropriate driver name for the given storage controller
+ and a flag whether the driver has the generic SCSI driver attached.
+ """
+ if eStorageCtrl == vboxcon.StorageControllerType_IntelAhci:
+ if fHardDisk:
+ return ('ahci', False);
+ return ('ahci', True);
+ if eStorageCtrl == vboxcon.StorageControllerType_PIIX4:
+ return ('piix3ide', False);
+ if eStorageCtrl == vboxcon.StorageControllerType_LsiLogicSas:
+ return ('lsilogicsas', True);
+ if eStorageCtrl == vboxcon.StorageControllerType_LsiLogic:
+ return ('lsilogicscsi', True);
+ if eStorageCtrl == vboxcon.StorageControllerType_BusLogic:
+ return ('buslogic', True);
+ if eStorageCtrl == vboxcon.StorageControllerType_NVMe:
+ return ('nvme', False);
+ if eStorageCtrl == vboxcon.StorageControllerType_VirtioSCSI:
+ return ('virtio-scsi', True);
+
+ return ('<invalid>', False);
+
+ def isTestCfgSupported(self, asTestCfg):
+ """
+ Returns whether a specific test config is supported.
+ """
+
+ # Check whether the disk variant is supported by the selected format.
+ asVariants = self.getDiskFormatVariantsForTesting(asTestCfg[self.kiDiskFmt], [ asTestCfg[self.kiDiskVar] ]);
+ if not asVariants:
+ return False;
+
+ # For iSCSI check whether we have targets configured.
+ if asTestCfg[self.kiDiskFmt] == 'iSCSI' and not self.asIscsiTargets:
+ return False;
+
+ # Check for virt mode, CPU count and selected VM.
+ if asTestCfg[self.kiVirtMode] == 'raw' \
+ and ( asTestCfg[self.kiCpuCount] > 1 \
+ or asTestCfg[self.kiVmName] == 'tst-storage' \
+ or not self.fTestRawMode):
+ return False;
+
+ # IDE does not support the no host I/O cache setting
+ if asTestCfg[self.kiHostIoCache] == 'no-hostiocache' \
+ and asTestCfg[self.kiStorageCtrl] == 'IDE':
+ return False;
+
+ return True;
+
+ def fnFormatCpuString(self, cCpus):
+ """
+ Formats the CPU count to be readable.
+ """
+ if cCpus == 1:
+ return '1 cpu';
+ return '%u cpus' % (cCpus);
+
+ def fnFormatVirtMode(self, sVirtMode):
+ """
+ Formats the virtualization mode to be a little less cryptic for use in test
+ descriptions.
+ """
+ return self.kdVirtModeDescs[sVirtMode];
+
+ def fnFormatHostIoCache(self, sHostIoCache):
+ """
+ Formats the host I/O cache mode to be a little less cryptic for use in test
+ descriptions.
+ """
+ return self.kdHostIoCacheDescs[sHostIoCache];
+
+ def testBenchmark(self, sTargetOs, sBenchmark, sMountpoint, oExecutor, dTestSet, \
+ cMsTimeout = 3600000):
+ """
+ Runs the given benchmark on the test host.
+ """
+
+ dTestSet['FilePath'] = sMountpoint;
+ dTestSet['TargetOs'] = sTargetOs;
+
+ oTst = None;
+ if sBenchmark == 'iozone':
+ oTst = IozoneTest(oExecutor, dTestSet);
+ elif sBenchmark == 'fio':
+ oTst = FioTest(oExecutor, dTestSet); # pylint: disable=redefined-variable-type
+ elif sBenchmark == 'ioperf':
+ oTst = IoPerfTest(oExecutor, dTestSet); # pylint: disable=redefined-variable-type
+
+ if oTst is not None:
+ fRc = oTst.prepare();
+ if fRc:
+ fRc = oTst.run(cMsTimeout);
+ if fRc:
+ if self.fReportBenchmarkResults:
+ fRc = oTst.reportResult();
+ else:
+ reporter.testFailure('Running the testcase failed');
+ reporter.addLogString(oTst.getErrorReport(), sBenchmark + '.log',
+ 'log/release/client', 'Benchmark raw output');
+ else:
+ reporter.testFailure('Preparing the testcase failed');
+
+ oTst.cleanup();
+
+ return fRc;
+
+    def createHd(self, oSession, sDiskFormat, sDiskVariant, iDiffLvl, oHdParent, \
+                 sDiskPath, cbDisk):
+        """
+        Creates a new disk with the given parameters returning the medium object
+        on success.
+
+        iDiffLvl 0 creates a base image (or attaches an iSCSI target when
+        sDiskFormat is 'iSCSI'); higher levels create a differencing image with
+        oHdParent as parent.  Returns None on failure.
+        """
+
+        oHd = None;
+        if sDiskFormat == "iSCSI" and iDiffLvl == 0:
+            # iSCSI target: sDiskPath is the target spec; the first configured
+            # target is split into address/name/LUN property values.
+            listNames = [];
+            listValues = [];
+            listValues = self.asIscsiTargets[0].split('|');
+            listNames.append('TargetAddress');
+            listNames.append('TargetName');
+            listNames.append('LUN');
+
+            # createHardDisk was replaced by createMedium in the 5.0 API.
+            if self.fpApiVer >= 5.0:
+                oHd = oSession.oVBox.createMedium(sDiskFormat, sDiskPath, vboxcon.AccessMode_ReadWrite, \
+                                                  vboxcon.DeviceType_HardDisk);
+            else:
+                oHd = oSession.oVBox.createHardDisk(sDiskFormat, sDiskPath);
+            oHd.type = vboxcon.MediumType_Normal;
+            oHd.setProperties(listNames, listValues);
+        else:
+            if iDiffLvl == 0:
+                # Base image; fixed disks can take a while, hence the 1h timeout.
+                tMediumVariant = self.convDiskToMediumVariant(sDiskVariant);
+                oHd = oSession.createBaseHd(sDiskPath + '/base.img', sDiskFormat, cbDisk, \
+                                            cMsTimeout = 3600 * 1000, tMediumVariant = tMediumVariant);
+            else:
+                sDiskPath = sDiskPath + '/diff_%u.img' % (iDiffLvl);
+                oHd = oSession.createDiffHd(oHdParent, sDiskPath, None);
+
+        # Only the base image gets encrypted; diff images inherit it.
+        if oHd is not None and iDiffLvl == 0 and self.fEncryptDisk:
+            try:
+                oIProgress = oHd.changeEncryption('', self.sEncryptAlgo, self.sEncryptPw, self.ksPwId);
+                oProgress = vboxwrappers.ProgressWrapper(oIProgress, self.oVBoxMgr, self, 'Encrypting "%s"' % (sDiskPath,));
+                oProgress.wait(60*60000); # Wait for up to one hour, fixed disks take longer to encrypt.
+                if oProgress.logResult() is False:
+                    raise base.GenError('Encrypting disk "%s" failed' % (sDiskPath, ));
+            except:
+                reporter.errorXcpt('changeEncryption("%s","%s","%s") failed on "%s"' \
+                                   % ('', self.sEncryptAlgo, self.sEncryptPw, oSession.sName) );
+                # Delete the half-encrypted disk so the caller sees a clean failure.
+                self.oVBox.deleteHdByMedium(oHd);
+                oHd = None;
+            else:
+                reporter.log('Encrypted "%s"' % (sDiskPath,));
+
+        return oHd;
+
+ def startVmAndConnect(self, sVmName):
+ """
+ Our own implementation of startVmAndConnectToTxsViaTcp to make it possible
+ to add passwords to a running VM when encryption is used.
+ """
+ oSession = self.startVmByName(sVmName);
+ if oSession is not None:
+ # Add password to the session in case encryption is used.
+ fRc = True;
+ if self.fEncryptDisk:
+ try:
+ if self.fpApiVer >= 7.0:
+ oSession.o.console.addEncryptionPassword(self.ksPwId, self.sEncryptPw, False);
+ else:
+ oSession.o.console.addDiskEncryptionPassword(self.ksPwId, self.sEncryptPw, False);
+ except:
+ reporter.logXcpt();
+ fRc = False;
+
+ # Connect to TXS.
+ if fRc:
+ reporter.log2('startVmAndConnect: Started(/prepared) "%s", connecting to TXS ...' % (sVmName,));
+ (fRc, oTxsSession) = self.txsDoConnectViaTcp(oSession, 15*60000, fNatForwardingForTxs = True);
+ if fRc is True:
+ if fRc is True:
+ # Success!
+ return (oSession, oTxsSession);
+ else:
+ reporter.error('startVmAndConnect: txsDoConnectViaTcp failed');
+ # If something went wrong while waiting for TXS to be started - take VM screenshot before terminate it
+
+ self.terminateVmBySession(oSession);
+
+ return (None, None);
+
+    def testOneCfg(self, sVmName, eStorageController, sHostIoCache, sDiskFormat, # pylint: disable=too-many-arguments,too-many-locals,too-many-statements
+                   sDiskVariant, sDiskPath, cCpus, sIoTest, sVirtMode, sTestSet):
+        """
+        Runs the specified VM thru test #1.
+
+        For each diff level (base disk plus self.cDiffLvls differencing disks)
+        the VM is reconfigured, started, the benchmark is run in the guest, and
+        the VM is torn down again.
+
+        Returns a success indicator on the general test execution. This is not
+        the actual test result.
+        """
+        oVM = self.getVmByName(sVmName);
+
+        dTestSet = self.kdTestSets.get(sTestSet);
+        cbDisk = dTestSet.get('DiskSizeGb') * 1024*1024*1024;
+        fHwVirt = sVirtMode != 'raw';
+        fNestedPaging = sVirtMode == 'hwvirt-np';
+
+        # Pick the disk path: iSCSI target spec, scratch dir, or a freshly
+        # prepared storage volume.
+        fRc = True;
+        if sDiskFormat == 'iSCSI':
+            sDiskPath = self.asIscsiTargets[0];
+        elif self.fUseScratch:
+            sDiskPath = self.sScratchPath;
+        else:
+            # If requested recreate the storage space to start with a clean config
+            # for benchmarks
+            if self.fRecreateStorCfg:
+                sMountPoint = self.prepareStorage(self.oStorCfg, self.fUseRamDisk, 2 * cbDisk);
+                if sMountPoint is not None:
+                    # Create a directory where every normal user can write to.
+                    self.oStorCfg.mkDirOnVolume(sMountPoint, 'test', 0o777);
+                    sDiskPath = sMountPoint + '/test';
+                else:
+                    fRc = False;
+                    reporter.testFailure('Failed to prepare storage for VM');
+
+        if not fRc:
+            return fRc;
+
+        lstDisks = []; # List of disks we have to delete afterwards.
+
+        for iDiffLvl in range(self.cDiffLvls + 1):
+            sIoLogFile = None;
+
+            if iDiffLvl == 0:
+                reporter.testStart('Base');
+            else:
+                reporter.testStart('Diff %u' % (iDiffLvl));
+
+            # Reconfigure the VM
+            oSession = self.openSession(oVM);
+            if oSession is not None:
+                #
+                # Disable audio controller which shares the interrupt line with the BusLogic controller and is suspected to cause
+                # rare test failures because the device initialization fails.
+                #
+                fRc = oSession.setupAudio(vboxcon.AudioControllerType_AC97, False);
+                # Attach HD
+                fRc = fRc and oSession.ensureControllerAttached(self.controllerTypeToName(eStorageController));
+                fRc = fRc and oSession.setStorageControllerType(eStorageController,
+                                                                self.controllerTypeToName(eStorageController));
+
+                if sHostIoCache == 'hostiocache':
+                    fRc = fRc and oSession.setStorageControllerHostIoCache(self.controllerTypeToName(eStorageController), True);
+                elif sHostIoCache == 'no-hostiocache':
+                    fRc = fRc and oSession.setStorageControllerHostIoCache(self.controllerTypeToName(eStorageController), False);
+
+                iDevice = 0;
+                if eStorageController in (vboxcon.StorageControllerType_PIIX3, vboxcon.StorageControllerType_PIIX4,):
+                    iDevice = 1; # Master is for the OS.
+
+                # lstDisks[0] is always the most recently created disk, i.e. the
+                # parent for the next diff level.
+                oHdParent = None;
+                if iDiffLvl > 0:
+                    oHdParent = lstDisks[0];
+                oHd = self.createHd(oSession, sDiskFormat, sDiskVariant, iDiffLvl, oHdParent, sDiskPath, cbDisk);
+                if oHd is not None:
+                    lstDisks.insert(0, oHd);
+                    try:
+                        if oSession.fpApiVer >= 4.0:
+                            oSession.o.machine.attachDevice(self.controllerTypeToName(eStorageController),
+                                                            0, iDevice, vboxcon.DeviceType_HardDisk, oHd);
+                        else:
+                            oSession.o.machine.attachDevice(self.controllerTypeToName(eStorageController),
+                                                            0, iDevice, vboxcon.DeviceType_HardDisk, oHd.id);
+                    except:
+                        reporter.errorXcpt('attachDevice("%s",%s,%s,HardDisk,"%s") failed on "%s"' \
+                                           % (self.controllerTypeToName(eStorageController), 1, 0, oHd.id, oSession.sName) );
+                        fRc = False;
+                    else:
+                        reporter.log('attached "%s" to %s' % (sDiskPath, oSession.sName));
+                else:
+                    fRc = False;
+
+                # Set up the I/O logging config if enabled
+                if fRc and self.fIoLog:
+                    try:
+                        oSession.o.machine.setExtraData('VBoxInternal2/EnableDiskIntegrityDriver', '1');
+
+                        iLun = 0;
+                        if eStorageController in (vboxcon.StorageControllerType_PIIX3, vboxcon.StorageControllerType_PIIX4,):
+                            iLun = 1
+                        sDrv, fDrvScsi = self.getStorageDriverFromEnum(eStorageController, True);
+                        if fDrvScsi:
+                            sCfgmPath = 'VBoxInternal/Devices/%s/0/LUN#%u/AttachedDriver/Config' % (sDrv, iLun);
+                        else:
+                            sCfgmPath = 'VBoxInternal/Devices/%s/0/LUN#%u/Config' % (sDrv, iLun);
+
+                        sIoLogFile = '%s/%s.iolog' % (self.sIoLogPath, sDrv);
+                        print(sCfgmPath);
+                        print(sIoLogFile);
+                        oSession.o.machine.setExtraData('%s/IoLog' % (sCfgmPath,), sIoLogFile);
+                    except:
+                        reporter.logXcpt();
+
+                fRc = fRc and oSession.enableVirtEx(fHwVirt);
+                fRc = fRc and oSession.enableNestedPaging(fNestedPaging);
+                fRc = fRc and oSession.setCpuCount(cCpus);
+                fRc = fRc and oSession.saveSettings();
+                fRc = oSession.close() and fRc and True; # pychecker hack.
+                oSession = None;
+            else:
+                fRc = False;
+
+            # Start up.
+            if fRc is True:
+                self.logVmInfo(oVM);
+                oSession, oTxsSession = self.startVmAndConnect(sVmName);
+                if oSession is not None:
+                    self.addTask(oTxsSession);
+
+                    # Fudge factor - Allow the guest to finish starting up.
+                    self.sleep(5);
+
+                    # Prepare the storage on the guest
+                    lstBinaryPaths = ['/bin', '/sbin', '/usr/bin', '/usr/sbin' ];
+                    oExecVm = remoteexecutor.RemoteExecutor(oTxsSession, lstBinaryPaths, '${SCRATCH}');
+                    fRc = self.mountValidationKitIso(oExecVm);
+                    if fRc:
+                        oGstDiskCfg = storagecfg.DiskCfg('linux', storagecfg.g_ksDiskCfgList,
+                                                         self.getGuestDisk(oSession, oTxsSession, eStorageController));
+                        oStorCfgVm = storagecfg.StorageCfg(oExecVm, oGstDiskCfg);
+
+                        # Retry a few times; guest device enumeration can lag
+                        # right after boot.
+                        iTry = 0;
+                        while iTry < 3:
+                            sMountPoint = self.prepareStorage(oStorCfgVm);
+                            if sMountPoint is not None:
+                                reporter.log('Prepared storage on %s try' % (iTry + 1,));
+                                break;
+                            iTry = iTry + 1;
+                            self.sleep(5);
+
+                        if sMountPoint is not None:
+                            # 3 hours max (Benchmark and QED takes a lot of time)
+                            self.testBenchmark('linux', sIoTest, sMountPoint, oExecVm, dTestSet, cMsTimeout = 3 * 3600 * 1000);
+                            self.cleanupStorage(oStorCfgVm);
+                        else:
+                            reporter.testFailure('Failed to prepare storage for the guest benchmark');
+
+                        # cleanup.
+                        self.removeTask(oTxsSession);
+                        self.terminateVmBySession(oSession);
+
+                        # Add the I/O log if it exists and the test failed
+                        if reporter.testErrorCount() > 0 \
+                           and sIoLogFile is not None \
+                           and os.path.exists(sIoLogFile):
+                            reporter.addLogFile(sIoLogFile, 'misc/other', 'I/O log');
+                            os.remove(sIoLogFile);
+                    else:
+                        reporter.testFailure('Failed to mount validation kit ISO');
+
+            else:
+                fRc = False;
+
+            # Remove disk
+            # NOTE(review): if the first openSession above failed, iDevice is
+            # unbound here and detachDevice would raise NameError - confirm.
+            oSession = self.openSession(oVM);
+            if oSession is not None:
+                try:
+                    oSession.o.machine.detachDevice(self.controllerTypeToName(eStorageController), 0, iDevice);
+
+                    # Remove storage controller if it is not an IDE controller.
+                    if eStorageController not in (vboxcon.StorageControllerType_PIIX3, vboxcon.StorageControllerType_PIIX4,):
+                        oSession.o.machine.removeStorageController(self.controllerTypeToName(eStorageController));
+
+                    # NOTE(review): saveSettings() is called twice in a row here;
+                    # looks redundant - confirm before removing.
+                    oSession.saveSettings();
+                    oSession.saveSettings();
+                    oSession.close();
+                    oSession = None;
+                except:
+                    reporter.errorXcpt('failed to detach/delete disk %s from storage controller' % (sDiskPath));
+            else:
+                fRc = False;
+
+            reporter.testDone();
+
+        # Delete all disks
+        for oHd in lstDisks:
+            self.oVBox.deleteHdByMedium(oHd);
+
+        # Cleanup storage area
+        if sDiskFormat != 'iSCSI' and not self.fUseScratch and self.fRecreateStorCfg:
+            self.cleanupStorage(self.oStorCfg);
+
+        return fRc;
+
+    def testStorage(self, sDiskPath = None):
+        """
+        Runs the storage testcase through the selected configurations
+
+        Builds the test-configuration matrix, filters it through
+        isTestCfgSupported plus a blacklist, and runs testOneCfg for every
+        remaining combination.  Returns True if all configs succeeded.
+        """
+
+        # The list is indexed by the ki* constants; inserting at those indices
+        # assumes they are used here in ascending order (effectively appends).
+        aasTestCfgs = [];
+        aasTestCfgs.insert(self.kiVmName,      self.asTestVMs);
+        aasTestCfgs.insert(self.kiStorageCtrl, self.asStorageCtrls);
+        aasTestCfgs.insert(self.kiHostIoCache, (self.asHostIoCache, True, self.fnFormatHostIoCache));
+        aasTestCfgs.insert(self.kiDiskFmt,     self.asDiskFormats);
+        aasTestCfgs.insert(self.kiDiskVar,     self.asDiskVariants);
+        aasTestCfgs.insert(self.kiCpuCount,    (self.acCpus, True, self.fnFormatCpuString));
+        aasTestCfgs.insert(self.kiVirtMode,    (self.asVirtModes, True, self.fnFormatVirtMode));
+        aasTestCfgs.insert(self.kiTestSet,     self.asTestSets);
+        aasTestCfgs.insert(self.kiIoTest,      (self.asTests, False, None));
+
+        aasTestsBlacklist = [];
+        aasTestsBlacklist.append(['tst-storage', 'BusLogic']); # 64bit Linux is broken with BusLogic
+
+        oTstCfgMgr = StorTestCfgMgr(aasTestCfgs, aasTestsBlacklist, self.isTestCfgSupported);
+
+        fRc = True;
+        asTestCfg = oTstCfgMgr.getCurrentTestCfg();
+        while asTestCfg:
+            fRc = self.testOneCfg(asTestCfg[self.kiVmName], self.getStorageCtrlFromName(asTestCfg[self.kiStorageCtrl]), \
+                                  asTestCfg[self.kiHostIoCache], asTestCfg[self.kiDiskFmt], asTestCfg[self.kiDiskVar],
+                                  sDiskPath, asTestCfg[self.kiCpuCount], asTestCfg[self.kiIoTest], \
+                                  asTestCfg[self.kiVirtMode], asTestCfg[self.kiTestSet]) and fRc and True; # pychecker hack.
+
+            asTestCfg = oTstCfgMgr.getNextTestCfg();
+
+        return fRc;
+
+ def test1(self):
+ """
+ Executes test #1.
+ """
+
+ fRc = True;
+ tupTstCfg = self.kdStorageCfgs.get(socket.getfqdn().lower());
+ if tupTstCfg is None:
+ tupTstCfg = self.kdStorageCfgs.get(socket.gethostname().lower());
+
+ # Test the host first if requested
+ if tupTstCfg is not None or self.fUseScratch:
+ self.fTestRawMode = tupTstCfg[0];
+ oDiskCfg = tupTstCfg[1];
+ lstBinaryPaths = ['/bin', '/sbin', '/usr/bin', '/usr/sbin', \
+ '/opt/csw/bin', '/usr/ccs/bin', '/usr/sfw/bin'];
+ oExecutor = remoteexecutor.RemoteExecutor(None, lstBinaryPaths, self.sScratchPath);
+ if not self.fUseScratch:
+ self.oStorCfg = storagecfg.StorageCfg(oExecutor, oDiskCfg);
+
+ # Try to cleanup any leftovers from a previous run first.
+ fRc = self.oStorCfg.cleanupLeftovers();
+ if not fRc:
+ reporter.error('Failed to cleanup any leftovers from a previous run');
+
+ if self.fTestHost:
+ reporter.testStart('Host');
+ if self.fUseScratch:
+ sMountPoint = self.sScratchPath;
+ else:
+ sMountPoint = self.prepareStorage(self.oStorCfg);
+ if sMountPoint is not None:
+ for sIoTest in self.asTests:
+ for sTestSet in self.asTestSets:
+ reporter.testStart(sTestSet);
+ dTestSet = self.kdTestSets.get(sTestSet);
+ self.testBenchmark(utils.getHostOs(), sIoTest, sMountPoint, oExecutor, dTestSet);
+ reporter.testDone();
+ self.cleanupStorage(self.oStorCfg);
+ else:
+ reporter.testFailure('Failed to prepare host storage');
+ fRc = False;
+ reporter.testDone();
+ else:
+ # Create the storage space first if it is not done before every test.
+ sMountPoint = None;
+ if self.fUseScratch:
+ sMountPoint = self.sScratchPath;
+ elif not self.fRecreateStorCfg:
+ reporter.testStart('Create host storage');
+ sMountPoint = self.prepareStorage(self.oStorCfg);
+ if sMountPoint is None:
+ reporter.testFailure('Failed to prepare host storage');
+ fRc = False;
+ self.oStorCfg.mkDirOnVolume(sMountPoint, 'test', 0o777);
+ sMountPoint = sMountPoint + '/test';
+ reporter.testDone();
+
+ if fRc:
+ # Run the storage tests.
+ if not self.testStorage(sMountPoint):
+ fRc = False;
+
+ if not self.fRecreateStorCfg and not self.fUseScratch:
+ self.cleanupStorage(self.oStorCfg);
+ else:
+ reporter.testFailure('Could not get disk configuration for host: %s' % (socket.getfqdn().lower()));
+ fRc = False;
+
+ return fRc;
+
+# Standard validation kit test driver entry point.
+if __name__ == '__main__':
+    sys.exit(tdStorageBenchmark().main(sys.argv));
+
diff --git a/src/VBox/ValidationKit/tests/storage/tdStorageRawDrive1.py b/src/VBox/ValidationKit/tests/storage/tdStorageRawDrive1.py
new file mode 100755
index 00000000..482453de
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/tdStorageRawDrive1.py
@@ -0,0 +1,1692 @@
+
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+VirtualBox Validation Kit - VMDK raw disk tests.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2013-2023 Oracle and/or its affiliates.
+
+This file is part of VirtualBox base platform packages, as
+available from https://www.virtualbox.org.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation, in version 3 of the
+License.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <https://www.gnu.org/licenses>.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+in the VirtualBox distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+
+SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+"""
+__version__ = "$Id: tdStorageRawDrive1.py $"
+
+# Standard Python imports.
+import os;
+import re;
+import sys;
+
+# Only the main script needs to modify the path.
+try: __file__
+except: __file__ = sys.argv[0];
+g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
+sys.path.append(g_ksValidationKitDir);
+
+# Validation Kit imports.
+from common import utils;
+from testdriver import reporter;
+from testdriver import base;
+from testdriver import vbox;
+from testdriver import vboxcon;
+from testdriver import vboxtestvms;
+from testdriver import vboxwrappers;
+
+
+class tdStorageRawDriveOs(vboxtestvms.BaseTestVm):
+ """
+ Base autostart helper class to provide common methods.
+ """
+    # pylint: disable=too-many-arguments
+    def __init__(self, oSet, oTstDrv, sVmName, sKind, sHdd, eNic0Type = None, cMbRam = None, \
+                 cCpus = 1, fPae = None, sGuestAdditionsIso = None, sBootSector = None):
+        """
+        Initializes the test VM description and the expected VMDK raw-drive
+        test data (disk headers, partition listings and per-action extent
+        layouts for the MBR and GPT test images).
+        """
+        vboxtestvms.BaseTestVm.__init__(self, sVmName, oSet = oSet, sKind = sKind);
+        self.oTstDrv = oTstDrv;
+        self.sHdd = sHdd;
+        self.eNic0Type = eNic0Type;
+        self.cMbRam = cMbRam;
+        self.cCpus = cCpus;
+        self.fPae = fPae;
+        self.sGuestAdditionsIso = sGuestAdditionsIso;
+        self.asTestBuildDirs = oTstDrv.asTestBuildDirs;
+        self.sVBoxInstaller = "";
+        # Directory inside the guest where the VMDK files are created.
+        self.sVMDKPath='/home/vbox/vmdk';
+        self.asVirtModesSup = ['hwvirt-np',];
+        self.asParavirtModesSup = ['default',];
+        self.sBootSector = sBootSector;
+        self.sPathDelimiter = '/';
+
+        # Had to move it here from oTestDrv because the output is platform-dependent
+        # Expected 'Header' fields and 'Partitions' listing per test disk image;
+        # $(n) placeholders are substituted with partition device numbers.
+        self.asHdds = \
+        { '6.1/storage/t-mbr.vdi' :
+          {
+              'Header' :
+              {
+                  #Drive: /dev/sdb
+                  'Model' : '"ATA VBOX HARDDISK"',
+                  'UUID' : '62d4f394-0000-0000-0000-000000000000',
+                  'Size' : '2.0GiB',
+                  'Sector Size' : '512 bytes',
+                  'Scheme' : 'MBR',
+              },
+              'Partitions' :
+              {
+                  'Partitions' :
+                  [
+                      '$(1)  07  10.0MiB  1.0MiB  0/ 32/33  1/102/37  no   IFS',
+                      '$(2)  83  10.0MiB 11.0MiB  5/ 93/33 11/ 29/14  no   Linux',
+                      '$(3)  07  10.0MiB 21.0MiB  2/172/43  3/242/47  no   IFS',
+                      '$(4)  07  10.0MiB 32.0MiB  4/ 20/17  5/ 90/21  no   IFS',
+                      '$(5)  83  10.0MiB 43.0MiB  5/122/54  6/192/58  no   Linux',
+                      '$(6)  07  10.0MiB 54.0MiB  6/225/28  8/ 40/32  no   IFS',
+                      '$(7)  83  10.0MiB 65.0MiB  8/ 73/ 2  9/143/ 6  no   Linux',
+                      '$(8)  07   1.9GiB 76.0MiB  9/175/39 260/243/47 no   IFS',
+                  ],
+                  'PartitionNumbers' : [1, 2, 3, 5, 6, 7, 8, 9],
+              },
+          } ,
+          '6.1/storage/t-gpt.vdi' :
+          {
+              'Header' :
+              {
+                  #Drive: /dev/sdc
+                  'Model' : '"ATA VBOX HARDDISK"',
+                  'UUID' : '7b642ab1-9d44-b844-a860-ce71e0686274',
+                  'Size' : '2.0GiB',
+                  'Sector Size' : '512 bytes',
+                  'Scheme' : 'GPT',
+              },
+              'Partitions' :
+              {
+                  'Partitions' :
+                  [
+                      '$(1)  WindowsBasicData  560b261d-081f-fb4a-8df8-c64fffcb2bd1 10.0MiB  1.0MiB  off',
+                      '$(2)  LinuxData         629f66be-0254-7c4f-a328-cc033e4de124 10.0MiB 11.0MiB  off',
+                      '$(3)  WindowsBasicData  d3f56c96-3b28-7f44-a53d-85b8bc93bd91 10.0MiB 21.0MiB  off',
+                      '$(4)  LinuxData         27c0f5ad-74c8-d54f-835f-06e51b3f10ef 10.0MiB 31.0MiB  off',
+                      '$(5)  WindowsBasicData  6cf1fdf0-b2ae-3849-9cfa-c056f9d8b722 10.0MiB 41.0MiB  off',
+                      '$(6)  LinuxData         017bcbed-8b96-be4d-925a-2f872194fbe6 10.0MiB 51.0MiB  off',
+                      '$(7)  WindowsBasicData  af6c4f89-8fc3-5049-9d98-3e2e98061073 10.0MiB 61.0MiB  off',
+                      '$(8)  LinuxData         9704d7cd-810f-4d44-ac78-432ebc16143f 10.0MiB 71.0MiB  off',
+                      '$(9)  WindowsBasicData  a05f8e09-f9e7-5b4e-bb4e-e9f8fde3110e  1.9GiB 81.0MiB  off',
+                  ],
+                  'PartitionNumbers' : [1, 2, 3, 4, 5, 6, 7, 8, 9],
+              },
+
+          }
+        };
+        # Per-action VMDK creation parameters and the expected extent lists of
+        # the resulting descriptor files, keyed by test disk image.
+        self.asActions = \
+        [
+            {
+                'action' : 'whole drive',
+                'options' : [],
+                'data-crc' : {},
+                'createType' : 'fullDevice',
+                'extents'    : { '6.1/storage/t-mbr.vdi' : ['RW 0 FLAT "$(disk)" 0',],
+                                 '6.1/storage/t-gpt.vdi' : ['RW 0 FLAT "$(disk)" 0',],
+                               },
+            },
+            {
+                'action' : '1 partition',
+                'options' : ['--property', 'Partitions=1'],
+                'data-crc' : {'6.1/storage/t-mbr.vdi' : 2681429243,
+                              '6.1/storage/t-gpt.vdi' : 1391394051,
+                             },
+                'createType' : 'partitionedDevice',
+                'extents'    : { '6.1/storage/t-mbr.vdi' :
+                                 ['RW 2048 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 20480 FLAT "$(disk)" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 4096',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 6144',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 8192',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 10240',
+                                  'RW 4036608 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                                 '6.1/storage/t-gpt.vdi' :
+                                 ['RW 1 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 2047 FLAT "vmdktest-pt.vmdk" 1',
+                                  'RW 20480 FLAT "$(disk)" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 4026368 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                               },
+            },
+            {
+                'action' : '2 partitions',
+                'options' : ['--property', 'Partitions=1,$(4)'],
+                'data-crc' : {'6.1/storage/t-mbr.vdi' : 2681429243,
+                              '6.1/storage/t-gpt.vdi' : 1391394051,
+                             },
+                'createType' : 'partitionedDevice',
+                'extents'    : { '6.1/storage/t-mbr.vdi' :
+                                 ['RW 2048 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 20480 FLAT "$(disk)" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 2048',
+                                  'RW 20480 FLAT "$(disk)" 65536',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 4096',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 6144',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 8192',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 10240',
+                                  'RW 4036608 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                                 '6.1/storage/t-gpt.vdi' :
+                                 ['RW 1 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 2047 FLAT "vmdktest-pt.vmdk" 1',
+                                  'RW 20480 FLAT "$(disk)" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 FLAT "$(disk)" 63488',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 4026368 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                               },
+            },
+            {
+                'action' : '1 partition with boot sector',
+                'options' : ['--property', 'Partitions=1',
+                             '--property-file', 'BootSector=$(bootsector)'],
+                'data-crc' : {'6.1/storage/t-mbr.vdi' : 3980784439,
+                              '6.1/storage/t-gpt.vdi' : 1152317131,
+                             },
+                'createType' : 'partitionedDevice',
+                'extents'    : { '6.1/storage/t-mbr.vdi' :
+                                 ['RW 2048 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 20480 FLAT "$(disk)" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 4096',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 6144',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 8192',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 10240',
+                                  'RW 4036608 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                                 '6.1/storage/t-gpt.vdi' :
+                                 ['RW 1 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 2047 FLAT "vmdktest-pt.vmdk" 1',
+                                  'RW 20480 FLAT "$(disk)" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 4026368 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                               },
+            },
+            {
+                'action' : '2 partitions with boot sector',
+                'options' : ['--property', 'Partitions=1,$(4)',
+                             '--property-file', 'BootSector=$(bootsector)'],
+                'data-crc' : {'6.1/storage/t-mbr.vdi' : 3980784439,
+                              '6.1/storage/t-gpt.vdi' : 1152317131,
+                             },
+                'createType' : 'partitionedDevice',
+                'extents'    : { '6.1/storage/t-mbr.vdi' :
+                                 ['RW 2048 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 20480 FLAT "$(disk)" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 2048',
+                                  'RW 20480 FLAT "$(disk)" 65536',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 4096',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 6144',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 8192',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 10240',
+                                  'RW 4036608 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                                 '6.1/storage/t-gpt.vdi' :
+                                 ['RW 1 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 2047 FLAT "vmdktest-pt.vmdk" 1',
+                                  'RW 20480 FLAT "$(disk)" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 FLAT "$(disk)" 63488',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 4026368 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                               },
+            },
+            {
+                'action' : '1 partition with relative names',
+                'options' : ['--property', 'Partitions=1', '--property', 'Relative=1'],
+                'data-crc' : {'6.1/storage/t-mbr.vdi' : 2681429243,
+                              '6.1/storage/t-gpt.vdi' : 1391394051,
+                             },
+                'createType' : 'partitionedDevice',
+                'extents'    : { '6.1/storage/t-mbr.vdi' :
+                                 ['RW 2048 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 20480 FLAT "$(part)1" 0',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 2048',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 4096',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 6144',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 8192',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 10240',
+                                  'RW 4036608 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                                 '6.1/storage/t-gpt.vdi' :
+                                 ['RW 1 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 2047 FLAT "vmdktest-pt.vmdk" 1',
+                                  'RW 20480 FLAT "$(part)1" 0',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 4026368 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                               },
+            },
+            {
+                'action' : '2 partitions with relative names',
+                'options' : ['--property', 'Partitions=1,$(4)', '--property', 'Relative=1'],
+                'data-crc' : {'6.1/storage/t-mbr.vdi' : 2681429243,
+                              '6.1/storage/t-gpt.vdi' : 1391394051,
+                             },
+                'createType' : 'partitionedDevice',
+                'extents'    : { '6.1/storage/t-mbr.vdi' :
+                                 ['RW 2048 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 20480 FLAT "$(part)1" 0',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 2048',
+                                  'RW 20480 FLAT "$(part)$(4)" 0',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 4096',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 6144',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 8192',
+                                  'RW 20480 ZERO',
+                                  'RW 2048 FLAT "vmdktest-pt.vmdk" 10240',
+                                  'RW 4036608 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                                 '6.1/storage/t-gpt.vdi' :
+                                 ['RW 1 FLAT "vmdktest-pt.vmdk" 0',
+                                  'RW 2047 FLAT "vmdktest-pt.vmdk" 1',
+                                  'RW 20480 FLAT "$(part)1" 0',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 FLAT "$(part)$(4)" 0',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 20480 ZERO',
+                                  'RW 4026368 ZERO',
+                                  'RW 36028797014771712 ZERO',
+                                 ],
+                               },
+            },
+        ];
+
+
+ def _findFile(self, sRegExp, asTestBuildDirs):
+ """
+ Returns a filepath based on the given regex and paths to look into
+ or None if no matching file is found.
+ """
+ oRegExp = re.compile(sRegExp);
+ for sTestBuildDir in asTestBuildDirs:
+ try:
+ #return most recent file if there are several ones matching the pattern
+ asFiles = [s for s in os.listdir(sTestBuildDir)
+ if os.path.isfile(os.path.join(sTestBuildDir, s))];
+ asFiles = (s for s in asFiles
+ if oRegExp.match(os.path.basename(s))
+ and os.path.exists(sTestBuildDir + '/' + s));
+ asFiles = sorted(asFiles, reverse = True,
+ key = lambda s, sTstBuildDir = sTestBuildDir: os.path.getmtime(os.path.join(sTstBuildDir, s)));
+ if asFiles:
+ return sTestBuildDir + '/' + asFiles[0];
+ except:
+ pass;
+ reporter.error('Failed to find a file matching "%s" in %s.' % (sRegExp, ','.join(asTestBuildDirs)));
+ return None;
+
+    def _waitAdditionsIsRunning(self, oGuest, fWaitTrayControl):
+        """
+        Check is the additions running
+
+        Polls up to 30 times with 10s pauses until the Guest Additions run
+        level and the VBoxService facility are active; when fWaitTrayControl
+        is set, additionally waits for the VBoxTray client facility.
+        Returns True if everything came up, False on timeout.
+        """
+        cAttempt = 0;
+        fRc = False;
+        while cAttempt < 30:
+            fRc = oGuest.additionsRunLevel in [vboxcon.AdditionsRunLevelType_Userland,
+                                               vboxcon.AdditionsRunLevelType_Desktop];
+            if fRc:
+                eServiceStatus, _ = oGuest.getFacilityStatus(vboxcon.AdditionsFacilityType_VBoxService);
+                fRc = eServiceStatus == vboxcon.AdditionsFacilityStatus_Active;
+                if fRc and not fWaitTrayControl:
+                    break;
+                if fRc:
+                    eServiceStatus, _ = oGuest.getFacilityStatus(vboxcon.AdditionsFacilityType_VBoxTrayClient);
+                    fRc = eServiceStatus == vboxcon.AdditionsFacilityStatus_Active;
+                    if fRc:
+                        break;
+            self.oTstDrv.sleep(10);
+            cAttempt += 1;
+        return fRc;
+
+    def createSession(self, oSession, sName, sUser, sPassword, cMsTimeout = 10 * 1000, fIsError = True):
+        """
+        Creates (opens) a guest session.
+        Returns (True, IGuestSession) on success or (False, None) on failure.
+
+        fIsError controls whether failures are reported as errors or merely
+        logged (the caller's main loop decides in the latter case).
+        """
+        oGuest = oSession.o.console.guest;
+        if sName is None:
+            sName = "<untitled>";
+        reporter.log('Creating session "%s" ...' % (sName,));
+        try:
+            oGuestSession = oGuest.createSession(sUser, sPassword, '', sName);
+        except:
+            # Just log, don't assume an error here (will be done in the main loop then).
+            reporter.maybeErrXcpt(fIsError, 'Creating a guest session "%s" failed; sUser="%s", pw="%s"'
+                                  % (sName, sUser, sPassword));
+            return (False, None);
+        reporter.log('Waiting for session "%s" to start within %dms...' % (sName, cMsTimeout));
+        aeWaitFor = [ vboxcon.GuestSessionWaitForFlag_Start, ];
+        try:
+            waitResult = oGuestSession.waitForArray(aeWaitFor, cMsTimeout);
+            #
+            # Be nice to Guest Additions < 4.3: They don't support session handling and
+            # therefore return WaitFlagNotSupported.
+            #
+            if waitResult not in (vboxcon.GuestSessionWaitResult_Start, vboxcon.GuestSessionWaitResult_WaitFlagNotSupported):
+                # Just log, don't assume an error here (will be done in the main loop then).
+                reporter.maybeErr(fIsError, 'Session did not start successfully, returned wait result: %d' % (waitResult,));
+                return (False, None);
+            reporter.log('Session "%s" successfully started' % (sName,));
+        except:
+            # Just log, don't assume an error here (will be done in the main loop then).
+            reporter.maybeErrXcpt(fIsError, 'Waiting for guest session "%s" (usr=%s;pw=%s) to start failed:'
+                                  % (sName, sUser, sPassword,));
+            return (False, None);
+        return (True, oGuestSession);
+
+    def closeSession(self, oGuestSession, fIsError = True):
+        """
+        Closes the guest session.
+
+        A None session is tolerated and counts as success.  When fIsError is
+        True, a failure to close is reported as a test error.
+        Returns True on success, False otherwise.
+        """
+        if oGuestSession is not None:
+            try:
+                sName = oGuestSession.name;
+            except:
+                return reporter.errorXcpt();
+            reporter.log('Closing session "%s" ...' % (sName,));
+            try:
+                oGuestSession.close();
+                oGuestSession = None;
+            except:
+                # Just log, don't assume an error here (will be done in the main loop then).
+                reporter.maybeErrXcpt(fIsError, 'Closing guest session "%s" failed:' % (sName,));
+                return False;
+        return True;
+
+    def guestProcessExecute(self, oGuestSession, sTestName, cMsTimeout, sExecName, asArgs = (),
+                            fGetStdOut = True, fIsError = True):
+        """
+        Helper function to execute a program on a guest, specified in the current test.
+
+        Starts sExecName with asArgs in oGuestSession, waits for it to start and
+        then to terminate (each wait bounded by cMsTimeout), collecting stdout
+        and stderr into one string buffer when fGetStdOut is True.  Failures are
+        reported as test errors only when fIsError is True.
+        Returns (True, ProcessStatus, ProcessExitCode, ProcessStdOutBuffer) on success or (False, 0, 0, None) on failure.
+        """
+        _ = sTestName;
+        fRc = True; # Be optimistic.
+        reporter.log2('Using session user=%s, name=%s, timeout=%d'
+                      % (oGuestSession.user, oGuestSession.name, oGuestSession.timeout,));
+        #
+        # Start the process:
+        #
+        reporter.log2('Executing sCmd=%s, timeoutMS=%d, asArgs=%s'
+                      % (sExecName, cMsTimeout, asArgs, ));
+        fTaskFlags = [];
+        if fGetStdOut:
+            # Have the API buffer stdout/stderr so we can read them below.
+            fTaskFlags = [vboxcon.ProcessCreateFlag_WaitForStdOut,
+                          vboxcon.ProcessCreateFlag_WaitForStdErr];
+        try:
+            # API < 5.0 expects argv without the executable name in front.
+            oProcess = oGuestSession.processCreate(sExecName,
+                                                   asArgs if self.oTstDrv.fpApiVer >= 5.0 else asArgs[1:],
+                                                   [], fTaskFlags, cMsTimeout);
+        except:
+            reporter.maybeErrXcpt(fIsError, 'asArgs=%s' % (asArgs,));
+            return (False, 0, 0, None);
+        if oProcess is None:
+            return (reporter.error('oProcess is None! (%s)' % (asArgs,)), 0, 0, None);
+        #time.sleep(5); # try this if you want to see races here.
+        # Wait for the process to start properly:
+        reporter.log2('Process start requested, waiting for start (%dms) ...' % (cMsTimeout,));
+        iPid = -1;
+        aeWaitFor = [ vboxcon.ProcessWaitForFlag_Start, ];
+        aBuf = None;
+        try:
+            eWaitResult = oProcess.waitForArray(aeWaitFor, cMsTimeout);
+        except:
+            reporter.maybeErrXcpt(fIsError, 'waitforArray failed for asArgs=%s' % (asArgs,));
+            fRc = False;
+        else:
+            try:
+                eStatus = oProcess.status;
+                iPid = oProcess.PID;
+            except:
+                fRc = reporter.errorXcpt('asArgs=%s' % (asArgs,));
+            else:
+                reporter.log2('Wait result returned: %d, current process status is: %d' % (eWaitResult, eStatus,));
+                #
+                # Wait for the process to run to completion if necessary.
+                #
+                # Note! The above eWaitResult return value can be ignored as it will
+                #       (mostly) reflect the process status anyway.
+                #
+                if eStatus == vboxcon.ProcessStatus_Started:
+                    # What to wait for:
+                    aeWaitFor = [ vboxcon.ProcessWaitForFlag_Terminate,
+                                  vboxcon.ProcessWaitForFlag_StdOut,
+                                  vboxcon.ProcessWaitForFlag_StdErr];
+                    reporter.log2('Process (PID %d) started, waiting for termination (%dms), aeWaitFor=%s ...'
+                                  % (iPid, cMsTimeout, aeWaitFor));
+                    # acbFdOut counts bytes received per fd (index 1 = stdout, 2 = stderr).
+                    acbFdOut = [0,0,0];
+                    while True:
+                        try:
+                            eWaitResult = oProcess.waitForArray(aeWaitFor, cMsTimeout);
+                        except KeyboardInterrupt: # Not sure how helpful this is, but whatever.
+                            reporter.error('Process (PID %d) execution interrupted' % (iPid,));
+                            try: oProcess.close();
+                            except: pass;
+                            break;
+                        except:
+                            fRc = reporter.errorXcpt('asArgs=%s' % (asArgs,));
+                            break;
+                        reporter.log2('Wait returned: %d' % (eWaitResult,));
+                        # Process output:
+                        for eFdResult, iFd, sFdNm in [ (vboxcon.ProcessWaitResult_StdOut, 1, 'stdout'),
+                                                       (vboxcon.ProcessWaitResult_StdErr, 2, 'stderr'), ]:
+                            if eWaitResult in (eFdResult, vboxcon.ProcessWaitResult_WaitFlagNotSupported):
+                                reporter.log2('Reading %s ...' % (sFdNm,));
+                                try:
+                                    abBuf = oProcess.read(iFd, 64 * 1024, cMsTimeout);
+                                except KeyboardInterrupt: # Not sure how helpful this is, but whatever.
+                                    reporter.error('Process (PID %d) execution interrupted' % (iPid,));
+                                    try: oProcess.close();
+                                    except: pass;
+                                except:
+                                    pass; ## @todo test for timeouts and fail on anything else!
+                                else:
+                                    if abBuf:
+                                        reporter.log2('Process (PID %d) got %d bytes of %s data' % (iPid, len(abBuf), sFdNm,));
+                                        acbFdOut[iFd] += len(abBuf);
+                                        ## @todo Figure out how to uniform + append!
+                                        # Decode to str; memoryview needs converting to bytes first.
+                                        sBuf = '';
+                                        if sys.version_info >= (2, 7) and isinstance(abBuf, memoryview):
+                                            abBuf = abBuf.tobytes();
+                                            sBuf = abBuf.decode("utf-8");
+                                        else:
+                                            sBuf = str(abBuf);
+                                        if aBuf:
+                                            aBuf += sBuf;
+                                        else:
+                                            aBuf = sBuf;
+                        ## Process input (todo):
+                        #if eWaitResult in (vboxcon.ProcessWaitResult_StdIn, vboxcon.ProcessWaitResult_WaitFlagNotSupported):
+                        #    reporter.log2('Process (PID %d) needs stdin data' % (iPid,));
+                        # Termination or error?
+                        if eWaitResult in (vboxcon.ProcessWaitResult_Terminate,
+                                           vboxcon.ProcessWaitResult_Error,
+                                           vboxcon.ProcessWaitResult_Timeout,):
+                            try:    eStatus = oProcess.status;
+                            except: fRc = reporter.errorXcpt('asArgs=%s' % (asArgs,));
+                            reporter.log2('Process (PID %d) reported terminate/error/timeout: %d, status: %d'
+                                          % (iPid, eWaitResult, eStatus,));
+                            break;
+                    # End of the wait loop.
+                    _, cbStdOut, cbStdErr = acbFdOut;
+                    try:    eStatus = oProcess.status;
+                    except: fRc = reporter.errorXcpt('asArgs=%s' % (asArgs,));
+                    reporter.log2('Final process status (PID %d) is: %d' % (iPid, eStatus));
+                    reporter.log2('Process (PID %d) %d stdout, %d stderr' % (iPid, cbStdOut, cbStdErr));
+        #
+        # Get the final status and exit code of the process.
+        #
+        try:
+            uExitStatus = oProcess.status;
+            iExitCode = oProcess.exitCode;
+        except:
+            fRc = reporter.errorXcpt('asArgs=%s' % (asArgs,));
+        reporter.log2('Process (PID %d) has exit code: %d; status: %d ' % (iPid, iExitCode, uExitStatus));
+        return (fRc, uExitStatus, iExitCode, aBuf);
+
+ def uploadString(self, oGuestSession, sSrcString, sDst):
+ """
+ Upload the string into guest.
+ """
+ fRc = True;
+ try:
+ oFile = oGuestSession.fileOpenEx(sDst, vboxcon.FileAccessMode_ReadWrite, vboxcon.FileOpenAction_CreateOrReplace,
+ vboxcon.FileSharingMode_All, 0, []);
+ except:
+ fRc = reporter.errorXcpt('Upload string failed. Could not create and open the file %s' % sDst);
+ else:
+ try:
+ oFile.write(bytearray(sSrcString), 60*1000);
+ except:
+ fRc = reporter.errorXcpt('Upload string failed. Could not write the string into the file %s' % sDst);
+ try:
+ oFile.close();
+ except:
+ fRc = reporter.errorXcpt('Upload string failed. Could not close the file %s' % sDst);
+ return fRc;
+
+ def uploadFile(self, oGuestSession, sSrc, sDst):
+ """
+ Upload the string into guest.
+ """
+ fRc = True;
+ try:
+ if self.oTstDrv.fpApiVer >= 5.0:
+ oCurProgress = oGuestSession.fileCopyToGuest(sSrc, sDst, [0]);
+ else:
+ oCurProgress = oGuestSession.copyTo(sSrc, sDst, [0]);
+ except:
+ reporter.maybeErrXcpt(True, 'Upload file exception for sSrc="%s":'
+ % (self.sGuestAdditionsIso,));
+ fRc = False;
+ else:
+ if oCurProgress is not None:
+ oWrapperProgress = vboxwrappers.ProgressWrapper(oCurProgress, self.oTstDrv.oVBoxMgr, self.oTstDrv, "uploadFile");
+ oWrapperProgress.wait();
+ if not oWrapperProgress.isSuccess():
+ oWrapperProgress.logResult(fIgnoreErrors = False);
+ fRc = False;
+ else:
+ fRc = reporter.error('No progress object returned');
+ return fRc;
+
+    def downloadFile(self, oGuestSession, sSrc, sDst, fIgnoreErrors = False):
+        """
+        Get a file (sSrc) from the guest storing it on the host (sDst).
+
+        When fIgnoreErrors is True failures are only logged as warnings instead
+        of test errors.  Returns True on success, False otherwise.
+        """
+        fRc = True;
+        try:
+            if self.oTstDrv.fpApiVer >= 5.0:
+                oCurProgress = oGuestSession.fileCopyFromGuest(sSrc, sDst, [0]);
+            else:
+                oCurProgress = oGuestSession.copyFrom(sSrc, sDst, [0]);
+        except:
+            if not fIgnoreErrors:
+                reporter.errorXcpt('Download file exception for sSrc="%s":' % (sSrc,));
+            else:
+                reporter.log('warning: Download file exception for sSrc="%s":' % (sSrc,));
+            fRc = False;
+        else:
+            if oCurProgress is not None:
+                oWrapperProgress = vboxwrappers.ProgressWrapper(oCurProgress, self.oTstDrv.oVBoxMgr,
+                                                                self.oTstDrv, "downloadFile");
+                oWrapperProgress.wait();
+                if not oWrapperProgress.isSuccess():
+                    oWrapperProgress.logResult(fIgnoreErrors);
+                    fRc = False;
+            else:
+                if not fIgnoreErrors:
+                    reporter.error('No progress object returned');
+                else:
+                    reporter.log('warning: No progress object returned');
+                fRc = False;
+        return fRc;
+
+    def downloadFiles(self, oGuestSession, asFiles, fIgnoreErrors = False):
+        """
+        Convenience function to get files from the guest and stores it
+        into the scratch directory for later (manual) review.
+
+        Each downloaded file is attached to the test report.  With
+        fIgnoreErrors a failed download is only logged and iteration continues.
+        Returns True on success.
+        Returns False on failure, logged.
+        """
+        fRc = True;
+        for sGstFile in asFiles:
+            ## @todo r=bird: You need to use the guest specific path functions here.
+            ##       Best would be to add basenameEx to common/pathutils.py.  See how joinEx
+            ##       is used by BaseTestVm::pathJoin and such.
+            sTmpFile = os.path.join(self.oTstDrv.sScratchPath, 'tmp-' + os.path.basename(sGstFile));
+            reporter.log2('Downloading file "%s" to "%s" ...' % (sGstFile, sTmpFile));
+            # First try to remove (unlink) an existing temporary file, as we don't truncate the file.
+            try:    os.unlink(sTmpFile);
+            except: pass;
+            ## @todo Check for already existing files on the host and create a new
+            #        name for the current file to download.
+            fRc = self.downloadFile(oGuestSession, sGstFile, sTmpFile, fIgnoreErrors);
+            if fRc:
+                reporter.addLogFile(sTmpFile, 'misc/other', 'guest - ' + sGstFile);
+            else:
+                if fIgnoreErrors is not True:
+                    # Hard failure: report and bail out immediately.
+                    reporter.error('error downloading file "%s" to "%s"' % (sGstFile, sTmpFile));
+                    return fRc;
+                reporter.log('warning: file "%s" was not downloaded, ignoring.' % (sGstFile,));
+        return True;
+
+    def _checkVmIsReady(self, oGuestSession):
+        """
+        Probes guest readiness by running /sbin/ifconfig in the session.
+        Returns True when the process could be started, False otherwise.
+        """
+        (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Start a guest process',
+                                                  30 * 1000, '/sbin/ifconfig',
+                                                  ['ifconfig',],
+                                                  False, False);
+        return fRc;
+
+    def waitVmIsReady(self, oSession, fWaitTrayControl):
+        """
+        Waits the VM is ready after start or reboot.
+        Returns result (true or false) and guest session obtained
+
+        Sleeps 30s first (boot grace period), waits for the Guest Additions to
+        run, then retries (up to 30 times, 10s apart) to open a guest session
+        and run a probe process in it.
+        """
+        _ = fWaitTrayControl;
+        # Give the VM a time to reboot
+        self.oTstDrv.sleep(30);
+        # Waiting the VM is ready.
+        # To do it, one will try to open the guest session and start the guest process in loop
+        if not self._waitAdditionsIsRunning(oSession.o.console.guest, False):
+            return (False, None);
+        cAttempt = 0;
+        oGuestSession = None;
+        fRc = False;
+        while cAttempt < 30:
+            fRc, oGuestSession = self.createSession(oSession, 'Session for user: vbox',
+                                                    'vbox', 'password', 10 * 1000, False);
+            if fRc:
+                fRc = self._checkVmIsReady(oGuestSession);
+                if fRc:
+                    break;
+                # Session opened but guest not usable yet; close and retry.
+                self.closeSession(oGuestSession, False);
+            self.oTstDrv.sleep(10);
+            cAttempt += 1;
+        return (fRc, oGuestSession);
+
+    def _rebootVM(self, oGuestSession):
+        """
+        Triggers a guest reboot via 'sudo reboot'.
+        Returns True when the reboot utility could be started, False otherwise.
+        """
+        (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Reboot the VM',
+                                                  30 * 1000, '/usr/bin/sudo',
+                                                  ['sudo', 'reboot'],
+                                                  False, True);
+        if not fRc:
+            reporter.error('Calling the reboot utility failed');
+        return fRc;
+
+    def rebootVMAndCheckReady(self, oSession, oGuestSession):
+        """
+        Reboot the VM and wait the VM is ready.
+        Returns result and guest session obtained after reboot
+        """
+        reporter.testStart('Reboot VM and wait for readiness');
+        fRc = self._rebootVM(oGuestSession);
+        # The old guest session dies with the reboot; close it either way.
+        fRc = self.closeSession(oGuestSession, True) and fRc and True; # pychecker hack.
+        if fRc:
+            (fRc, oGuestSession) = self.waitVmIsReady(oSession, False);
+        if not fRc:
+            reporter.error('VM is not ready after reboot');
+        reporter.testDone();
+        return (fRc, oGuestSession);
+
+    def _powerDownVM(self, oGuestSession):
+        """
+        Triggers a guest shutdown via 'sudo poweroff'.
+        Returns True when the poweroff utility could be started, False otherwise.
+        """
+        (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Power down the VM',
+                                                  30 * 1000, '/usr/bin/sudo',
+                                                  ['sudo', 'poweroff'],
+                                                  False, True);
+        if not fRc:
+            reporter.error('Calling the poweroff utility failed');
+        return fRc;
+
+    def powerDownVM(self, oGuestSession):
+        """
+        Power down the VM by calling guest process without waiting
+        the VM is really powered off. Also, closes the guest session.
+        It helps the terminateBySession to stop the VM without aborting.
+        Returns True on success, False otherwise (or when no session is given).
+        """
+        if oGuestSession is None:
+            return False;
+        reporter.testStart('Power down the VM');
+        fRc = self._powerDownVM(oGuestSession);
+        fRc = self.closeSession(oGuestSession, True) and fRc and True; # pychecker hack.
+        if not fRc:
+            reporter.error('Power down the VM failed');
+        reporter.testDone();
+        return fRc;
+
+    def installAdditions(self, oSession, oGuestSession, oVM):
+        """
+        Installs the guest additions using the test execution service.
+        Base-class stub: always reports an error; OS-specific subclasses
+        are expected to override this.
+        """
+        _ = oSession;
+        _ = oGuestSession;
+        _ = oVM;
+        reporter.error('Not implemented');
+        return False;
+
+    def installVirtualBox(self, oGuestSession):
+        """
+        Install VirtualBox in the guest.
+        Base-class stub: always reports an error; OS-specific subclasses
+        are expected to override this.
+        """
+        _ = oGuestSession;
+        reporter.error('Not implemented');
+        return False;
+
+    def getResourceSet(self):
+        """
+        Returns the list of test resources this VM needs: the HDD image,
+        unless it is given as an absolute path.
+        """
+        asRet = [];
+        if not os.path.isabs(self.sHdd):
+            asRet.append(self.sHdd);
+        return asRet;
+
+    def _createVmDoIt(self, oTestDrv, eNic0AttachType, sDvdImage):
+        """
+        Creates the VM.
+        Returns Wrapped VM object on success, None on failure.
+
+        Note: eNic0AttachType and sDvdImage are ignored; NAT networking and the
+        Guest Additions ISO from this object are used instead.
+        """
+        _ = eNic0AttachType;
+        _ = sDvdImage;
+        return oTestDrv.createTestVM(self.sVmName, self.iGroup, self.sHdd, sKind = self.sKind, \
+                                     fIoApic = True, eNic0AttachType = vboxcon.NetworkAttachmentType_NAT, \
+                                     eNic0Type = self.eNic0Type, cMbRam = self.cMbRam, \
+                                     sHddControllerType = "SATA Controller", fPae = self.fPae, \
+                                     cCpus = self.cCpus, sDvdImage = self.sGuestAdditionsIso);
+
+    def _createVmPost(self, oTestDrv, oVM, eNic0AttachType, sDvdImage):
+        """
+        Post-creation VM tweaks: enables VT-x, disables 3D acceleration, sets
+        VRAM/graphics controller and enables USB OHCI + HID.
+        Returns oVM on success, None on failure.
+        """
+        _ = eNic0AttachType;
+        _ = sDvdImage;
+        fRc = True;
+        oSession = oTestDrv.openSession(oVM);
+        if oSession is not None:
+            fRc = fRc and oSession.enableVirtEx(True);
+            # nested paging doesn't need for the test
+            #fRc = fRc and oSession.enableNestedPaging(True);
+            #fRc = fRc and oSession.enableNestedHwVirt(True);
+            # disable 3D until the error is fixed.
+            fRc = fRc and oSession.setAccelerate3DEnabled(False);
+            fRc = fRc and oSession.setVRamSize(256);
+            fRc = fRc and oSession.setVideoControllerType(vboxcon.GraphicsControllerType_VBoxSVGA);
+            fRc = fRc and oSession.enableUsbOhci(True);
+            fRc = fRc and oSession.enableUsbHid(True);
+            fRc = fRc and oSession.saveSettings();
+            fRc = oSession.close() and fRc and True; # pychecker hack.
+            oSession = None;
+        else:
+            fRc = False;
+        return oVM if fRc else None;
+
+    def getReconfiguredVm(self, oTestDrv, cCpus, sVirtMode, sParavirtMode = None):
+        """
+        Returns (True, oVM) for the preconfigured VM, (False, None) if not found.
+        """
+        #
+        # Current test uses preconfigured VMs. This override disables any changes in the machine.
+        #
+        _ = cCpus;
+        _ = sVirtMode;
+        _ = sParavirtMode;
+        oVM = oTestDrv.getVmByName(self.sVmName);
+        if oVM is None:
+            return (False, None);
+        return (True, oVM);
+
+ def reattachHdd(self, oVM, sHdd, asHdds):
+ """
+ Attach required hdd and remove all others from asHdds list.
+ """
+ reporter.testStart("Reattach hdd");
+ oSession = self.oTstDrv.openSession(oVM);
+ fRc = False;
+ if oSession is not None:
+ # for simplicity and because we are using VMs having "SATA controller"
+ # we will add the hdds to only "SATA controller"
+ iPortNew = 0;
+ fFound = False;
+ try:
+ aoAttachments = self.oTstDrv.oVBox.oVBoxMgr.getArray(oVM, 'mediumAttachments');
+ except:
+ fRc = reporter.errorXcpt();
+ else:
+ for oAtt in aoAttachments:
+ try:
+ sCtrl = oAtt.controller
+ iPort = oAtt.port;
+ iDev = oAtt.device;
+ eType = oAtt.type;
+ except:
+ fRc = reporter.errorXcpt();
+ break;
+
+ fDetached = False;
+ if eType == vboxcon.DeviceType_HardDisk:
+ oMedium = oVM.getMedium(sCtrl, iPort, iDev);
+ if oMedium.location.endswith(sHdd):
+ fRc = True;
+ fFound = True;
+ break;
+ for sHddVar in asHdds:
+ if oMedium.location.endswith(sHddVar) \
+ or oMedium.parent is not None and oMedium.parent.location.endswith(sHddVar) :
+ (fRc, oOldHd) = oSession.detachHd(sCtrl, iPort, iDev);
+ if fRc and oOldHd is not None:
+ fRc = oSession.saveSettings();
+ if oMedium.parent is not None:
+ fRc = fRc and self.oTstDrv.oVBox.deleteHdByMedium(oOldHd);
+ else:
+ fRc = fRc and oOldHd.close();
+ fRc = fRc and oSession.saveSettings();
+ fDetached = True;
+ if not fDetached and sCtrl == 'SATA Controller' and iPort + 1 > iPortNew:
+ iPortNew = iPort + 1;
+ if not fFound:
+ fRc = oSession.attachHd(sHdd, 'SATA Controller', iPortNew, 0);
+ if fRc:
+ fRc = oSession.saveSettings();
+ else:
+ oSession.discadSettings();
+ fRc = oSession.close() and fRc and True; # pychecker hack
+ else:
+ reporter.error("Open session for '%s' failed" % self.sVmName);
+ fRc = False;
+ reporter.testDone();
+ return fRc;
+
+    def _callVBoxManage(self, oGuestSession, sTestName, cMsTimeout, asArgs = (),
+                        fGetStdOut = True, fIsError = True):
+        """
+        Runs VBoxManage (from /opt/VirtualBox) in the guest via sudo with the
+        given arguments; same return convention as guestProcessExecute().
+        """
+        return self.guestProcessExecute(oGuestSession, sTestName,
+                                        cMsTimeout, '/usr/bin/sudo',
+                                        ['/usr/bin/sudo', '/opt/VirtualBox/VBoxManage'] + asArgs, fGetStdOut, fIsError);
+
+ def listHostDrives(self, oGuestSession, sHdd):
+ """
+ Define path of the specified drive using 'VBoxManage list hostdrives'.
+ """
+ reporter.testStart("List host drives");
+ sDrive = None;
+ (fRc, _, _, aBuf) = self._callVBoxManage(oGuestSession, 'List host drives', 60 * 1000,
+ ['list', 'hostdrives'], True, True);
+ if not fRc:
+ reporter.error('List host drives in the VM %s failed' % (self.sVmName, ));
+ else:
+ if aBuf is None:
+ fRc = reporter.error('"List host drives" output is empty for the VM %s' % (self.sVmName, ));
+ else:
+ asHddData = self.asHdds[sHdd];
+
+ try: aBuf = str(aBuf); # pylint: disable=redefined-variable-type
+ except: pass;
+ asLines = aBuf.splitlines();
+ oRegExp = re.compile(r'^\s*([^:]+)\s*:\s*(.+)\s*$');
+
+ # pylint: disable=no-init
+ class ParseState(object):
+ kiNothing = 0;
+ kiDrive = 1;
+ kiPartition = 2;
+
+ iParseState = ParseState.kiNothing;
+ asKeysNotFound = asHddData['Header'].keys();
+ idxPartition = 0;
+ for sLine in asLines:
+ if not sLine or sLine.startswith('#') or sLine.startswith("\n"):
+ continue;
+ oMatch = oRegExp.match(sLine);
+ if oMatch is not None:
+ sKey = oMatch.group(1);
+ sValue = oMatch.group(2);
+ if sKey is not None and sKey == 'Drive':
+ # we found required disk if we found all required disk info and partitions
+ if sDrive and not asKeysNotFound and idxPartition >= len(asHddData['Partitions']['Partitions']):
+ break;
+ sDrive = sValue;
+ iParseState = ParseState.kiDrive;
+ asKeysNotFound = asKeysNotFound = asHddData['Header'].keys();
+ idxPartition = 0;
+ continue;
+ if iParseState == ParseState.kiDrive:
+ if sLine.strip().startswith('Partitions:'):
+ iParseState = ParseState.kiPartition;
+ continue;
+ if oMatch is None or sKey is None:
+ continue;
+ if sKey in asHddData['Header'].keys() and asHddData['Header'][sKey] == sValue:
+ asKeysNotFound.remove(sKey);
+ continue;
+ if iParseState == ParseState.kiPartition:
+ if idxPartition < len(asHddData['Partitions']['Partitions']):
+ sPart = asHddData['Partitions']['Partitions'][idxPartition];
+ sPart = sPart.replace('$(' + str(idxPartition + 1) + ')',
+ str(asHddData['Partitions']['PartitionNumbers'][idxPartition]));
+ if sLine.strip() == sPart:
+ idxPartition += 1;
+ continue;
+ fRc = sDrive and not asKeysNotFound and idxPartition >= len(asHddData['Partitions']['Partitions']);
+ if fRc:
+ reporter.log("Path to the drive '%s' in the VM '%s': %s " % (sHdd, self.sVmName, sDrive));
+ else:
+ reporter.error("Path to drive '%s' not found in the VM '%s'" % (sHdd, self.sVmName));
+ reporter.testDone();
+ return (fRc, sDrive);
+
+    def convertDiskToPartitionPrefix(self, sDisk):
+        """
+        Returns the partition-device prefix for a disk path.  The base
+        implementation is the identity; OS-specific subclasses may override.
+        """
+        return sDisk;
+
+    def checkVMDKDescriptor(self, asDescriptor, sHdd, sRawDrive, asAction):
+        """
+        Check VMDK descriptor of the disk created
+
+        asDescriptor - list of stripped descriptor lines.
+        sHdd         - key into self.asHdds for the expected partition data.
+        sRawDrive    - host drive path substituted for $(disk)/$(part).
+        asAction     - expected values: 'createType' and 'extents' templates.
+        Validates the header, extent and ddb.* sections appear in that order
+        with the expected values.  Returns True on success, reports an error
+        and returns its (falsy) result otherwise.
+        """
+        if    asDescriptor is None \
+           or asDescriptor[0] != '# Disk DescriptorFile' \
+          and asDescriptor[0] != '# Disk Descriptor File' \
+          and asDescriptor[0] != '#Disk Descriptor File' \
+          and asDescriptor[0] != '#Disk DescriptorFile':
+            return reporter.error("VMDK descriptor has invalid format");
+
+        # pylint: disable=no-init
+        class DescriptorParseState(object):
+            kiHeader   = 1;
+            kiExtent   = 2;
+            kiDatabase = 3;
+
+        asHddData = self.asHdds[sHdd];
+        iParseState = DescriptorParseState.kiHeader;
+
+        # Expected header keys; '*' matches anything, '$' is taken from asAction.
+        asHeader = { 'version'    : '1',
+                     'CID'        : '*',
+                     'parentCID'  : 'ffffffff',
+                     'createType' : '$'
+                   };
+
+        # Expected ddb.* keys; '*' matches anything.
+        asDatabase = { 'ddb.virtualHWVersion'          : '4',
+                       'ddb.adapterType'               : 'ide',
+                       'ddb.uuid.image'                : '*',
+                       'ddb.uuid.parent'               : '00000000-0000-0000-0000-000000000000',
+                       'ddb.uuid.modification'         : '00000000-0000-0000-0000-000000000000',
+                       'ddb.uuid.parentmodification'   : '00000000-0000-0000-0000-000000000000'
+                     };
+
+        oRegExp = re.compile(r'^\s*([^=]+)\s*=\s*\"*([^\"]+)\"*\s*$');
+        iExtentIdx = 0;
+
+        for sLine in asDescriptor:
+            if not sLine or sLine.startswith('#') or sLine.startswith("\n"):
+                continue;
+
+            if iParseState == DescriptorParseState.kiHeader:
+                if sLine.startswith('ddb.'):
+                    return reporter.error("VMDK descriptor has invalid order of sections");
+                if    sLine.startswith("RW") \
+                   or sLine.startswith("RDONLY") \
+                   or sLine.startswith("NOACCESS"):
+                    iParseState = DescriptorParseState.kiExtent;
+                else:
+                    oMatch = oRegExp.match(sLine);
+                    if oMatch is None:
+                        return reporter.error("VMDK descriptor contains lines in invalid form");
+                    sKey = oMatch.group(1).strip();
+                    sValue = oMatch.group(2).strip();
+                    if sKey not in asHeader:
+                        return reporter.error("VMDK descriptor has invalid format");
+                    sDictValue = asHeader[sKey];
+                    if sDictValue == '$':
+                        sDictValue = asAction[sKey];
+                    if sDictValue not in ('*', sValue):
+                        return reporter.error("VMDK descriptor has value which was not expected");
+                    continue;
+
+            if iParseState == DescriptorParseState.kiExtent:
+                if sLine.startswith('ddb.'):
+                    iParseState = DescriptorParseState.kiDatabase;
+                else:
+                    if    not sLine.startswith("RW") \
+                      and not sLine.startswith("RDONLY") \
+                      and not sLine.startswith("NOACCESS"):
+                        return reporter.error("VMDK descriptor has invalid order of sections");
+                    # Expand the extent template with the actual drive path and
+                    # $(N) partition-number placeholders before comparing.
+                    sExtent = asAction['extents'][sHdd][iExtentIdx];
+                    sExtent = sExtent.replace('$(disk)', sRawDrive);
+                    sExtent = sExtent.replace('$(part)', self.convertDiskToPartitionPrefix(sRawDrive));
+                    sExtent = re.sub(r'\$\((\d+)\)',
+                                     lambda oMatch: str(asHddData['Partitions']['PartitionNumbers'][int(oMatch.group(1)) - 1]),
+                                     sExtent);
+                    if sExtent != sLine.strip():
+                        return reporter.error("VMDK descriptor has invalid order of sections");
+                    iExtentIdx += 1;
+                    continue;
+
+            if iParseState == DescriptorParseState.kiDatabase:
+                if not sLine.startswith('ddb.'):
+                    return reporter.error("VMDK descriptor has invalid order of sections");
+                oMatch = oRegExp.match(sLine);
+                if oMatch is None:
+                    return reporter.error("VMDK descriptor contains lines in invalid form");
+                sKey = oMatch.group(1).strip();
+                sValue = oMatch.group(2).strip();
+                if sKey not in asDatabase:
+                    return reporter.error("VMDK descriptor has invalid format");
+                sDictValue = asDatabase[sKey];
+                if sDictValue not in ('*', sValue):
+                    return reporter.error("VMDK descriptor has value which was not expected");
+                continue;
+        # All three sections must have been seen, ending in the database section.
+        return iParseState == DescriptorParseState.kiDatabase;
+
+    def _setPermissionsToVmdkFiles(self, oGuestSession):
+        """
+        Sets 0644 permissions to all files in the self.sVMDKPath allowing reading them by 'vbox' user.
+        Returns True on success, False otherwise.
+        """
+        (fRc, _, _, _) = self.guestProcessExecute(oGuestSession,
+                                                  'Allowing reading of the VMDK content by vbox user',
+                                                  30 * 1000, '/usr/bin/sudo',
+                                                  ['/usr/bin/sudo', '/bin/chmod', '644',
+                                                   self.sVMDKPath + '/vmdktest.vmdk', self.sVMDKPath + '/vmdktest-pt.vmdk'],
+                                                  False, True);
+        return fRc;
+
+ def createDrives(self, oGuestSession, sHdd, sRawDrive):
+ """
+ Creates VMDK Raw file and check correctness
+ """
+ reporter.testStart("Create VMDK disks");
+ asHddData = self.asHdds[sHdd];
+ fRc = True;
+ try: oGuestSession.directoryCreate(self.sVMDKPath, 0o777, (vboxcon.DirectoryCreateFlag_Parents,));
+ except: fRc = reporter.errorXcpt('Create directory for VMDK files failed in the VM %s' % (self.sVmName));
+ if fRc:
+ sBootSectorGuestPath = self.sVMDKPath + self.sPathDelimiter + 't-bootsector.bin';
+ try: fExists = oGuestSession.fileExists(sBootSectorGuestPath, False);
+ except: fExists = False;
+ if not fExists:
+ sBootSectorPath = self.oTstDrv.getFullResourceName(self.sBootSector);
+ fRc = self.uploadFile(oGuestSession, sBootSectorPath, sBootSectorGuestPath);
+
+ for action in self.asActions:
+ reporter.testStart("Create VMDK disk: %s" % action["action"]);
+ asOptions = action['options'];
+ asOptions = [option.replace('$(bootsector)', sBootSectorGuestPath) for option in asOptions];
+ asOptions = [re.sub(r'\$\((\d+)\)',
+ lambda oMatch: str(asHddData['Partitions']['PartitionNumbers'][int(oMatch.group(1)) - 1]),
+ option)
+ for option in asOptions];
+ (fRc, _, _, _) = self._callVBoxManage(oGuestSession, 'Create VMDK disk', 60 * 1000,
+ ['createmedium', '--filename',
+ self.sVMDKPath + self.sPathDelimiter + 'vmdktest.vmdk',
+ '--format', 'VMDK', '--variant', 'RawDisk',
+ '--property', 'RawDrive=%s' % (sRawDrive,) ] + asOptions,
+ False, True);
+ if not fRc:
+ reporter.error('Create VMDK raw drive variant "%s" failed in the VM %s' % (action["action"], self.sVmName));
+ else:
+ fRc = self._setPermissionsToVmdkFiles(oGuestSession);
+ if not fRc:
+ reporter.error('Setting permissions to VMDK files failed');
+ else:
+ sSrcFile = self.sVMDKPath + self.sPathDelimiter + 'vmdktest.vmdk';
+ sDstFile = os.path.join(self.oTstDrv.sScratchPath, 'guest-vmdktest.vmdk');
+ reporter.log2('Downloading file "%s" to "%s" ...' % (sSrcFile, sDstFile));
+ # First try to remove (unlink) an existing temporary file, as we don't truncate the file.
+ try: os.unlink(sDstFile);
+ except: pass;
+ fRc = self.downloadFile(oGuestSession, sSrcFile, sDstFile, False);
+ if not fRc:
+ reporter.error('Download vmdktest.vmdk from guest to host failed');
+ else:
+ with open(sDstFile) as oFile: # pylint: disable=unspecified-encoding
+ asDescriptor = [row.strip() for row in oFile];
+ if not asDescriptor:
+ fRc = reporter.error('Reading vmdktest.vmdk from guest filed');
+ else:
+ fRc = self.checkVMDKDescriptor(asDescriptor, sHdd, sRawDrive, action);
+ if not fRc:
+ reporter.error('Cheking vmdktest.vmdk from guest filed');
+ elif action['data-crc']:
+ sSrcFile = self.sVMDKPath + self.sPathDelimiter + 'vmdktest-pt.vmdk';
+ sDstFile = os.path.join(self.oTstDrv.sScratchPath, 'guest-vmdktest-pt.vmdk');
+ reporter.log2('Downloading file "%s" to "%s" ...' % (sSrcFile, sDstFile));
+ # First try to remove (unlink) an existing temporary file, as we don't truncate the file.
+ try: os.unlink(sDstFile);
+ except: pass;
+ fRc = self.downloadFile(oGuestSession, sSrcFile, sDstFile, False);
+ if not fRc:
+ reporter.error('Download vmdktest-pt.vmdk from guest to host failed');
+ else:
+ uResCrc32 = utils.calcCrc32OfFile(sDstFile);
+ if uResCrc32 != action['data-crc'][sHdd]:
+ fRc = reporter.error('vmdktest-pt.vmdk does not match what was expected');
+ (fRc1, _, _, _) = self._callVBoxManage(oGuestSession, 'Delete VMDK disk', 60 * 1000,
+ ['closemedium',
+ self.sVMDKPath + self.sPathDelimiter + 'vmdktest.vmdk',
+ '--delete'],
+ False, True);
+ if not fRc1:
+ reporter.error('Delete VMDK raw drive variant "%s" failed in the VM %s' %
+ (action["action"], self.sVmName));
+ fRc = fRc and fRc1;
+ reporter.testDone();
+ if not fRc:
+ break;
+ else:
+ reporter.error('Create %s dir failed in the VM %s' % (self.sVMDKPath, self.sVmName));
+
+ reporter.testDone();
+ return fRc;
+
+
+class tdStorageRawDriveOsLinux(tdStorageRawDriveOs):
+ """
+ Autostart support methods for Linux guests.
+ """
+ # pylint: disable=too-many-arguments
+ def __init__(self, oSet, oTstDrv, sVmName, sKind, sHdd, eNic0Type = None, cMbRam = None, \
+ cCpus = 1, fPae = None, sGuestAdditionsIso = None, sBootSector = None):
+ tdStorageRawDriveOs.__init__(self, oSet, oTstDrv, sVmName, sKind, sHdd, eNic0Type, cMbRam, \
+ cCpus, fPae, sGuestAdditionsIso, sBootSector);
+ self.sVBoxInstaller = '^VirtualBox-.*\\.run$';
+ return;
+
+ def installAdditions(self, oSession, oGuestSession, oVM):
+ """
+ Install guest additions in the guest.
+ """
+ reporter.testStart('Install Guest Additions');
+ fRc = False;
+ # Install Kernel headers, which are required for actually installing the Linux Additions.
+ if oVM.OSTypeId.startswith('Debian') \
+ or oVM.OSTypeId.startswith('Ubuntu'):
+ (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Installing Kernel headers',
+ 5 * 60 *1000, '/usr/bin/apt-get',
+ ['/usr/bin/apt-get', 'install', '-y',
+ 'linux-headers-generic'],
+ False, True);
+ if not fRc:
+ reporter.error('Error installing Kernel headers');
+ else:
+ (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Installing Guest Additions depdendencies',
+ 5 * 60 *1000, '/usr/bin/apt-get',
+ ['/usr/bin/apt-get', 'install', '-y', 'build-essential',
+ 'perl'], False, True);
+ if not fRc:
+ reporter.error('Error installing additional installer dependencies');
+ elif oVM.OSTypeId.startswith('OL') \
+ or oVM.OSTypeId.startswith('Oracle') \
+ or oVM.OSTypeId.startswith('RHEL') \
+ or oVM.OSTypeId.startswith('Redhat') \
+ or oVM.OSTypeId.startswith('Cent'):
+ (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Installing Kernel headers',
+ 5 * 60 *1000, '/usr/bin/yum',
+ ['/usr/bin/yum', '-y', 'install', 'kernel-headers'],
+ False, True);
+ if not fRc:
+ reporter.error('Error installing Kernel headers');
+ else:
+ (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Installing Guest Additions depdendencies',
+ 5 * 60 *1000, '/usr/bin/yum',
+ ['/usr/bin/yum', '-y', 'install', 'make', 'automake', 'gcc',
+ 'kernel-devel', 'dkms', 'bzip2', 'perl'], False, True);
+ if not fRc:
+ reporter.error('Error installing additional installer dependencies');
+ else:
+ reporter.error('Installing Linux Additions for the "%s" is not supported yet' % oVM.OSTypeId);
+ fRc = False;
+ if fRc:
+ #
+ # The actual install.
+ # Also tell the installer to produce the appropriate log files.
+ #
+ (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Installing guest additions',
+ 10 * 60 *1000, '/usr/bin/sudo',
+ ['/usr/bin/sudo', '/bin/sh',
+ '/media/cdrom/VBoxLinuxAdditions.run'],
+ False, True);
+ if fRc:
+ # Due to the GA updates as separate process the above function returns before
+ # the actual installation finished. So just wait until the GA installed
+ fRc = self.closeSession(oGuestSession);
+ if fRc:
+ (fRc, oGuestSession) = self.waitVmIsReady(oSession, False);
+ # Download log files.
+ # Ignore errors as all files above might not be present for whatever reason.
+ #
+ if fRc:
+ asLogFile = [];
+ asLogFile.append('/var/log/vboxadd-install.log');
+ self.downloadFiles(oGuestSession, asLogFile, fIgnoreErrors = True);
+ else:
+ reporter.error('Installing guest additions failed: Error occured during vbox installer execution')
+ if fRc:
+ (fRc, oGuestSession) = self.rebootVMAndCheckReady(oSession, oGuestSession);
+ if not fRc:
+ reporter.error('Reboot after installing GuestAdditions failed');
+ reporter.testDone();
+ return (fRc, oGuestSession);
+
+ def installVirtualBox(self, oGuestSession):
+ """
+ Install VirtualBox in the guest.
+ """
+ reporter.testStart('Install Virtualbox into the guest VM');
+ sTestBuild = self._findFile(self.sVBoxInstaller, self.asTestBuildDirs);
+ reporter.log("Virtualbox install file: %s" % os.path.basename(sTestBuild));
+ fRc = sTestBuild is not None;
+ if fRc:
+ fRc = self.uploadFile(oGuestSession, sTestBuild,
+ '/tmp/' + os.path.basename(sTestBuild));
+ else:
+ reporter.error("VirtualBox install package is not defined");
+
+ if not fRc:
+ reporter.error('Upload the vbox installer into guest VM failed');
+ else:
+ (fRc, _, _, _) = self.guestProcessExecute(oGuestSession,
+ 'Allowing execution for the vbox installer',
+ 30 * 1000, '/usr/bin/sudo',
+ ['/usr/bin/sudo', '/bin/chmod', '755',
+ '/tmp/' + os.path.basename(sTestBuild)],
+ False, True);
+ if not fRc:
+ reporter.error('Allowing execution for the vbox installer failed');
+ if fRc:
+ (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Installing VBox',
+ 240 * 1000, '/usr/bin/sudo',
+ ['/usr/bin/sudo',
+ '/tmp/' + os.path.basename(sTestBuild),],
+ False, True);
+ if not fRc:
+ reporter.error('Installing VBox failed');
+ reporter.testDone();
+ return fRc;
+
class tdStorageRawDriveOsDarwin(tdStorageRawDriveOs):
    """
    Autostart support methods for Darwin guests.

    Placeholder only: constructing an instance always fails because Darwin
    support has not been implemented for this testcase yet.
    """
    # pylint: disable=too-many-arguments
    def __init__(self, oSet, oTstDrv, sVmName, sKind, sHdd, eNic0Type = None, cMbRam = None,
                 cCpus = 1, fPae = None, sGuestAdditionsIso = None, sBootSector = None):
        # Run the common base-class setup first, then bail out unconditionally.
        tdStorageRawDriveOs.__init__(self, oSet, oTstDrv, sVmName, sKind, sHdd,
                                     eNic0Type, cMbRam, cCpus, fPae,
                                     sGuestAdditionsIso, sBootSector);
        raise base.GenError('Testing the autostart functionality for Darwin is not implemented');
+
class tdStorageRawDriveOsSolaris(tdStorageRawDriveOs):
    """
    Autostart support methods for Solaris guests.

    Placeholder only: constructing an instance always fails because Solaris
    support has not been implemented for this testcase yet.
    """
    # pylint: disable=too-many-arguments
    def __init__(self, oSet, oTstDrv, sVmName, sKind, sHdd, eNic0Type = None, cMbRam = None,
                 cCpus = 1, fPae = None, sGuestAdditionsIso = None, sBootSector = None):
        # Run the common base-class setup first, then bail out unconditionally.
        tdStorageRawDriveOs.__init__(self, oSet, oTstDrv, sVmName, sKind, sHdd,
                                     eNic0Type, cMbRam, cCpus, fPae,
                                     sGuestAdditionsIso, sBootSector);
        raise base.GenError('Testing the autostart functionality for Solaris is not implemented');
+
class tdStorageRawDriveOsWin(tdStorageRawDriveOs):
    """
    Autostart support methods for Windows guests.
    """
    # pylint: disable=too-many-arguments
    def __init__(self, oSet, oTstDrv, sVmName, sKind, sHdd, eNic0Type = None, cMbRam = None, \
                 cCpus = 1, fPae = None, sGuestAdditionsIso = None, sBootSector = None):
        tdStorageRawDriveOs.__init__(self, oSet, oTstDrv, sVmName, sKind, sHdd, eNic0Type, cMbRam, \
                                     cCpus, fPae, sGuestAdditionsIso, sBootSector);
        self.sVBoxInstaller = r'^VirtualBox-.*\.(exe|msi)$';
        self.sVMDKPath = r'C:\Temp\vmdk';
        self.sPathDelimiter = '\\';
        # Override the expectations inherited from the base class: the Windows
        # guest reports a quoted disk model and a different MBR partition
        # numbering than the defaults.
        self.asHdds['6.1/storage/t-mbr.vdi']['Header']['Model'] = '"VBOX HARDDISK"';
        self.asHdds['6.1/storage/t-gpt.vdi']['Header']['Model'] = '"VBOX HARDDISK"';
        self.asHdds['6.1/storage/t-mbr.vdi']['Partitions']['PartitionNumbers'] = [1, 2, 3, 4, 5, 6, 7, 8];
        return;

    def _checkVmIsReady(self, oGuestSession):
        """
        Probes whether the guest is up by running a trivial process (ipconfig).
        Returns True when the process could be executed.
        """
        (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Start a guest process',
                                                  30 * 1000, 'C:\\Windows\\System32\\ipconfig.exe',
                                                  ['C:\\Windows\\System32\\ipconfig.exe',],
                                                  False, False);
        return fRc;

    def _rebootVM(self, oGuestSession):
        """
        Initiates a forced reboot of the guest via shutdown.exe /r.
        Returns True when the utility could be launched (not when the reboot completed).
        """
        (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Reboot the VM',
                                                  30 * 1000, 'C:\\Windows\\System32\\shutdown.exe',
                                                  ['C:\\Windows\\System32\\shutdown.exe', '/f',
                                                   '/r', '/t', '0'],
                                                  False, True);
        if not fRc:
            reporter.error('Calling the shutdown utility failed');
        return fRc;

    def _powerDownVM(self, oGuestSession):
        """
        Initiates a forced power-off of the guest via shutdown.exe /s.
        Returns True when the utility could be launched.
        """
        (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Power down the VM',
                                                  30 * 1000, 'C:\\Windows\\System32\\shutdown.exe',
                                                  ['C:\\Windows\\System32\\shutdown.exe', '/f',
                                                   '/s', '/t', '0'],
                                                  False, True);
        if not fRc:
            reporter.error('Calling the shutdown utility failed');
        return fRc;

    def _callVBoxManage(self, oGuestSession, sTestName, cMsTimeout, asArgs = (),
                        fGetStdOut = True, fIsError = True):
        """
        Runs VBoxManage.exe in the guest with the given extra arguments.
        Returns the guestProcessExecute() result tuple.
        """
        # Note: asArgs may be a tuple (it defaults to one), so convert it
        # explicitly - 'list + tuple' raises a TypeError.
        return self.guestProcessExecute(oGuestSession, sTestName,
                                        cMsTimeout, r'C:\Program Files\Oracle\VirtualBox\VBoxManage.exe',
                                        [r'C:\Program Files\Oracle\VirtualBox\VBoxManage.exe',] + list(asArgs),
                                        fGetStdOut, fIsError);

    def _setPermissionsToVmdkFiles(self, oGuestSession):
        """
        Sets 0644 permissions to all files in the self.sVMDKPath allowing reading them by 'vbox' user.
        """
        _ = oGuestSession;
        # It is not required in case of Windows
        return True;

    def installAdditions(self, oSession, oGuestSession, oVM):
        """
        Installs the Windows guest additions using the test execution service.

        Returns (fRc, oGuestSession): overall success flag plus a freshly
        created guest session (the one passed in is closed before updating).
        """
        _ = oVM;
        reporter.testStart('Install Guest Additions');
        asLogFiles = [];
        fRc = self.closeSession(oGuestSession, True); # pychecker hack.
        try:
            oCurProgress = oSession.o.console.guest.updateGuestAdditions(self.sGuestAdditionsIso, ['/l',], None);
        except:
            reporter.maybeErrXcpt(True, 'Updating Guest Additions exception for sSrc="%s":'
                                  % (self.sGuestAdditionsIso,));
            fRc = False;
        else:
            if oCurProgress is not None:
                oWrapperProgress = vboxwrappers.ProgressWrapper(oCurProgress, self.oTstDrv.oVBoxMgr,
                                                                self.oTstDrv, "installAdditions");
                oWrapperProgress.wait(cMsTimeout = 10 * 60 * 1000);
                if not oWrapperProgress.isSuccess():
                    oWrapperProgress.logResult(fIgnoreErrors = False);
                    fRc = False;
            else:
                fRc = reporter.error('No progress object returned');

        # Store the result and try download logs anyway.
        fGaRc = fRc;
        fRc, oGuestSession = self.createSession(oSession, 'Session for user: vbox',
                                                'vbox', 'password', 10 * 1000, True);
        if fRc is True:
            (fRc, oGuestSession) = self.rebootVMAndCheckReady(oSession, oGuestSession);
            if fRc is True:
                # Add the Windows Guest Additions installer files to the files we want to download
                # from the guest.
                sGuestAddsDir = 'C:/Program Files/Oracle/VirtualBox Guest Additions/';
                asLogFiles.append(sGuestAddsDir + 'install.log');
                # Note: There won't be a install_ui.log because of the silent installation.
                asLogFiles.append(sGuestAddsDir + 'install_drivers.log');
                # Download log files.
                # Ignore errors as all files above might not be present (or in different locations)
                # on different Windows guests.
                #
                self.downloadFiles(oGuestSession, asLogFiles, fIgnoreErrors = True);
            else:
                reporter.error('Reboot after installing GuestAdditions failed');
        else:
            reporter.error('Create session for user vbox after GA updating failed');
        reporter.testDone();
        return (fRc and fGaRc, oGuestSession);

    def installVirtualBox(self, oGuestSession):
        """
        Install VirtualBox in the guest.

        Uploads the installer (.msi or .exe) to C:\\Temp and runs it silently;
        the installation log is downloaded afterwards.

        Returns True on success, False otherwise (failures are reported).
        """
        reporter.testStart('Install Virtualbox into the guest VM');
        # Used windows image already contains the C:\Temp
        sTestBuild = self._findFile(self.sVBoxInstaller, self.asTestBuildDirs);
        fRc = sTestBuild is not None;
        if fRc:
            # Only derive/log the basename after the None check above:
            # os.path.basename(None) raises a TypeError.
            reporter.log("Virtualbox install file: %s" % os.path.basename(sTestBuild));
            fRc = self.uploadFile(oGuestSession, sTestBuild,
                                  'C:\\Temp\\' + os.path.basename(sTestBuild));
        else:
            reporter.error("VirtualBox install package is not defined");

        if not fRc:
            reporter.error('Upload the vbox installer into guest VM failed');
        else:
            if sTestBuild.endswith('.msi'):
                sLogFile = 'C:/Temp/VBoxInstallLog.txt';
                (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Installing VBox',
                                                          600 * 1000, 'C:\\Windows\\System32\\msiexec.exe',
                                                          ['msiexec', '/quiet', '/norestart', '/i',
                                                           'C:\\Temp\\' + os.path.basename(sTestBuild),
                                                           '/lv', sLogFile],
                                                          False, True);
                if not fRc:
                    reporter.error('Installing the VBox from msi installer failed');
            else:
                sLogFile = 'C:/Temp/Virtualbox/VBoxInstallLog.txt';
                (fRc, _, _, _) = self.guestProcessExecute(oGuestSession, 'Installing VBox',
                                                          600 * 1000, 'C:\\Temp\\' + os.path.basename(sTestBuild),
                                                          ['C:\\Temp\\' + os.path.basename(sTestBuild), '-vvvv',
                                                           '--silent', '--logging',
                                                           '--msiparams', 'REBOOT=ReallySuppress'],
                                                          False, True);
                if not fRc:
                    reporter.error('Installing the VBox failed');
                else:
                    (_, _, _, aBuf) = self.guestProcessExecute(oGuestSession, 'Check installation',
                                                               240 * 1000, 'C:\\Windows\\System32\\cmd.exe',
                                                               ['c:\\Windows\\System32\\cmd.exe', '/c',
                                                                'dir', 'C:\\Program Files\\Oracle\\VirtualBox\\*.*'],
                                                               True, True);
                    reporter.log('Content of VirtualBox folder:');
                    reporter.log(str(aBuf));
            asLogFiles = [sLogFile,];
            self.downloadFiles(oGuestSession, asLogFiles, fIgnoreErrors = True);
        reporter.testDone();
        return fRc;

    def convertDiskToPartitionPrefix(self, sDisk):
        """
        Converts \\\\.\\PhysicalDriveX into the \\\\.\\HarddiskXPartition prefix.
        Returns None when sDisk does not match the PhysicalDrive pattern.
        """
        oMatch = re.match(r'^\\\\.\\PhysicalDrive(\d+)$', sDisk);
        if oMatch is None:
            return None;
        return r'\\.\Harddisk' + oMatch.group(1) + 'Partition';
+
class tdStorageRawDrive(vbox.TestDriver): # pylint: disable=too-many-instance-attributes
    """
    Autostart testcase.

    Drives the raw-drive storage test: for each test HDD the guest VM is
    booted, Guest Additions and a VirtualBox build are installed in it, and
    VMDK raw-drive access to the attached disk is exercised.
    """
    ksOsLinux = 'tst-linux';
    ksOsWindows = 'tst-win';
    ksOsDarwin = 'tst-darwin';
    ksOsSolaris = 'tst-solaris';
    ksOsFreeBSD = 'tst-freebsd';
    ksBootSectorPath = '6.1/storage/t-bootsector.bin';
    kasHdds = ['6.1/storage/t-gpt.vdi', '6.1/storage/t-mbr.vdi'];

    def __init__(self):
        vbox.TestDriver.__init__(self);
        self.asRsrcs = None;
        self.asSkipVMs = [];
        ## @todo r=bird: The --test-build-dirs option as primary way to get the installation files to test
        ## is not an acceptable test practice as we don't know wtf you're testing. See defect for more.
        self.asTestBuildDirs = [os.path.join(self.sScratchPath, 'bin'),];
        self.sGuestAdditionsIso = None; #'D:/AlexD/TestBox/TestAdditionalFiles/VBoxGuestAdditions_6.1.2.iso';
        oSet = vboxtestvms.TestVmSet(self.oTestVmManager, acCpus = [2], asVirtModes = ['hwvirt-np',], fIgnoreSkippedVm = True);
        # pylint: disable=line-too-long
        # Only the Linux flavour is currently enabled; the others are either
        # unimplemented or temporarily disabled (kept for reference).
        self.asTestVmClasses = {
            'win' : None, #tdStorageRawDriveOsWin(oSet, self, self.ksOsWindows, 'Windows7_64', \
                          #'6.0/windows7piglit/windows7piglit.vdi', eNic0Type = None, cMbRam = 2048, \
                          #cCpus = 2, fPae = True, sGuestAdditionsIso = self.getGuestAdditionsIso(),
                          #sBootSector = self.ksBootSectorPath),
            'linux' : tdStorageRawDriveOsLinux(oSet, self, self.ksOsLinux, 'Ubuntu_64', \
                                               '6.0/ub1804piglit/ub1804piglit.vdi', eNic0Type = None, \
                                               cMbRam = 2048, cCpus = 2, fPae = None, sGuestAdditionsIso = self.getGuestAdditionsIso(),
                                               sBootSector = self.ksBootSectorPath),
            'solaris' : None, #'tdAutostartOsSolaris',
            'darwin' : None #'tdAutostartOsDarwin'
        };
        oSet.aoTestVms.extend([oTestVm for oTestVm in self.asTestVmClasses.values() if oTestVm is not None]);
        # By default only run the test VM matching the host (build) OS.
        sOs = self.getBuildOs();
        if sOs in self.asTestVmClasses:
            for oTestVM in oSet.aoTestVms:
                if oTestVM is not None:
                    oTestVM.fSkip = oTestVM != self.asTestVmClasses[sOs];
        # pylint: enable=line-too-long
        self.oTestVmSet = oSet;

    #
    # Overridden methods.
    #

    def showUsage(self):
        """
        Shows the testdriver usage, appending our own options.
        """
        rc = vbox.TestDriver.showUsage(self);
        reporter.log('');
        reporter.log('tdAutostart Options:');
        reporter.log('  --test-build-dirs <path1[,path2[,...]]>');
        reporter.log('      The list of directories with VirtualBox distros. Overrides default path.');
        reporter.log('      Default path is $TESTBOX_SCRATCH_PATH/bin.');
        reporter.log('  --vbox-<os>-build <path>');
        reporter.log('      The path to vbox build for the specified OS.');
        reporter.log('      The OS can be one of "win", "linux", "solaris" and "darwin".');
        reporter.log('      This option also enables corresponding VM for testing.');
        reporter.log('      (Default behaviour is testing only VM having host-like OS.)');
        return rc;

    def parseOption(self, asArgs, iArg): # pylint: disable=too-many-branches,too-many-statements
        """
        Parses our own options, deferring everything else to the base class.
        """
        if asArgs[iArg] == '--test-build-dirs':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--test-build-dirs" takes a path argument');
            self.asTestBuildDirs = asArgs[iArg].split(',');
            for oTestVm in self.oTestVmSet.aoTestVms:
                oTestVm.asTestBuildDirs = self.asTestBuildDirs;
        elif asArgs[iArg] in [ '--vbox-%s-build' % sKey for sKey in self.asTestVmClasses]:
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "%s" takes a path argument' % (asArgs[iArg - 1],));
            oMatch = re.match("--vbox-([^-]+)-build", asArgs[iArg - 1]);
            if oMatch is not None:
                sOs = oMatch.group(1);
                oTestVm = self.asTestVmClasses.get(sOs);
                if oTestVm is not None:
                    # Explicitly selecting a build for an OS un-skips that VM.
                    oTestVm.sTestBuild = asArgs[iArg];
                    oTestVm.fSkip = False;
        else:
            return vbox.TestDriver.parseOption(self, asArgs, iArg);
        return iArg + 1;

    def getResourceSet(self):
        """
        Returns the list of resources (disks, boot sector) this test needs.
        """
        asRsrcs = self.kasHdds[:];
        asRsrcs.extend([self.ksBootSectorPath,]);
        asRsrcs.extend(vbox.TestDriver.getResourceSet(self));
        return asRsrcs;

    def actionConfig(self):
        """
        Configures the test VMs.
        """
        if not self.importVBoxApi(): # So we can use the constant below.
            return False;
        return self.oTestVmSet.actionConfig(self);

    def actionExecute(self):
        """
        Execute the testcase.
        """
        return self.oTestVmSet.actionExecute(self, self.testAutostartOneVfg);

    #
    # Test execution helpers.
    #
    def testAutostartOneVfg(self, oVM, oTestVm):
        """
        Runs the raw-drive test for one VM configuration, once per test HDD.
        Returns True on overall success, False otherwise.
        """
        fRc = True;
        self.logVmInfo(oVM);

        for sHdd in self.kasHdds:
            reporter.testStart('%s with %s disk' % ( oTestVm.sVmName, sHdd))
            fRc = oTestVm.reattachHdd(oVM, sHdd, self.kasHdds);
            if fRc:
                oSession = self.startVmByName(oTestVm.sVmName);
                if oSession is not None:
                    (fRc, oGuestSession) = oTestVm.waitVmIsReady(oSession, True);
                    if fRc:
                        (fRc, oGuestSession) = oTestVm.installAdditions(oSession, oGuestSession, oVM);
                        if fRc:
                            fRc = oTestVm.installVirtualBox(oGuestSession);
                            if fRc:
                                (fRc, sRawDrive) = oTestVm.listHostDrives(oGuestSession, sHdd);
                                if fRc:
                                    fRc = oTestVm.createDrives(oGuestSession, sHdd, sRawDrive);
                                    if not fRc:
                                        reporter.error('Create VMDK raw drives failed');
                                else:
                                    reporter.error('List host drives failed');
                            else:
                                reporter.error('Installing VirtualBox in the guest failed');
                        else:
                            reporter.error('Creating Guest Additions failed');
                    else:
                        reporter.error('Waiting for start VM failed');
                    # Best-effort shutdown/cleanup - ignore failures here.
                    if oGuestSession is not None:
                        try:    oTestVm.powerDownVM(oGuestSession);
                        except: pass;
                    try:    self.terminateVmBySession(oSession);
                    except: pass;
                    fRc = oSession.close() and fRc and True; # pychecker hack.
                    oSession = None;
                else:
                    fRc = False;
            else:
                reporter.error('Attaching %s to %s failed' % (sHdd, oTestVm.sVmName));
            reporter.testDone();
        return fRc;
+
if __name__ == '__main__':
    # Standalone execution: hand over to the test driver and exit with its status.
    sys.exit(tdStorageRawDrive().main(sys.argv));
diff --git a/src/VBox/ValidationKit/tests/storage/tdStorageSnapshotMerging1.py b/src/VBox/ValidationKit/tests/storage/tdStorageSnapshotMerging1.py
new file mode 100755
index 00000000..d6904114
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/tdStorageSnapshotMerging1.py
@@ -0,0 +1,414 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# $Id: tdStorageSnapshotMerging1.py $
+
+"""
+VirtualBox Validation Kit - Storage snapshotting and merging testcase.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2013-2023 Oracle and/or its affiliates.
+
+This file is part of VirtualBox base platform packages, as
+available from https://www.virtualbox.org.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation, in version 3 of the
+License.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <https://www.gnu.org/licenses>.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+in the VirtualBox distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+
+SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+"""
+__version__ = "$Revision: 155244 $"
+
+
+# Standard Python imports.
+import os;
+import sys;
+
+# Only the main script needs to modify the path.
+try: __file__
+except: __file__ = sys.argv[0];
+g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
+sys.path.append(g_ksValidationKitDir);
+
+# Validation Kit imports.
+from common import utils;
+from testdriver import reporter;
+from testdriver import base;
+from testdriver import vbox;
+from testdriver import vboxcon;
+from testdriver import vboxwrappers;
+
# Python 3 hacks:
if sys.version_info[0] >= 3:
    # Alias the removed Python 2 'long' builtin so the CRC constants below work on both versions.
    long = int;     # pylint: disable=redefined-builtin,invalid-name
+
+
class tdStorageSnapshot(vbox.TestDriver): # pylint: disable=too-many-instance-attributes
    """
    Storage benchmark.

    Creates a VM with a base disk cloned from a reference image, takes a
    snapshot, resizes/clones a diff image onto it, deletes the snapshot
    (forcing a merge) and verifies the merged result by CRC32.
    """
    def __init__(self):
        vbox.TestDriver.__init__(self);
        self.asRsrcs = None;
        self.oGuestToGuestVM = None;
        self.oGuestToGuestSess = None;
        self.oGuestToGuestTxs = None;
        self.asStorageCtrlsDef = ['AHCI'];
        self.asStorageCtrls = self.asStorageCtrlsDef;
        #self.asDiskFormatsDef = ['VDI', 'VMDK', 'VHD', 'QED', 'Parallels', 'QCOW', 'iSCSI'];
        self.asDiskFormatsDef = ['VDI', 'VMDK', 'VHD'];
        self.asDiskFormats = self.asDiskFormatsDef;
        # NOTE(review): 100 MiB of random data allocated up front; it is not
        # referenced anywhere in this class - confirm whether it is still needed.
        self.sRndData = os.urandom(100*1024*1024);

    #
    # Overridden methods.
    #
    def showUsage(self):
        """
        Shows the testdriver usage, appending our own options.
        """
        rc = vbox.TestDriver.showUsage(self);
        reporter.log('');
        reporter.log('tdStorageSnapshot1 Options:');
        reporter.log('  --storage-ctrls <type1[:type2[:...]]>');
        reporter.log('      Default: %s' % (':'.join(self.asStorageCtrls)));
        reporter.log('  --disk-formats  <type1[:type2[:...]]>');
        reporter.log('      Default: %s' % (':'.join(self.asDiskFormats)));
        return rc;

    def parseOption(self, asArgs, iArg): # pylint: disable=too-many-branches,too-many-statements
        """
        Parses our own options, deferring everything else to the base class.
        """
        if asArgs[iArg] == '--storage-ctrls':
            iArg += 1;
            if iArg >= len(asArgs):
                raise base.InvalidOption('The "--storage-ctrls" takes a colon separated list of Storage controller types');
            self.asStorageCtrls = asArgs[iArg].split(':');
        elif asArgs[iArg] == '--disk-formats':
            iArg += 1;
            if iArg >= len(asArgs): raise base.InvalidOption('The "--disk-formats" takes a colon separated list of disk formats');
            self.asDiskFormats = asArgs[iArg].split(':');
        else:
            return vbox.TestDriver.parseOption(self, asArgs, iArg);
        return iArg + 1;

    def getResourceSet(self):
        """
        Returns the reference disk images needed by the test.
        """
        # Construct the resource list the first time it's queried.
        if self.asRsrcs is None:
            self.asRsrcs = ['5.3/storage/mergeMedium/t-orig.vdi',
                            '5.3/storage/mergeMedium/t-fixed.vdi',
                            '5.3/storage/mergeMedium/t-resized.vdi'];
        return self.asRsrcs;

    def actionExecute(self):
        """
        Execute the testcase.
        """
        fRc = self.test1();
        return fRc;

    def resizeMedium(self, oMedium, cbNewSize):
        """
        Grows oMedium to cbNewSize bytes.

        Only normal, VDI-format hard disks can be resized, and only growing
        is supported. Returns True on success, False otherwise.
        """
        if oMedium.deviceType is not vboxcon.DeviceType_HardDisk:
            return False;

        if oMedium.type is not vboxcon.MediumType_Normal:
            return False;

        #currently only VDI can be resizable. Medium variant is not checked, because testcase creates disks itself
        oMediumFormat = oMedium.mediumFormat;
        if oMediumFormat.id != 'VDI':
            return False;

        cbCurrSize = oMedium.logicalSize;
        # currently reduce is not supported
        if cbNewSize < cbCurrSize:
            return False;

        try:
            oProgressCom = oMedium.resize(cbNewSize);
        except:
            reporter.logXcpt('IMedium::resize failed on %s' % (oMedium.name));
            return False;
        oProgress = vboxwrappers.ProgressWrapper(oProgressCom, self.oVBoxMgr, self.oVBox.oTstDrv,
                                                 'Resize medium %s' % (oMedium.name));
        oProgress.wait(cMsTimeout = 15*60*1000); # 15 min
        oProgress.logResult();
        return True;

    def getMedium(self, oVM, sController):
        """
        Returns the first normal hard-disk medium attached to sController of
        oVM, or None when there is none.
        """
        oMediumAttachments = oVM.getMediumAttachmentsOfController(sController);

        for oAttachment in oMediumAttachments:
            oMedium = oAttachment.medium;
            if oMedium.deviceType is not vboxcon.DeviceType_HardDisk:
                continue;
            if oMedium.type is not vboxcon.MediumType_Normal:
                continue;
            return oMedium;

        return None;

    def getSnapshotMedium(self, oSnapshot, sController):
        """
        Returns the diff child medium created for oSnapshot on sController,
        or None when it cannot be found.
        """
        oVM = oSnapshot.machine;
        oMedium = self.getMedium(oVM, sController);

        aoMediumChildren = self.oVBoxMgr.getArray(oMedium, 'children')
        if aoMediumChildren is None or not aoMediumChildren:
            return None;

        # NOTE(review): this matches snapshot ids against the machine id -
        # looks intentional for the single-snapshot setup here, but verify.
        for oChildMedium in aoMediumChildren:
            for uSnapshotId in oChildMedium.getSnapshotIds(oVM.id):
                if uSnapshotId == oVM.id:
                    return oChildMedium;

        return None;

    def openMedium(self, sHd, fImmutable = False):
        """
        Opens medium in readonly mode.
        Returns Medium object on success and None on failure. Error information is logged.
        """
        sFullName = self.oVBox.oTstDrv.getFullResourceName(sHd);
        try:
            oHd = self.oVBox.findHardDisk(sFullName);
        except:
            try:
                if self.fpApiVer >= 4.1:
                    oHd = self.oVBox.openMedium(sFullName, vboxcon.DeviceType_HardDisk, vboxcon.AccessMode_ReadOnly, False);
                elif self.fpApiVer >= 4.0:
                    oHd = self.oVBox.openMedium(sFullName, vboxcon.DeviceType_HardDisk, vboxcon.AccessMode_ReadOnly);
                else:
                    oHd = self.oVBox.openHardDisk(sFullName, vboxcon.AccessMode_ReadOnly, False, "", False, "");

            except:
                reporter.errorXcpt('failed to open hd "%s"' % (sFullName));
                return None;

        try:
            if fImmutable:
                oHd.type = vboxcon.MediumType_Immutable;
            else:
                oHd.type = vboxcon.MediumType_Normal;

        except:
            if fImmutable:
                reporter.errorXcpt('failed to set hd "%s" immutable' % (sHd));
            else:
                reporter.errorXcpt('failed to set hd "%s" normal' % (sHd));

            return None;

        return oHd;

    def cloneMedium(self, oSrcHd, oTgtHd):
        """
        Clones medium into target medium.
        """
        try:
            oProgressCom = oSrcHd.cloneTo(oTgtHd, (vboxcon.MediumVariant_Standard, ), None);
        except:
            reporter.errorXcpt('failed to clone medium %s to %s' % (oSrcHd.name, oTgtHd.name));
            return False;
        oProgress = vboxwrappers.ProgressWrapper(oProgressCom, self.oVBoxMgr, self.oVBox.oTstDrv,
                                                 'clone base disk %s to %s' % (oSrcHd.name, oTgtHd.name));
        oProgress.wait(cMsTimeout = 15*60*1000); # 15 min
        oProgress.logResult();
        return True;

    def deleteVM(self, oVM):
        """
        Unregisters and deletes the given VM, logging (but otherwise
        ignoring) any errors. Always returns None.
        """
        try:
            oVM.unregister(vboxcon.CleanupMode_DetachAllReturnNone);
        except:
            reporter.logXcpt();

        if self.fpApiVer >= 4.0:
            try:
                if self.fpApiVer >= 4.3:
                    oProgressCom = oVM.deleteConfig([]);
                else:
                    oProgressCom = oVM.delete(None);
            except:
                reporter.logXcpt();
            else:
                oProgress = vboxwrappers.ProgressWrapper(oProgressCom, self.oVBoxMgr, self.oVBox.oTstDrv,
                                                         'Delete VM %s' % (oVM.name));
                oProgress.wait(cMsTimeout = 15*60*1000); # 15 min
                oProgress.logResult();
        else:
            try:    oVM.deleteSettings();
            except: reporter.logXcpt();

        return None;

    #
    # Test execution helpers.
    #

    def test1OneCfg(self, eStorageController, oDskFmt):
        """
        Runs the specified VM thru test #1.

        Returns a success indicator on the general test execution. This is not
        the actual test result.
        """

        # Pick the hard-disk file extension for the format; initialize to None
        # so a format without a hard-disk extension fails cleanly instead of
        # raising NameError below.
        sExt = None;
        (asExts, aTypes) = oDskFmt.describeFileExtensions()
        for i in range(0, len(asExts)): #pylint: disable=consider-using-enumerate
            if aTypes[i] is vboxcon.DeviceType_HardDisk:
                sExt = '.' + asExts[i]
                break

        if sExt is None:
            return False;

        oOrigBaseHd = self.openMedium('5.3/storage/mergeMedium/t-orig.vdi');
        if oOrigBaseHd is None:
            return False;

        #currently only VDI can be resizable. Medium variant is not checked, because testcase creates disks itself
        fFmtDynamic = oDskFmt.id == 'VDI';
        sOrigWithDiffHd = '5.3/storage/mergeMedium/t-fixed.vdi'
        uOrigCrc = long(0x7a417cbb);

        if fFmtDynamic:
            sOrigWithDiffHd = '5.3/storage/mergeMedium/t-resized.vdi';
            uOrigCrc = long(0xa8f5daa3);

        oOrigWithDiffHd = self.openMedium(sOrigWithDiffHd);
        if oOrigWithDiffHd is None:
            return False;

        oVM = self.createTestVM('testvm', 1, None);
        if oVM is None:
            return False;

        sController = self.controllerTypeToName(eStorageController);

        # Reconfigure the VM
        oSession = self.openSession(oVM);
        if oSession is None:
            return False;
        # Attach HD

        fRc = True;
        sFile = 't-base' + sExt;
        sHddPath = os.path.join(self.oVBox.oTstDrv.sScratchPath, sFile);
        oHd = oSession.createBaseHd(sHddPath, sFmt=oDskFmt.id, cb=oOrigBaseHd.logicalSize,
                                    cMsTimeout = 15 * 60 * 1000); # 15 min
        #if oSession.createBaseHd can't create disk because it exists, oHd will point to some stub object anyway
        fRc = fRc and oHd is not None and (oHd.logicalSize == oOrigBaseHd.logicalSize);
        fRc = fRc and self.cloneMedium(oOrigBaseHd, oHd);

        fRc = fRc and oSession.ensureControllerAttached(sController);
        fRc = fRc and oSession.setStorageControllerType(eStorageController, sController);
        fRc = fRc and oSession.saveSettings();
        fRc = fRc and oSession.attachHd(sHddPath, sController, iPort = 0, fImmutable=False, fForceResource=False)

        if fRc:
            oSession.takeSnapshot('Base snapshot');
            oSnapshot = oSession.findSnapshot('Base snapshot');

            if oSnapshot is not None:
                oSnapshotMedium = self.getSnapshotMedium(oSnapshot, sController);
                fRc = oSnapshotMedium is not None;

                if fFmtDynamic:
                    fRc = fRc and self.resizeMedium(oSnapshotMedium, oOrigWithDiffHd.logicalSize);
                fRc = fRc and self.cloneMedium(oOrigWithDiffHd, oSnapshotMedium);
                fRc = fRc and oSession.deleteSnapshot(oSnapshot.id, cMsTimeout = 120 * 1000);
            else:
                # Snapshot lookup failed; don't carry on with a stale fRc.
                fRc = False;

            if fRc:
                # disk for result test by checksum
                sResFilePath = os.path.join(self.oVBox.oTstDrv.sScratchPath, 't_res.vmdk');
                sResFilePathRaw = os.path.join(self.oVBox.oTstDrv.sScratchPath, 't_res-flat.vmdk');
                oResHd = oSession.createBaseHd(sResFilePath, sFmt='VMDK', cb=oOrigWithDiffHd.logicalSize,
                                               tMediumVariant = (vboxcon.MediumVariant_Fixed, ),
                                               cMsTimeout = 15 * 60 * 1000); # 15 min
                fRc = oResHd is not None;
                fRc = fRc and self.cloneMedium(oHd, oResHd);

                uResCrc32 = long(0);
                if fRc:
                    uResCrc32 = long(utils.calcCrc32OfFile(sResFilePathRaw));
                    if uResCrc32 == uOrigCrc:
                        reporter.log('Snapshot merged successfully. Crc32 is correct');
                        fRc = True;
                    else:
                        reporter.error('Snapshot merging failed. Crc32 is invalid');
                        fRc = False;

                self.oVBox.deleteHdByMedium(oResHd);

        if oSession is not None:
            if oHd is not None:
                oSession.detachHd(sController, iPort = 0, iDevice = 0);

            oSession.saveSettings(fClose = True);
            if oHd is not None:
                self.oVBox.deleteHdByMedium(oHd);

        self.deleteVM(oVM);
        return fRc;

    def test1(self):
        """
        Executes test #1 thru the various configurations.
        """
        if not self.importVBoxApi():
            return False;

        sVmName = 'testvm';
        reporter.testStart(sVmName);

        aoDskFmts = self.oVBoxMgr.getArray(self.oVBox.systemProperties, 'mediumFormats')
        if aoDskFmts is None or not aoDskFmts:
            # Balance the testStart above before bailing out.
            reporter.testDone();
            return False;

        fRc = True;
        for sStorageCtrl in self.asStorageCtrls:
            reporter.testStart(sStorageCtrl);
            if sStorageCtrl == 'AHCI':
                eStorageCtrl = vboxcon.StorageControllerType_IntelAhci;
            elif sStorageCtrl == 'IDE':
                eStorageCtrl = vboxcon.StorageControllerType_PIIX4;
            elif sStorageCtrl == 'LsiLogicSAS':
                eStorageCtrl = vboxcon.StorageControllerType_LsiLogicSas;
            elif sStorageCtrl == 'LsiLogic':
                eStorageCtrl = vboxcon.StorageControllerType_LsiLogic;
            elif sStorageCtrl == 'BusLogic':
                eStorageCtrl = vboxcon.StorageControllerType_BusLogic;
            else:
                eStorageCtrl = None;

            for oDskFmt in aoDskFmts:
                if oDskFmt.id in self.asDiskFormats:
                    reporter.testStart('%s' % (oDskFmt.id));
                    fRc = self.test1OneCfg(eStorageCtrl, oDskFmt);
                    reporter.testDone();
                    if not fRc:
                        break;

            reporter.testDone();
            if not fRc:
                break;

        reporter.testDone();
        return fRc;
+
if __name__ == '__main__':
    # Standalone execution: hand over to the test driver and exit with its status.
    sys.exit(tdStorageSnapshot().main(sys.argv));
diff --git a/src/VBox/ValidationKit/tests/storage/tdStorageStress1.py b/src/VBox/ValidationKit/tests/storage/tdStorageStress1.py
new file mode 100755
index 00000000..55beb6fd
--- /dev/null
+++ b/src/VBox/ValidationKit/tests/storage/tdStorageStress1.py
@@ -0,0 +1,513 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Storage testcase using xfstests.
+"""
+
+__copyright__ = \
+"""
+Copyright (C) 2012-2023 Oracle and/or its affiliates.
+
+This file is part of VirtualBox base platform packages, as
+available from https://www.virtualbox.org.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation, in version 3 of the
+License.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <https://www.gnu.org/licenses>.
+
+The contents of this file may alternatively be used under the terms
+of the Common Development and Distribution License Version 1.0
+(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
+in the VirtualBox distribution, in which case the provisions of the
+CDDL are applicable instead of those of the GPL.
+
+You may elect to license modified versions of this file under the
+terms and conditions of either the GPL or the CDDL or both.
+
+SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
+"""
+__version__ = "$Id: tdStorageStress1.py $"
+
+
+# Standard Python imports.
+import os;
+import sys;
+
+# Only the main script needs to modify the path.
+try: __file__
+except: __file__ = sys.argv[0];
+g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
+sys.path.append(g_ksValidationKitDir);
+
+# Validation Kit imports.
+from testdriver import reporter;
+from testdriver import base;
+from testdriver import vbox;
+from testdriver import vboxcon;
+
+
+class tdStorageStress(vbox.TestDriver): # pylint: disable=too-many-instance-attributes
+    """
+    Storage testcase.
+
+    Stress-tests VirtualBox storage by running xfstests inside a Linux guest
+    against two attached test disks, across combinations of storage
+    controller, disk format, host directory, guest filesystem, CPU count and
+    virtualization mode.
+    """
+
+    def __init__(self):
+        vbox.TestDriver.__init__(self);
+        # Lazily constructed resource list, see getResourceSet().
+        self.asRsrcs = None;
+        self.oGuestToGuestVM = None;
+        self.oGuestToGuestSess = None;
+        self.oGuestToGuestTxs = None;
+        # VM selection; --test-vms / --skip-vms narrow this down.
+        self.asTestVMsDef = ['tst-debian'];
+        self.asTestVMs = self.asTestVMsDef;
+        self.asSkipVMs = [];
+        # Virtualization modes, CPU counts and storage controller types to sweep.
+        self.asVirtModesDef = ['hwvirt', 'hwvirt-np', 'raw',]
+        self.asVirtModes = self.asVirtModesDef
+        self.acCpusDef = [1, 2,]
+        self.acCpus = self.acCpusDef;
+        self.asStorageCtrlsDef = ['AHCI', 'IDE', 'LsiLogicSAS', 'LsiLogic', 'BusLogic'];
+        self.asStorageCtrls = self.asStorageCtrlsDef;
+        self.asDiskFormatsDef = ['VDI', 'VMDK', 'VHD', 'QED', 'Parallels', 'QCOW'];
+        self.asDiskFormats = self.asDiskFormatsDef;
+        self.asTestsDef = ['xfstests'];
+        self.asTests = self.asTestsDef;
+        # NOTE(review): naming is reversed vs. the other pairs here — the
+        # literal list is asGuestFs and the alias is asGuestFsDef; the
+        # behavior is unaffected since both refer to the same list object.
+        self.asGuestFs = ['xfs', 'ext4', 'btrfs'];
+        self.asGuestFsDef = self.asGuestFs;
+        self.asIscsiTargetsDef = ['aurora|iqn.2011-03.home.aurora:aurora.storagebench|1'];
+        self.asIscsiTargets = self.asIscsiTargetsDef;
+        # Host directories in which the two test disks get created;
+        # clearly developer-machine specific defaults, override via --disk-dirs.
+        self.asDirsDef = ['/run/media/alexander/OWCSSD/alexander', \
+                          '/run/media/alexander/CrucialSSD/alexander', \
+                          '/run/media/alexander/HardDisk/alexander', \
+                          '/home/alexander'];
+        self.asDirs = self.asDirsDef;
+
+ #
+ # Overridden methods.
+ #
+ def showUsage(self):
+ rc = vbox.TestDriver.showUsage(self);
+ reporter.log('');
+ reporter.log('tdStorageBenchmark1 Options:');
+ reporter.log(' --virt-modes <m1[:m2[:]]');
+ reporter.log(' Default: %s' % (':'.join(self.asVirtModesDef)));
+ reporter.log(' --cpu-counts <c1[:c2[:]]');
+ reporter.log(' Default: %s' % (':'.join(str(c) for c in self.acCpusDef)));
+ reporter.log(' --storage-ctrls <type1[:type2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asStorageCtrls)));
+ reporter.log(' --disk-formats <type1[:type2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asDiskFormats)));
+ reporter.log(' --disk-dirs <path1[:path2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asDirs)));
+ reporter.log(' --iscsi-targets <target1[:target2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asIscsiTargets)));
+ reporter.log(' --tests <test1[:test2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asTests)));
+ reporter.log(' --guest-fs <fs1[:fs2[:...]]>');
+ reporter.log(' Default: %s' % (':'.join(self.asGuestFs)));
+ reporter.log(' --test-vms <vm1[:vm2[:...]]>');
+ reporter.log(' Test the specified VMs in the given order. Use this to change');
+ reporter.log(' the execution order or limit the choice of VMs');
+ reporter.log(' Default: %s (all)' % (':'.join(self.asTestVMsDef)));
+ reporter.log(' --skip-vms <vm1[:vm2[:...]]>');
+ reporter.log(' Skip the specified VMs when testing.');
+ return rc;
+
+ def parseOption(self, asArgs, iArg): # pylint: disable=too-many-branches,too-many-statements
+ if asArgs[iArg] == '--virt-modes':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--virt-modes" takes a colon separated list of modes');
+ self.asVirtModes = asArgs[iArg].split(':');
+ for s in self.asVirtModes:
+ if s not in self.asVirtModesDef:
+ raise base.InvalidOption('The "--virt-modes" value "%s" is not valid; valid values are: %s' \
+ % (s, ' '.join(self.asVirtModesDef)));
+ elif asArgs[iArg] == '--cpu-counts':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--cpu-counts" takes a colon separated list of cpu counts');
+ self.acCpus = [];
+ for s in asArgs[iArg].split(':'):
+ try: c = int(s);
+ except: raise base.InvalidOption('The "--cpu-counts" value "%s" is not an integer' % (s,));
+ if c <= 0: raise base.InvalidOption('The "--cpu-counts" value "%s" is zero or negative' % (s,));
+ self.acCpus.append(c);
+ elif asArgs[iArg] == '--storage-ctrls':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--storage-ctrls" takes a colon separated list of Storage controller types');
+ self.asStorageCtrls = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--disk-formats':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--disk-formats" takes a colon separated list of disk formats');
+ self.asDiskFormats = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--disk-dirs':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--disk-dirs" takes a colon separated list of directories');
+ self.asDirs = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--iscsi-targets':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--iscsi-targets" takes a colon separated list of iscsi targets');
+ self.asIscsiTargets = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--tests':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--tests" takes a colon separated list of disk formats');
+ self.asTests = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--guest-fs':
+ iArg += 1;
+ if iArg >= len(asArgs):
+ raise base.InvalidOption('The "--guest-fs" takes a colon separated list of filesystem identifiers');
+ self.asGuestFs = asArgs[iArg].split(':');
+ elif asArgs[iArg] == '--test-vms':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--test-vms" takes colon separated list');
+ self.asTestVMs = asArgs[iArg].split(':');
+ for s in self.asTestVMs:
+ if s not in self.asTestVMsDef:
+ raise base.InvalidOption('The "--test-vms" value "%s" is not valid; valid values are: %s' \
+ % (s, ' '.join(self.asTestVMsDef)));
+ elif asArgs[iArg] == '--skip-vms':
+ iArg += 1;
+ if iArg >= len(asArgs): raise base.InvalidOption('The "--skip-vms" takes colon separated list');
+ self.asSkipVMs = asArgs[iArg].split(':');
+ for s in self.asSkipVMs:
+ if s not in self.asTestVMsDef:
+ reporter.log('warning: The "--test-vms" value "%s" does not specify any of our test VMs.' % (s));
+ else:
+ return vbox.TestDriver.parseOption(self, asArgs, iArg);
+ return iArg + 1;
+
+ def completeOptions(self):
+ # Remove skipped VMs from the test list.
+ for sVM in self.asSkipVMs:
+ try: self.asTestVMs.remove(sVM);
+ except: pass;
+
+ return vbox.TestDriver.completeOptions(self);
+
+ def getResourceSet(self):
+ # Construct the resource list the first time it's queried.
+ if self.asRsrcs is None:
+ self.asRsrcs = [];
+ if 'tst-debian' in self.asTestVMs:
+ self.asRsrcs.append('4.2/storage/debian.vdi');
+
+ return self.asRsrcs;
+
+    def actionConfig(self):
+        """
+        Locates the storage-I/O validation kit ISO and configures the test
+        VMs.  Returns True on success, False if the API import or VM
+        creation fails.
+        """
+        # Some stupid trickery to guess the location of the iso. ## fixme - testsuite unzip ++
+        sVBoxValidationKit_iso = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                              '../../VBoxValidationKitStorIo.iso'));
+        # NOTE(review): this second probe is byte-identical to the first one
+        # above and therefore dead — presumably one of them was meant to try
+        # a different relative path; confirm against the other storage tests.
+        if not os.path.isfile(sVBoxValidationKit_iso):
+            sVBoxValidationKit_iso = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                                  '../../VBoxValidationKitStorIo.iso'));
+        # Fall back to a couple of hard-coded developer-machine locations.
+        if not os.path.isfile(sVBoxValidationKit_iso):
+            sVBoxValidationKit_iso = '/mnt/ramdisk/vbox/svn/trunk/validationkit/VBoxValidationKitStorIo.iso';
+        if not os.path.isfile(sVBoxValidationKit_iso):
+            sVBoxValidationKit_iso = '/mnt/ramdisk/vbox/svn/trunk/testsuite/VBoxTestSuiteStorIo.iso';
+        # Walk up to ten directory levels from the CWD looking for either the
+        # validationkit or the old testsuite layout.
+        if not os.path.isfile(sVBoxValidationKit_iso):
+            sCur = os.getcwd();
+            for i in range(0, 10):
+                sVBoxValidationKit_iso = os.path.join(sCur, 'validationkit/VBoxValidationKitStorIo.iso');
+                if os.path.isfile(sVBoxValidationKit_iso):
+                    break;
+                sVBoxValidationKit_iso = os.path.join(sCur, 'testsuite/VBoxTestSuiteStorIo.iso');
+                if os.path.isfile(sVBoxValidationKit_iso):
+                    break;
+                sCur = os.path.abspath(os.path.join(sCur, '..'));
+            if i is None: pass; # shut up pychecker/pylint.
+        # Last-resort fixed mount points.
+        if not os.path.isfile(sVBoxValidationKit_iso):
+            sVBoxValidationKit_iso = '/mnt/VirtualBox/VBoxValidationKitStorIo.iso';
+        if not os.path.isfile(sVBoxValidationKit_iso):
+            sVBoxValidationKit_iso = '/mnt/VirtualBox/VBoxTestSuiteStorIo.iso';
+
+
+
+        # Make sure vboxapi has been imported so we can use the constants.
+        if not self.importVBoxApi():
+            return False;
+
+        #
+        # Configure the VMs we're going to use.
+        #
+
+        # Linux VMs
+        if 'tst-debian' in self.asTestVMs:
+            oVM = self.createTestVM('tst-debian', 1, '4.2/storage/debian.vdi', sKind = 'Debian_64', fIoApic = True, \
+                                    eNic0AttachType = vboxcon.NetworkAttachmentType_NAT, \
+                                    eNic0Type = vboxcon.NetworkAdapterType_Am79C973, \
+                                    sDvdImage = sVBoxValidationKit_iso);
+            if oVM is None:
+                return False;
+
+        return True;
+
+ def actionExecute(self):
+ """
+ Execute the testcase.
+ """
+ fRc = self.test1();
+ return fRc;
+
+
+ #
+ # Test execution helpers.
+ #
+
+    def test1RunTestProgs(self, oSession, oTxsSession, fRc, sTestName, sGuestFs):
+        """
+        Runs all the test programs on the test machine.
+
+        Formats /dev/sdb and /dev/sdc with sGuestFs inside the guest, mounts
+        them on /mnt/test and /mnt/scratch, copies xfstests off the CD-ROM
+        and runs its 'auto' group.  fRc carries the caller's status in; each
+        step short-circuits if an earlier one failed.  Returns the combined
+        success status.
+        """
+        _ = oSession;
+
+        reporter.testStart(sTestName);
+
+        sMkfsCmd = 'mkfs.' + sGuestFs;
+
+        # Prepare test disks, just create filesystem without partition
+        reporter.testStart('Preparation');
+        fRc = fRc and self.txsRunTest(oTxsSession, 'Create FS 1', 60000, \
+                                      '/sbin/' + sMkfsCmd,
+                                      (sMkfsCmd, '/dev/sdb'));
+
+        fRc = fRc and self.txsRunTest(oTxsSession, 'Create FS 2', 60000, \
+                                      '/sbin/' + sMkfsCmd,
+                                      (sMkfsCmd, '/dev/sdc'));
+
+        # Create test and scratch directory
+        fRc = fRc and self.txsRunTest(oTxsSession, 'Create /mnt/test', 10000, \
+                                      '/bin/mkdir',
+                                      ('mkdir', '/mnt/test'));
+
+        fRc = fRc and self.txsRunTest(oTxsSession, 'Create /mnt/scratch', 10000, \
+                                      '/bin/mkdir',
+                                      ('mkdir', '/mnt/scratch'));
+
+        # Mount test and scratch directory.
+        fRc = fRc and self.txsRunTest(oTxsSession, 'Mount /mnt/test', 10000, \
+                                      '/bin/mount',
+                                      ('mount', '/dev/sdb','/mnt/test'));
+
+        fRc = fRc and self.txsRunTest(oTxsSession, 'Mount /mnt/scratch', 10000, \
+                                      '/bin/mount',
+                                      ('mount', '/dev/sdc','/mnt/scratch'));
+
+        # ${CDROM}/${OS.ARCH} are expanded by TXS on the guest side.
+        fRc = fRc and self.txsRunTest(oTxsSession, 'Copying xfstests', 10000, \
+                                      '/bin/cp',
+                                      ('cp', '-r','${CDROM}/${OS.ARCH}/xfstests', '/tmp'));
+
+        reporter.testDone();
+
+        # Run xfstests (this sh + cd crap is required because the cwd for the script must be in the root
+        # of the xfstests directory...)
+        reporter.testStart('xfstests');
+        if fRc and 'xfstests' in self.asTests:
+            # One hour timeout; TEST_*/SCRATCH_* environment tells xfstests
+            # which devices and mount points to exercise.
+            fRc = self.txsRunTest(oTxsSession, 'xfstests', 3600000,
+                                  '/bin/sh',
+                                  ('sh', '-c', '(cd /tmp/xfstests && ./check -g auto)'),
+                                  ('TEST_DIR=/mnt/test', 'TEST_DEV=/dev/sdb', 'SCRATCH_MNT=/mnt/scratch', 'SCRATCH_DEV=/dev/sdc',
+                                   'FSTYP=' + sGuestFs));
+            reporter.testDone();
+        else:
+            reporter.testDone(fSkipped = True);
+
+        reporter.testDone(not fRc);
+        return fRc;
+
+ # pylint: disable=too-many-arguments
+
+    def test1OneCfg(self, sVmName, eStorageController, sDiskFormat, sDiskPath1, sDiskPath2, \
+                    sGuestFs, cCpus, fHwVirt, fNestedPaging):
+        """
+        Runs the specified VM thru test #1.
+
+        Reconfigures the VM with the given storage controller and two test
+        disks (created at sDiskPath1/sDiskPath2, or attached from an iSCSI
+        target when sDiskFormat is "iSCSI"), boots it, runs the test
+        programs, then detaches and deletes the disks again.
+
+        Returns a success indicator on the general test execution. This is not
+        the actual test result.
+        """
+        oVM = self.getVmByName(sVmName);
+
+        # Reconfigure the VM
+        fRc = True;
+        oSession = self.openSession(oVM);
+        if oSession is not None:
+            # Attach HD
+            fRc = oSession.ensureControllerAttached(self.controllerTypeToName(eStorageController));
+            fRc = fRc and oSession.setStorageControllerType(eStorageController, self.controllerTypeToName(eStorageController));
+
+            if sDiskFormat == "iSCSI":
+                # sDiskPath1 is expected to be 'address|target-name|lun';
+                # split it into the corresponding medium properties.
+                listNames = [];
+                listValues = [];
+                listValues = sDiskPath1.split('|');
+                listNames.append('TargetAddress');
+                listNames.append('TargetName');
+                listNames.append('LUN');
+
+                # createMedium replaced createHardDisk in API 5.0.
+                if self.fpApiVer >= 5.0:
+                    oHd = oSession.oVBox.createMedium(sDiskFormat, sDiskPath1, vboxcon.AccessMode_ReadWrite, \
+                                                      vboxcon.DeviceType_HardDisk);
+                else:
+                    oHd = oSession.oVBox.createHardDisk(sDiskFormat, sDiskPath1);
+                oHd.type = vboxcon.MediumType_Normal;
+                oHd.setProperties(listNames, listValues);
+
+                # Attach it.
+                if fRc is True:
+                    try:
+                        if oSession.fpApiVer >= 4.0:
+                            oSession.o.machine.attachDevice(self.controllerTypeToName(eStorageController),
+                                                            1, 0, vboxcon.DeviceType_HardDisk, oHd);
+                        else:
+                            oSession.o.machine.attachDevice(self.controllerTypeToName(eStorageController),
+                                                            1, 0, vboxcon.DeviceType_HardDisk, oHd.id);
+                    except:
+                        reporter.errorXcpt('attachDevice("%s",%s,%s,HardDisk,"%s") failed on "%s"' \
+                                           % (self.controllerTypeToName(eStorageController), 1, 0, oHd.id, oSession.sName) );
+                        fRc = False;
+                    else:
+                        reporter.log('attached "%s" to %s' % (sDiskPath1, oSession.sName));
+            else:
+                # Regular image-backed disks: create two 10 GiB HDs on ports 1 and 2.
+                fRc = fRc and oSession.createAndAttachHd(sDiskPath1, sDiskFormat, self.controllerTypeToName(eStorageController),
+                                                         cb = 10*1024*1024*1024, iPort = 1, fImmutable = False);
+                fRc = fRc and oSession.createAndAttachHd(sDiskPath2, sDiskFormat, self.controllerTypeToName(eStorageController),
+                                                         cb = 10*1024*1024*1024, iPort = 2, fImmutable = False);
+            fRc = fRc and oSession.enableVirtEx(fHwVirt);
+            fRc = fRc and oSession.enableNestedPaging(fNestedPaging);
+            fRc = fRc and oSession.setCpuCount(cCpus);
+            fRc = fRc and oSession.saveSettings();
+            fRc = oSession.close() and fRc and True; # pychecker hack.
+            oSession = None;
+        else:
+            fRc = False;
+
+        # Start up.
+        if fRc is True:
+            self.logVmInfo(oVM);
+            oSession, oTxsSession = self.startVmAndConnectToTxsViaTcp(sVmName, fCdWait = False, fNatForwardingForTxs = True);
+            if oSession is not None:
+                self.addTask(oTxsSession);
+
+                # Fudge factor - Allow the guest to finish starting up.
+                self.sleep(5);
+
+                fRc = self.test1RunTestProgs(oSession, oTxsSession, fRc, 'stress testing', sGuestFs);
+
+                # cleanup.
+                self.removeTask(oTxsSession);
+                self.terminateVmBySession(oSession)
+
+                # Remove disk
+                oSession = self.openSession(oVM);
+                if oSession is not None:
+                    try:
+                        oSession.o.machine.detachDevice(self.controllerTypeToName(eStorageController), 1, 0);
+                        oSession.o.machine.detachDevice(self.controllerTypeToName(eStorageController), 2, 0);
+
+                        # Remove storage controller if it is not an IDE controller.
+                        if eStorageController not in (vboxcon.StorageControllerType_PIIX3, vboxcon.StorageControllerType_PIIX4,):
+                            oSession.o.machine.removeStorageController(self.controllerTypeToName(eStorageController));
+
+                        oSession.saveSettings();
+                        oSession.oVBox.deleteHdByLocation(sDiskPath1);
+                        oSession.oVBox.deleteHdByLocation(sDiskPath2);
+                        oSession.saveSettings();
+                        oSession.close();
+                        oSession = None;
+                    except:
+                        reporter.errorXcpt('failed to detach/delete disks %s and %s from storage controller' % \
+                                           (sDiskPath1, sDiskPath2));
+                else:
+                    fRc = False;
+            else:
+                fRc = False;
+        return fRc;
+
+    def test1OneVM(self, sVmName):
+        """
+        Runs one VM thru the various configurations.
+
+        Sweeps the cross product of storage controller x disk format x host
+        directory x guest filesystem x CPU count x virtualization mode,
+        opening a nested reporter sub-test for each level.  Returns the
+        accumulated success status.
+        """
+        reporter.testStart(sVmName);
+        fRc = True;
+        for sStorageCtrl in self.asStorageCtrls:
+            reporter.testStart(sStorageCtrl);
+
+            # Map the controller name to the API enumeration value.
+            if sStorageCtrl == 'AHCI':
+                eStorageCtrl = vboxcon.StorageControllerType_IntelAhci;
+            elif sStorageCtrl == 'IDE':
+                eStorageCtrl = vboxcon.StorageControllerType_PIIX4;
+            elif sStorageCtrl == 'LsiLogicSAS':
+                eStorageCtrl = vboxcon.StorageControllerType_LsiLogicSas;
+            elif sStorageCtrl == 'LsiLogic':
+                eStorageCtrl = vboxcon.StorageControllerType_LsiLogic;
+            elif sStorageCtrl == 'BusLogic':
+                eStorageCtrl = vboxcon.StorageControllerType_BusLogic;
+            else:
+                eStorageCtrl = None;
+
+            for sDiskFormat in self.asDiskFormats:
+                reporter.testStart('%s' % (sDiskFormat,));
+
+                asPaths = self.asDirs;
+
+                for sDir in asPaths:
+                    reporter.testStart('%s' % (sDir,));
+
+                    # Both test disks live in the same host directory.
+                    sPathDisk1 = sDir + "/disk1.disk";
+                    sPathDisk2 = sDir + "/disk2.disk";
+
+                    for sGuestFs in self.asGuestFs:
+                        reporter.testStart('%s' % (sGuestFs,));
+
+                        for cCpus in self.acCpus:
+                            if cCpus == 1:  reporter.testStart('1 cpu');
+                            else:           reporter.testStart('%u cpus' % (cCpus,));
+
+                            for sVirtMode in self.asVirtModes:
+                                # Raw-mode cannot do SMP, skip that combination.
+                                if sVirtMode == 'raw' and cCpus > 1:
+                                    continue;
+                                hsVirtModeDesc = {};
+                                hsVirtModeDesc['raw'] = 'Raw-mode';
+                                hsVirtModeDesc['hwvirt'] = 'HwVirt';
+                                hsVirtModeDesc['hwvirt-np'] = 'NestedPaging';
+                                reporter.testStart(hsVirtModeDesc[sVirtMode]);
+
+                                fHwVirt       = sVirtMode != 'raw';
+                                fNestedPaging = sVirtMode == 'hwvirt-np';
+                                fRc = self.test1OneCfg(sVmName, eStorageCtrl, sDiskFormat, sPathDisk1, sPathDisk2, \
+                                                       sGuestFs, cCpus, fHwVirt, fNestedPaging) and fRc and True;
+                                reporter.testDone();
+                            reporter.testDone();
+                        reporter.testDone();
+                    reporter.testDone();
+                reporter.testDone();
+            reporter.testDone();
+        reporter.testDone();
+        return fRc;
+
+ def test1(self):
+ """
+ Executes test #1.
+ """
+
+ # Loop thru the test VMs.
+ for sVM in self.asTestVMs:
+ # run test on the VM.
+ if not self.test1OneVM(sVM):
+ fRc = False;
+ else:
+ fRc = True;
+
+ return fRc;
+
+
+
+# Standard test driver entry point: run the driver and propagate its exit code.
+if __name__ == '__main__':
+    sys.exit(tdStorageStress().main(sys.argv));
+