author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /qa/workunits/rgw
parent     Initial commit. (diff)
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/workunits/rgw')
-rwxr-xr-x  qa/workunits/rgw/run-s3tests.sh          |  83
-rwxr-xr-x  qa/workunits/rgw/s3_bucket_quota.pl      | 393
-rwxr-xr-x  qa/workunits/rgw/s3_multipart_upload.pl  | 151
-rwxr-xr-x  qa/workunits/rgw/s3_user_quota.pl        | 191
-rw-r--r--  qa/workunits/rgw/s3_utilities.pm         | 220
-rwxr-xr-x  qa/workunits/rgw/test_rgw_orphan_list.sh | 511
6 files changed, 1549 insertions(+), 0 deletions(-)
diff --git a/qa/workunits/rgw/run-s3tests.sh b/qa/workunits/rgw/run-s3tests.sh
new file mode 100755
index 00000000..913dd6a0
--- /dev/null
+++ b/qa/workunits/rgw/run-s3tests.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+set -ex
+
+# run s3-tests from current directory. assume working
+# ceph environment (radosgw-admin in path) and rgw on localhost:8000
+# (the vstart default).
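+#
+# usage (both arguments are optional):
+#   ./run-s3tests.sh [branch] [port]    # e.g. ./run-s3tests.sh master 8000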
+
+branch=$1
+[ -z "$1" ] && branch=master
+port=$2
+[ -z "$2" ] && port=8000 # this is vstart's default
+
+##
+
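+# NOTE: $root_path is expected to be set by the caller (pointing at the ceph
+# source tree); if it is unset, the elif below never matches and BIN_PATH
+# stays empty.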
+if [ -e CMakeCache.txt ]; then
+ BIN_PATH=$PWD/bin
+elif [ -e $root_path/../build/CMakeCache.txt ]; then
+ cd $root_path/../build
+ BIN_PATH=$PWD/bin
+fi
+PATH=$PATH:$BIN_PATH
+
+dir=tmp.s3-tests.$$
+
+# clone and bootstrap
+mkdir $dir
+cd $dir
+git clone https://github.com/ceph/s3-tests
+cd s3-tests
+git checkout ceph-$branch
+VIRTUALENV_PYTHON=/usr/bin/python2 ./bootstrap
+cd ../..
+
+# users
+akey1=access1
+skey1=secret1
+radosgw-admin user create --uid=s3test1 --display-name='tester1' \
+ --access-key=$akey1 --secret=$skey1 --email=tester1@ceph.com
+
+akey2=access2
+skey2=secret2
+radosgw-admin user create --uid=s3test2 --display-name='tester2' \
+ --access-key=$akey2 --secret=$skey2 --email=tester2@ceph.com
+
+cat <<EOF > s3.conf
+[DEFAULT]
+## replace with e.g. "localhost" to run against local software
+host = 127.0.0.1
+## uncomment the port to use something other than 80
+port = $port
+## say "no" to disable TLS
+is_secure = no
+[fixtures]
+## all the buckets created will start with this prefix;
+## {random} will be filled with random characters to pad
+## the prefix to 30 characters long, and avoid collisions
+bucket prefix = s3testbucket-{random}-
+[s3 main]
+## the tests assume two accounts are defined, "main" and "alt".
+## user_id is a 64-character hexstring
+user_id = s3test1
+## display name typically looks more like a unix login, "jdoe" etc
+display_name = tester1
+## replace these with your access keys
+access_key = $akey1
+secret_key = $skey1
+email = tester1@ceph.com
+[s3 alt]
+## another user account, used for ACL-related tests
+user_id = s3test2
+display_name = tester2
+## the "alt" user needs to have email set, too
+email = tester2@ceph.com
+access_key = $akey2
+secret_key = $skey2
+EOF
+
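+# run the suite, skipping tests tagged with the 'fails_on_rgw' attribute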
+S3TEST_CONF=`pwd`/s3.conf $dir/s3-tests/virtualenv/bin/nosetests -a '!fails_on_rgw' -v
+
+rm -rf $dir
+
+echo OK.
+
diff --git a/qa/workunits/rgw/s3_bucket_quota.pl b/qa/workunits/rgw/s3_bucket_quota.pl
new file mode 100755
index 00000000..7f5476ef
--- /dev/null
+++ b/qa/workunits/rgw/s3_bucket_quota.pl
@@ -0,0 +1,393 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_bucket_quota.pl - Script to test the rgw bucket quota functionality using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_bucket_quota.pl [--help]
+
+Examples:
+ perl s3_bucket_quota.pl
+ or
+ perl s3_bucket_quota.pl --help
+
+=head1 DESCRIPTION
+
+This script tests the rgw bucket quota functionality using the S3 interface
+and reports the test results.
+
+=head1 ARGUMENTS
+
+s3_bucket_quota.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+#use strict;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+our $mytestfilename;
+my $mytestfilename1;
+my $logmsg;
+my $kruft;
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+my $rgw_user = "qa_user";
+
+# Function that deletes the user $rgw_user and writes the result to the logfile.
+sub delete_user
+{
+ my $cmd = "$radosgw_admin user rm --uid=$rgw_user";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /aborting/){
+ print "user $rgw_user deleted\n";
+ } else {
+ print "user $rgw_user NOT deleted\n";
+ return 1;
+ }
+ return 0;
+}
+
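+# NOTE: the quota_* helpers below treat empty radosgw-admin output (no match
+# for /./) as success and any output at all as failure.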
+sub quota_set_max_size {
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
+ if ($set_quota !~ /./){
+ print "quota set for the bucket: $bucketname \n";
+ } else {
+ print "quota set failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_max_size_zero {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=0`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max size as zero\n");
+ } else {
+ fail ("quota set with max size 0 failed for the bucket: $bucketname \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_max_objs_zero {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=0`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max objects as zero\n");
+ } else {
+ fail ("quota set with max objects 0 failed for the bucket: $bucketname \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_neg_size {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=-1`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max size -1\n");
+ } else {
+ fail ("quota set failed for the bucket: $bucketname with max size -1 \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_neg_objs {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=-1`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname max objects -1 \n");
+ } else {
+ fail ("quota set failed for the bucket: $bucketname \n with max objects -1");
+ }
+ delete_bucket();
+}
+
+sub quota_set_user_objs {
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
+ my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
+ if ($set_quota1 !~ /./){
+ print "bucket quota max_objs set for the given user: $bucketname \n";
+ } else {
+ print "bucket quota max_objs set failed for the given user: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_user_size {
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
+ my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
+ if ($set_quota1 !~ /./){
+ print "bucket quota max size set for the given user: $bucketname \n";
+ } else {
+ print "bucket quota max size set failed for the user: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_max_obj {
+ # set max objects
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
+ if ($set_quota !~ /./){
+ print "quota set for the bucket: $bucketname \n";
+ } else {
+ print "quota set failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_enable {
+ my $en_quota = `$radosgw_admin quota enable --bucket=$bucketname`;
+ if ($en_quota !~ /./){
+ print "quota enabled for the bucket: $bucketname \n";
+ } else {
+ print "quota enable failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_disable {
+ my $dis_quota = `$radosgw_admin quota disable --bucket=$bucketname`;
+ if ($dis_quota !~ /./){
+ print "quota disabled for the bucket: $bucketname \n";
+ } else {
+ print "quota disable failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+# upload a file to the bucket
+sub upload_file {
+ print "adding file to bucket: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ #($bucket->delete_key($mytestfilename1) and print "delete keys on bucket succeeded second time\n" ) or die $s3->err . "delete keys on bucket failed second time\n" . $s3->errstr;
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+# set bucket quota with max_objects and verify
+sub test_max_objects {
+ my $size = '10Mb';
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_obj();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0){
+ pass ( "Test max objects passed" );
+ } else {
+ fail ( "Test max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# Set bucket quota for specific user and ensure max objects set for the user is validated
+sub test_max_objects_per_user{
+ my $size = '10Mb';
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_objs();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0){
+ pass ( "Test max objects for the given user passed" );
+ } else {
+ fail ( "Test max objects for the given user failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota with max_objects and try to exceed the max_objects and verify
+sub test_beyond_max_objs {
+ my $size = "10Mb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_obj();
+ quota_enable();
+ upload_file();
+ my $ret_value = readd_file();
+ if ($ret_value == 1){
+ pass ( "set max objects and test beyond max objects passed" );
+ } else {
+ fail ( "set max objects and test beyond max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for a user with max_objects and try to exceed the max_objects and verify
+sub test_beyond_max_objs_user {
+ my $size = "10Mb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_objs();
+ quota_enable();
+ upload_file();
+ my $ret_value = readd_file();
+ if ($ret_value == 1){
+ pass ( "set max objects for a given user and test beyond max objects passed" );
+ } else {
+ fail ( "set max objects for a given user and test beyond max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for max size and ensure it is validated
+sub test_quota_size {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_size();
+ quota_enable();
+  $ret_value = upload_file();
+  if ($ret_value == 1) {
+    pass ( "set max size and ensure that uploads beyond the max size are rejected" );
+ my $retdel = delete_keys($mytestfilename);
+ if ($retdel == 0) {
+ print "delete objects successful \n";
+ my $size1 = "1Gb";
+ create_file($size1);
+ my $ret_val1 = upload_file();
+ if ($ret_val1 == 0) {
+ pass ( "set max size and ensure that the max size is in effect" );
+ } else {
+ fail ( "set max size and ensure the max size takes effect" );
+ }
+ }
+ } else {
+ fail ( "set max size and ensure that objects beyond max size is not allowed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for max size for a given user and ensure it is validated
+sub test_quota_size_user {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_size();
+ quota_enable();
+  $ret_value = upload_file();
+  if ($ret_value == 1) {
+    pass ( "set max size for a given user and ensure that uploads beyond the max size are rejected" );
+ my $retdel = delete_keys($mytestfilename);
+ if ($retdel == 0) {
+ print "delete objects successful \n";
+ my $size1 = "1Gb";
+ create_file($size1);
+ my $ret_val1 = upload_file();
+ if ($ret_val1 == 0) {
+ pass ( "set max size for a given user and ensure that the max size is in effect" );
+ } else {
+ fail ( "set max size for a given user and ensure the max size takes effect" );
+ }
+ }
+ } else {
+ fail ( "set max size for a given user and ensure that objects beyond max size is not allowed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota size but disable quota and verify
+sub test_quota_size_disabled {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_size();
+ quota_disable();
+  $ret_value = upload_file();
+  if ($ret_value == 0) {
+    pass ( "bucket quota size doesn't take effect when quota is disabled" );
+  } else {
+    fail ( "bucket quota size doesn't take effect when quota is disabled" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota size for a given user but disable quota and verify
+sub test_quota_size_disabled_user {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_size();
+ quota_disable();
+  $ret_value = upload_file();
+  if ($ret_value == 0) {
+    pass ( "bucket quota size for a given user doesn't take effect when quota is disabled" );
+  } else {
+    fail ( "bucket quota size for a given user doesn't take effect when quota is disabled" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for specified user and verify
+
+#== Main starts here===
+ceph_os_info();
+test_max_objects();
+test_max_objects_per_user();
+test_beyond_max_objs();
+test_beyond_max_objs_user();
+quota_set_max_size_zero();
+quota_set_max_objs_zero();
+quota_set_neg_objs();
+quota_set_neg_size();
+test_quota_size();
+test_quota_size_user();
+test_quota_size_disabled();
+test_quota_size_disabled_user();
+
+print "OK";
diff --git a/qa/workunits/rgw/s3_multipart_upload.pl b/qa/workunits/rgw/s3_multipart_upload.pl
new file mode 100755
index 00000000..ab29e6b0
--- /dev/null
+++ b/qa/workunits/rgw/s3_multipart_upload.pl
@@ -0,0 +1,151 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_multipart_upload.pl - Script to test rgw multipart upload using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_multipart_upload.pl [--help]
+
+Examples:
+ perl s3_multipart_upload.pl
+ or
+ perl s3_multipart_upload.pl --help
+
+=head1 DESCRIPTION
+
+This script tests rgw multipart upload followed by a download, verifies the
+checksum using the S3 interface, and reports the test results.
+
+=head1 ARGUMENTS
+
+s3_multipart_upload.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+our $mytestfilename;
+
+# upload a file to the bucket
+sub upload_file {
+ my ($fsize, $i) = @_;
+ create_file($fsize, $i);
+ print "adding file to bucket $bucketname: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (print "upload failed\n" and return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+# Function to perform multipart upload of given file size to the user bucket via s3 interface
+sub multipart_upload
+{
+ my ($size, $parts) = @_;
+ # generate random user every time
+ my $user = rand();
+  # Divide the file size into equal parts and upload them to the bucket in multiple parts
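+  # NOTE: the per-part size below comes from a fixed table (100Mb for 10
+  # parts, 10Mb for 100 parts); $fsize is computed but currently unused.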
+ my $fsize = ($size/$parts);
+ my $fsize1;
+ run_s3($user);
+ if ($parts == 10){
+ $fsize1 = '100Mb';
+ } elsif ($parts == 100){
+ $fsize1 = '10Mb';
+ }
+ foreach my $i(1..$parts){
+ print "uploading file - part $i \n";
+ upload_file($fsize1, $i);
+ }
+ fetch_file_from_bucket($fsize1, $parts);
+ compare_cksum($fsize1, $parts);
+ purge_data($user);
+}
+
+# Function to download the files from bucket to verify there is no data corruption
+sub fetch_file_from_bucket
+{
+ # fetch file from the bucket
+ my ($fsize, $parts) = @_;
+ foreach my $i(1..$parts){
+ my $src_file = "$fsize.$i";
+ my $dest_file = "/tmp/downloadfile.$i";
+ print
+ "Downloading $src_file from bucket to $dest_file \n";
+ $response =
+ $bucket->get_key_filename( $src_file, GET,
+ $dest_file )
+ or die $s3->err . ": " . $s3->errstr;
+ }
+}
+
+# Compare the source file with destination file and verify checksum to ensure
+# the files are not corrupted
+sub compare_cksum
+{
+ my ($fsize, $parts)=@_;
+ my $md5 = Digest::MD5->new;
+ my $flag = 0;
+ foreach my $i (1..$parts){
+ my $src_file = "/tmp/"."$fsize".".$i";
+ my $dest_file = "/tmp/downloadfile".".$i";
+ open( FILE, $src_file )
+ or die "Error: Could not open $src_file for MD5 checksum...";
+ open( DLFILE, $dest_file )
+ or die "Error: Could not open $dest_file for MD5 checksum.";
+ binmode(FILE);
+ binmode(DLFILE);
+ my $md5sum = $md5->addfile(*FILE)->hexdigest;
+ my $md5sumdl = $md5->addfile(*DLFILE)->hexdigest;
+ close FILE;
+ close DLFILE;
+ # compare the checksums
+ if ( $md5sum eq $md5sumdl ) {
+ $flag++;
+ }
+ }
+ if ($flag == $parts){
+ pass("checksum verification for multipart upload passed" );
+ }else{
+ fail("checksum verification for multipart upload failed" );
+ }
+}
+
+#== Main starts here===
+ceph_os_info();
+check();
+# The following test runs a multipart upload of a 1Gb file in 10 parts
+multipart_upload('1048576000', 10);
+# The following test runs a multipart upload of a 1Gb file in 100 parts
+multipart_upload('1048576000', 100);
+print "OK";
diff --git a/qa/workunits/rgw/s3_user_quota.pl b/qa/workunits/rgw/s3_user_quota.pl
new file mode 100755
index 00000000..6d5c02a9
--- /dev/null
+++ b/qa/workunits/rgw/s3_user_quota.pl
@@ -0,0 +1,191 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_user_quota.pl - Script to test the rgw user quota functionality using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_user_quota.pl [--help]
+
+Examples:
+ perl s3_user_quota.pl
+ or
+ perl s3_user_quota.pl --help
+
+=head1 DESCRIPTION
+
+This script tests the rgw user quota functionality using the S3 interface
+and reports the test results.
+
+=head1 ARGUMENTS
+
+s3_user_quota.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+our $mytestfilename;
+my $mytestfilename1;
+my $logmsg;
+my $kruft;
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+our $cnt;
+
+sub quota_set_max_size_per_user {
+ my ($maxsize, $size1,$rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
+ if (($set_quota !~ /./)&&($maxsize == 0)){
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 1){
+ pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
+ }
+ } elsif (($set_quota !~ /./) && ($maxsize != 0)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
+ }
+ }
+ delete_keys($mytestfilename);
+ purge_data($rgw_user);
+ return 0;
+}
+
+sub max_size_per_user {
+ my ($maxsize, $size1,$rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
+ if (($set_quota !~ /./) && ($maxsize != 0)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ $cnt++;
+ }
+ }
+ return $cnt;
+}
+
+sub quota_set_max_obj_per_user {
+ # set max objects
+ my ($maxobjs, $size1, $rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-objects=$maxobjs`;
+ if (($set_quota !~ /./) && ($maxobjs == 0)){
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 1){
+ pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
+ }
+ } elsif (($set_quota !~ /./) && ($maxobjs == 1)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
+ }
+ }
+ delete_keys($mytestfilename);
+ purge_data($rgw_user);
+}
+
+sub quota_enable_user {
+ my ($rgw_user) = @_;
+ my $en_quota = `$radosgw_admin quota enable --uid=$rgw_user --quota-scope=user`;
+ if ($en_quota !~ /./){
+ print "quota enabled for the user $rgw_user \n";
+ } else {
+ print "quota enable failed for the user $rgw_user \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_disable_user {
+  my ($rgw_user) = @_;
+  my $dis_quota = `$radosgw_admin quota disable --uid=$rgw_user --quota-scope=user`;
+ if ($dis_quota !~ /./){
+ print "quota disabled for the user $rgw_user \n";
+ } else {
+ print "quota disable failed for the user $rgw_user \n";
+ exit 1;
+ }
+ return 0;
+}
+
+# upload a file to the bucket
+sub upload_file {
+ print "adding file to bucket $bucketname: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+#Function to upload the given file size to bucket and verify
+sub test_max_objs {
+ my ($size, $rgw_user) = @_;
+ create_file($size);
+ quota_enable_user($rgw_user);
+ my $ret_value = upload_file();
+ return $ret_value;
+}
+
+# set user quota and ensure it is validated
+sub test_user_quota_max_size{
+ my ($max_buckets,$size, $fsize) = @_;
+ my $usr = rand();
+  my $ret_value;
+  foreach my $i (1..$max_buckets){
+    $ret_value = max_size_per_user($size, $fsize, $usr );
+  }
+ if ($ret_value == $max_buckets){
+ fail( "user quota max size for $usr failed on $max_buckets buckets" );
+ } else {
+ pass( "user quota max size for $usr passed on $max_buckets buckets" );
+ }
+ delete_keys($mytestfilename);
+ purge_data($usr);
+}
+
+#== Main starts here===
+ceph_os_info();
+check();
+quota_set_max_obj_per_user('0', '10Mb', 'usr1');
+quota_set_max_obj_per_user('1', '10Mb', 'usr2');
+quota_set_max_size_per_user(0, '10Mb', 'usr1');
+quota_set_max_size_per_user(1048576000, '1Gb', 'usr2');
+test_user_quota_max_size(3,1048576000,'100Mb');
+test_user_quota_max_size(2,1048576000, '1Gb');
+print "OK";
diff --git a/qa/workunits/rgw/s3_utilities.pm b/qa/workunits/rgw/s3_utilities.pm
new file mode 100644
index 00000000..12e6af0a
--- /dev/null
+++ b/qa/workunits/rgw/s3_utilities.pm
@@ -0,0 +1,220 @@
+# Common subroutines shared by the s3 testing code
+my $sec;
+my $min;
+my $hour;
+my $mon;
+my $year;
+my $mday;
+my $wday;
+my $yday;
+my $isdst;
+my $PASS_CNT = 0;
+my $FAIL_CNT = 0;
+
+our $radosgw_admin = $ENV{RGW_ADMIN}||"sudo radosgw-admin";
+
+# log file used by get_command_output(); this path is just a default
+our $test_log = "/tmp/s3_test_log.$$";
+
+# function to get the current time stamp from the test set up
+sub get_timestamp {
+   ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
+   $mon = $mon + 1;        # localtime() months are 0-based
+   $year = $year + 1900;   # localtime() years are offset from 1900
+   if ($mon < 10) { $mon = "0$mon"; }
+   if ($mday < 10) { $mday = "0$mday"; }
+   if ($hour < 10) { $hour = "0$hour"; }
+   if ($min < 10) { $min = "0$min"; }
+   if ($sec < 10) { $sec = "0$sec"; }
+   return $year . '_' . $mon . '_' . $mday . '_' . $hour . '_' . $min . '_' . $sec;
+}
+
+# Function to check if radosgw is already running
+sub get_status {
+ my $service = "radosgw";
+ my $cmd = "pgrep $service";
+ my $status = get_cmd_op($cmd);
+ if ($status =~ /\d+/ ){
+ return 0;
+ }
+ return 1;
+}
+
+# function to execute the command and return output
+sub get_cmd_op
+{
+ my $cmd = shift;
+ my $excmd = `$cmd`;
+ return $excmd;
+}
+
+#Function that executes the CLI commands and returns the output of the command
+sub get_command_output {
+ my $cmd_output = shift;
+ open( FH, ">>$test_log" );
+ print FH "\"$cmd_output\"\n";
+ my $exec_cmd = `$cmd_output 2>&1`;
+ print FH "$exec_cmd\n";
+ close(FH);
+ return $exec_cmd;
+}
+
+# Function to get the hostname
+sub get_hostname
+{
+ my $cmd = "hostname";
+ my $get_host = get_command_output($cmd);
+ chomp($get_host);
+ return($get_host);
+}
+
+sub pass {
+ my ($comment) = @_;
+ print "Comment required." unless length $comment;
+ chomp $comment;
+ print_border2();
+ print "Test case: $TC_CNT PASSED - $comment \n";
+ print_border2();
+ $PASS_CNT++;
+}
+
+sub fail {
+ my ($comment) = @_;
+ print "Comment required." unless length $comment;
+ chomp $comment;
+ print_border2();
+ print "Test case: $TC_CNT FAILED - $comment \n";
+ print_border2();
+ $FAIL_CNT++;
+}
+
+sub print_border2 {
+ print "~" x 90 . "\n";
+}
+
+# Function to create the given user and extract that user's access_key and secret_key
+sub get_user_info
+{
+ my ($rgw_user) = @_;
+ my $cmd = "$radosgw_admin user create --uid=$rgw_user --display-name=$rgw_user";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /keys/){
+ return (0,0);
+ }
+ my @get_user = (split/\n/,$cmd_op);
+ foreach (@get_user) {
+ if ($_ =~ /access_key/ ){
+ $get_acc_key = $_;
+ } elsif ($_ =~ /secret_key/ ){
+ $get_sec_key = $_;
+ }
+ }
+ my $access_key = $get_acc_key;
+ my $acc_key = (split /:/, $access_key)[1];
+ $acc_key =~ s/\\//g;
+ $acc_key =~ s/ //g;
+ $acc_key =~ s/"//g;
+ $acc_key =~ s/,//g;
+ my $secret_key = $get_sec_key;
+ my $sec_key = (split /:/, $secret_key)[1];
+ $sec_key =~ s/\\//g;
+ $sec_key =~ s/ //g;
+ $sec_key =~ s/"//g;
+ $sec_key =~ s/,//g;
+ return ($acc_key, $sec_key);
+}
+
+# Function that deletes the given user and all associated user data
+sub purge_data
+{
+ my ($rgw_user) = @_;
+ my $cmd = "$radosgw_admin user rm --uid=$rgw_user --purge-data";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /./){
+ print "user $rgw_user deleted\n";
+ } else {
+ print "user $rgw_user NOT deleted\n";
+ return 1;
+ }
+ return 0;
+}
+
+# Function to get the Ceph and distro info
+sub ceph_os_info
+{
+ my $ceph_v = get_command_output ( "ceph -v" );
+ my @ceph_arr = split(" ",$ceph_v);
+ $ceph_v = "Ceph Version: $ceph_arr[2]";
+ my $os_distro = get_command_output ( "lsb_release -d" );
+ my @os_arr = split(":",$os_distro);
+ $os_distro = "Linux Flavor:$os_arr[1]";
+ return ($ceph_v, $os_distro);
+}
+
+# Create a test file of the given size (10Mb/100Mb/500Mb/1Gb/2Gb) in /tmp using dd
+sub create_file {
+ my ($file_size, $part) = @_;
+ my $cnt;
+ $mytestfilename = "$file_size.$part";
+ $testfileloc = "/tmp/".$mytestfilename;
+  if ($file_size eq '10Mb'){
+    $cnt = 1;
+  } elsif ($file_size eq '100Mb'){
+    $cnt = 10;
+  } elsif ($file_size eq '500Mb'){
+    $cnt = 50;
+  } elsif ($file_size eq '1Gb'){
+    $cnt = 100;
+  } elsif ($file_size eq '2Gb'){
+    $cnt = 200;
+  }
+ my $ret = system("dd if=/dev/zero of=$testfileloc bs=10485760 count=$cnt");
+ if ($ret) { exit 1 };
+ return 0;
+}
+
+sub run_s3
+{
+# Run tests for the S3 functionality
+ # Modify access key and secret key to suit the user account
+ my ($user) = @_;
+ our ( $access_key, $secret_key ) = get_user_info($user);
+ if ( ($access_key) && ($secret_key) ) {
+ $s3 = Amazon::S3->new(
+ {
+ aws_access_key_id => $access_key,
+ aws_secret_access_key => $secret_key,
+ host => $hostname,
+ secure => 0,
+ retry => 1,
+ }
+ );
+ }
+
+  our $bucketname = 'buck_'.get_timestamp();
+  # create a new bucket (the test bucket)
+  our $bucket = $s3->add_bucket( { bucket => $bucketname } )
+      or die $s3->err. "bucket $bucketname create failed\n". $s3->errstr;
+  print "Bucket Created: $bucketname \n";
+  return 0;
+}
+
+# delete the given key from the test bucket; returns 0 on success, 1 on failure
+sub delete_keys {
+ (($bucket->delete_key($_[0])) and return 0) or return 1;
+}
+
+# Upload the test file to the bucket again under a new key (used to exceed object-count quotas)
+sub readd_file {
+ system("dd if=/dev/zero of=/tmp/10MBfile1 bs=10485760 count=1");
+ $mytestfilename1 = '10MBfile1';
+ print "readding file to bucket: $mytestfilename1\n";
+ ((($bucket->add_key_filename( $mytestfilename1, $testfileloc,
+ { content_type => 'text/plain', },
+ )) and (print "readding file success\n") and return 0) or (return 1));
+}
+
+# check if rgw service is already running
+sub check
+{
+ my $state = get_status();
+ if ($state) {
+ exit 1;
+ }
+}
+1;
diff --git a/qa/workunits/rgw/test_rgw_orphan_list.sh b/qa/workunits/rgw/test_rgw_orphan_list.sh
new file mode 100755
index 00000000..4299078a
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_orphan_list.sh
@@ -0,0 +1,511 @@
+#!/usr/bin/env bash
+
+set -ex
+
+# if defined, debug messages will be displayed and prepended with the string
+# debug="DEBUG"
+
+huge_size=2222 # in megabytes
+big_size=6 # in megabytes
+
+huge_obj=/tmp/huge_obj.temp.$$
+big_obj=/tmp/big_obj.temp.$$
+empty_obj=/tmp/empty_obj.temp.$$
+
+fifo=/tmp/orphan-fifo.$$
+awscli_dir=${HOME}/awscli_temp
+export PATH=${PATH}:${awscli_dir}
+
+rgw_host=$(hostname --fqdn)
+if echo "$rgw_host" | grep -q '\.' ; then
+ :
+else
+ host_domain=".front.sepia.ceph.com"
+ echo "WARNING: rgw hostname -- $rgw_host -- does not appear to be fully qualified; PUNTING and appending $host_domain"
+ rgw_host="${rgw_host}${host_domain}"
+fi
+rgw_port=80
+
+echo "Fully Qualified Domain Name: $rgw_host"
+
+success() {
+ echo OK.
+ exit 0
+}
+
+########################################################################
+# INSTALL AND CONFIGURE TOOLING
+
+install_awscli() {
+    # NB: this does NOT verify the authenticity or integrity of the
+    # downloaded file; for optional verification steps see
+    # https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html
+ here="$(pwd)"
+ cd "$HOME"
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+ unzip awscliv2.zip
+ mkdir -p $awscli_dir
+ ./aws/install -i $awscli_dir
+ cd "$here"
+}
+
+uninstall_awscli() {
+ here="$(pwd)"
+ cd "$HOME"
+ rm -rf $awscli_dir ./aws awscliv2.zip
+ cd "$here"
+}
+
+sudo yum -y install s3cmd
+sudo yum -y install python3-setuptools
+sudo yum -y install python3-pip
+sudo pip3 install --upgrade setuptools
+sudo pip3 install python-swiftclient
+
+# get ready for transition from s3cmd to awscli
+if false ;then
+ install_awscli
+ aws --version
+ uninstall_awscli
+fi
+
+s3config=/tmp/s3config.$$
+
+# do not include the port when it is 80; the host base is used in the
+# v4 signature and it needs to follow this convention for signatures
+# to match
+if [ "$rgw_port" -ne 80 ] ;then
+ s3_host_base="${rgw_host}:${rgw_port}"
+else
+ s3_host_base="$rgw_host"
+fi
+
+cat >${s3config} <<EOF
+[default]
+host_base = $s3_host_base
+access_key = 0555b35654ad1656d804
+secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+bucket_location = us-east-1
+check_ssl_certificate = True
+check_ssl_hostname = True
+default_mime_type = binary/octet-stream
+delete_removed = False
+dry_run = False
+enable_multipart = True
+encoding = UTF-8
+encrypt = False
+follow_symlinks = False
+force = False
+guess_mime_type = True
+host_bucket = anything.with.three.dots
+multipart_chunk_size_mb = 15
+multipart_max_chunks = 10000
+recursive = False
+recv_chunk = 65536
+send_chunk = 65536
+signature_v2 = False
+socket_timeout = 300
+use_https = False
+use_mime_magic = True
+verbosity = WARNING
+EOF
+
+
+# set up swift authentication
+export ST_AUTH=http://${rgw_host}:${rgw_port}/auth/v1.0
+export ST_USER=test:tester
+export ST_KEY=testing
+
+create_users() {
+ # Create S3 user
+ local akey='0555b35654ad1656d804'
+ local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+ radosgw-admin user create --uid testid \
+ --access-key $akey --secret $skey \
+ --display-name 'M. Tester' --email tester@ceph.com
+
+ # Create Swift user
+ radosgw-admin user create --subuser=test:tester \
+ --display-name=Tester-Subuser --key-type=swift \
+ --secret=testing --access=full
+}
+
+myswift() {
+ if [ -n "$debug" ] ;then
+ echo "${debug}: swift --verbose --debug $@"
+ fi
+ swift --verbose --debug "$@"
+ local code=$?
+ if [ $code -ne 0 ] ;then
+ echo "ERROR: code = $code ; command = s3cmd --config=${s3config} --verbose --debug "$@""
+ exit $code
+ fi
+}
+
+mys3cmd() {
+ if [ -n "$debug" ] ;then
+ echo "${debug}: s3cmd --config=${s3config} --verbose --debug $@"
+ fi
+ s3cmd --config=${s3config} --verbose --debug "$@"
+ local code=$?
+ if [ $code -ne 0 ] ;then
+ echo "ERROR: code = $code ; command = s3cmd --config=${s3config} --verbose --debug "$@""
+ exit $code
+ fi
+}
+
+mys3uploadkill() {
+ if [ $# -ne 5 ] ;then
+ echo "$0: error expecting 5 arguments"
+ exit 1
+ fi
+
+ set -v
+ local_file="$1"
+ remote_bkt="$2"
+ remote_obj="$3"
+ fifo="$4"
+ stop_part="$5"
+
+ mkfifo $fifo
+
+ s3cmd --config=${s3config} put $local_file \
+ s3://${remote_bkt}/${remote_obj} \
+ --progress \
+ --multipart-chunk-size-mb=5 >$fifo &
+ set +e # don't allow errors to stop script
+ while read line ;do
+ echo "$line" | grep --quiet "part $stop_part "
+ if [ ${PIPESTATUS[1]} -eq 0 ] ;then
+ kill -9 $(jobs -p)
+ break
+ fi
+ done <$fifo
+ set -e
+
+ rm -f $fifo
+}
+
+mys3upload() {
+ obj=$1
+ bucket=$2
+ dest_obj=$3
+
+ mys3cmd put -q $obj s3://${bucket}/$dest_obj
+}
+
+########################################################################
+# PREP
+
+create_users
+dd if=/dev/urandom of=$big_obj bs=1M count=${big_size}
+dd if=/dev/urandom of=$huge_obj bs=1M count=${huge_size}
+touch $empty_obj
+
+quick_tests() {
+ echo TRY A SWIFT COMMAND
+ myswift upload swift-plain-ctr $big_obj --object-name swift-obj-test
+ myswift list
+ myswift list swift-plain-ctr
+
+ echo TRY A RADOSGW-ADMIN COMMAND
+ radosgw-admin bucket list # make sure rgw is up and running
+}
+
+########################################################################
+# S3 TESTS
+
+####################################
+# regular multipart test
+
+mys3cmd mb s3://multipart-bkt
+mys3upload $huge_obj multipart-bkt multipart-obj
+mys3cmd ls
+mys3cmd ls s3://multipart-bkt
+
+####################################
+# multipart test with incomplete uploads
+
+bkt="incomplete-mp-bkt-1"
+
+mys3cmd mb s3://$bkt
+mys3uploadkill $huge_obj $bkt incomplete-mp-obj-1 $fifo 20
+mys3uploadkill $huge_obj $bkt incomplete-mp-obj-2 $fifo 100
+
+####################################
+# resharded bucket
+
+bkt=resharded-bkt-1
+
+mys3cmd mb s3://$bkt
+
+for f in $(seq 8) ; do
+ dest_obj="reshard-obj-${f}"
+ mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
+done
+
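+# reshard the bucket twice (to 3 and then 5 shards) so resharded buckets are
+# represented when the orphan scan runs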
+radosgw-admin bucket reshard --num-shards 3 --bucket=$bkt --yes-i-really-mean-it
+radosgw-admin bucket reshard --num-shards 5 --bucket=$bkt --yes-i-really-mean-it
+
+####################################
+# versioned bucket
+
+if true ;then
+ echo "WARNING: versioned bucket test currently turned off"
+else
+ bkt=versioned-bkt-1
+
+ mys3cmd mb s3://$bkt
+
+ # bucket-enable-versioning $bkt
+
+ for f in $(seq 3) ;do
+ for g in $(seq 10) ;do
+ dest_obj="versioned-obj-${g}"
+ mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
+ done
+ done
+
+ for g in $(seq 1 2 10) ;do
+ dest_obj="versioned-obj-${g}"
+ mys3cmd rm s3://${bkt}/$dest_obj
+ done
+fi
+
+############################################################
+# copy small objects
+
+o_bkt="orig-bkt-1"
+d_bkt="copy-bkt-1"
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 4) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
+mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
+
+for f in $(seq 5 6) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
+done
+
+############################################################
+# copy small objects and delete original
+
+o_bkt="orig-bkt-2"
+d_bkt="copy-bkt-2"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 4) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
+mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
+
+for f in $(seq 5 6) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
+done
+
+mys3cmd rb --recursive s3://${o_bkt}
+
+############################################################
+# copy multipart objects
+
+o_bkt="orig-mp-bkt-3"
+d_bkt="copy-mp-bkt-3"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 2) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
+ s3://${d_bkt}/copied-multipart-obj-1
+
+for f in $(seq 5 5) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
+done
+
+
+############################################################
+# copy multipart objects and delete original
+
+o_bkt="orig-mp-bkt-4"
+d_bkt="copy-mp-bkt-4"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 2) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
+ s3://${d_bkt}/copied-multipart-obj-1
+
+for f in $(seq 5 5) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
+done
+
+mys3cmd rb --recursive s3://$o_bkt
+
+########################################################################
+# SWIFT TESTS
+
+# 600MB
+segment_size=629145600
+
+############################################################
+# plain test
+
+for f in $(seq 4) ;do
+ myswift upload swift-plain-ctr $big_obj --object-name swift-obj-$f
+done
+
+############################################################
+# zero-len test
+
+myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/
+myswift upload swift-zerolen-ctr $big_obj --object-name subdir/abc1
+myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/empty1
+myswift upload swift-zerolen-ctr $big_obj --object-name subdir/xyz1
+
+############################################################
+# dlo test
+
+# upload in 600MB segments
+myswift upload swift-dlo-ctr $huge_obj --object-name dlo-obj-1 \
+ -S $segment_size
+
+############################################################
+# slo test
+
+# upload in 600MB segments
+myswift upload swift-slo-ctr $huge_obj --object-name slo-obj-1 \
+ -S $segment_size --use-slo
+
+############################################################
+# large object copy test
+
+# (the source object here is small, so no segmentation is needed)
+o_ctr=swift-orig-ctr
+o_obj=slo-orig-obj-1
+d_ctr=swift-copy-ctr
+d_obj=slo-copy-obj-1
+myswift upload $o_ctr $big_obj --object-name $o_obj
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+############################################################
+# huge dlo object copy test
+
+o_ctr=swift-orig-dlo-ctr-1
+o_obj=dlo-orig-dlo-obj-1
+d_ctr=swift-copy-dlo-ctr-1
+d_obj=dlo-copy-dlo-obj-1
+
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+############################################################
+# huge dlo object copy and orig delete
+
+o_ctr=swift-orig-dlo-ctr-2
+o_obj=dlo-orig-dlo-obj-2
+d_ctr=swift-copy-dlo-ctr-2
+d_obj=dlo-copy-dlo-obj-2
+
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+############################################################
+# huge slo object copy test
+
+o_ctr=swift-orig-slo-ctr-1
+o_obj=slo-orig-slo-obj-1
+d_ctr=swift-copy-slo-ctr-1
+d_obj=slo-copy-slo-obj-1
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size --use-slo
+
+myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
+
+############################################################
+# huge slo object copy test and orig delete
+
+o_ctr=swift-orig-slo-ctr-2
+o_obj=slo-orig-slo-obj-2
+d_ctr=swift-copy-slo-ctr-2
+d_obj=slo-copy-slo-obj-2
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size --use-slo
+
+myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+########################################################################
+# FORCE GARBAGE COLLECTION
+
+sleep 6 # the object age at which gc can run is set to 5 seconds for testing, so wait a bit longer
+radosgw-admin gc process --include-all
+
+
+########################################
+# DO ORPHAN LIST
+
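+# default.rgw.buckets.data is the bucket-data pool of the default zone in a
+# stock deployment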
+pool="default.rgw.buckets.data"
+
+rgw-orphan-list $pool
+
+# we only expect there to be one output file, but loop just in case
+ol_error=""
+for f in orphan-list-*.out ; do
+ if [ -s "$f" ] ;then # if file non-empty
+ ol_error="${ol_error}:$f"
+ echo "One ore more orphans found in $f:"
+ cat "$f"
+ fi
+done
+
+if [ -n "$ol_error" ] ;then
+ echo "ERROR: orphans found when none expected"
+ exit 1
+fi
+
+########################################################################
+# CLEAN UP
+
+rm -f $empty_obj $big_obj $huge_obj $s3config
+
+success