summaryrefslogtreecommitdiffstats
path: root/qa/workunits/rgw
diff options
context:
space:
mode:
Diffstat (limited to 'qa/workunits/rgw')
-rw-r--r--qa/workunits/rgw/olh_noname_key1
-rw-r--r--qa/workunits/rgw/olh_noname_valbin0 -> 71 bytes
-rwxr-xr-xqa/workunits/rgw/run-reshard.sh19
-rwxr-xr-xqa/workunits/rgw/run-s3tests.sh41
-rwxr-xr-xqa/workunits/rgw/s3_bucket_quota.pl393
-rwxr-xr-xqa/workunits/rgw/s3_multipart_upload.pl151
-rwxr-xr-xqa/workunits/rgw/s3_user_quota.pl191
-rw-r--r--qa/workunits/rgw/s3_utilities.pm233
-rwxr-xr-xqa/workunits/rgw/test_librgw_file.sh59
-rwxr-xr-xqa/workunits/rgw/test_rgw_gc_log.sh5
-rwxr-xr-xqa/workunits/rgw/test_rgw_obj.sh5
-rwxr-xr-xqa/workunits/rgw/test_rgw_orphan_list.sh519
-rwxr-xr-xqa/workunits/rgw/test_rgw_reshard.py265
-rwxr-xr-xqa/workunits/rgw/test_rgw_throttle.sh5
14 files changed, 1887 insertions, 0 deletions
diff --git a/qa/workunits/rgw/olh_noname_key b/qa/workunits/rgw/olh_noname_key
new file mode 100644
index 000000000..6138c57cd
--- /dev/null
+++ b/qa/workunits/rgw/olh_noname_key
@@ -0,0 +1 @@
+€1001_04/57/0457f727ec113e418d5b16d206b200ed068c0533554883ce811df7c932a3df68/2018_12_11/2889999/3386469/metadata.gz \ No newline at end of file
diff --git a/qa/workunits/rgw/olh_noname_val b/qa/workunits/rgw/olh_noname_val
new file mode 100644
index 000000000..ff442e137
--- /dev/null
+++ b/qa/workunits/rgw/olh_noname_val
Binary files differ
diff --git a/qa/workunits/rgw/run-reshard.sh b/qa/workunits/rgw/run-reshard.sh
new file mode 100755
index 000000000..89ebc4106
--- /dev/null
+++ b/qa/workunits/rgw/run-reshard.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -ex
+
+#assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
+# localhost::443 for ssl
+
+mydir=`dirname $0`
+
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install boto3
+
+## run test
+$mydir/bin/python3 $mydir/test_rgw_reshard.py
+
+deactivate
+echo OK.
+
diff --git a/qa/workunits/rgw/run-s3tests.sh b/qa/workunits/rgw/run-s3tests.sh
new file mode 100755
index 000000000..4f40bbd31
--- /dev/null
+++ b/qa/workunits/rgw/run-s3tests.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+set -ex
+
+# run s3-tests from current directory. assume working
+# ceph environment (radosgw-admin in path) and rgw on localhost:8000
+# (the vstart default).
+
+branch=$1
+[ -z "$1" ] && branch=master
+port=$2
+[ -z "$2" ] && port=8000 # this is vstart's default
+
+##
+
+[ -z "$BUILD_DIR" ] && BUILD_DIR=build
+
+if [ -e CMakeCache.txt ]; then
+ BIN_PATH=$PWD/bin
+elif [ -e $root_path/../${BUILD_DIR}/CMakeCache.txt ]; then
+ cd $root_path/../${BUILD_DIR}
+ BIN_PATH=$PWD/bin
+fi
+PATH=$PATH:$BIN_PATH
+
+dir=tmp.s3-tests.$$
+
+# clone and bootstrap
+mkdir $dir
+cd $dir
+git clone https://github.com/ceph/s3-tests
+cd s3-tests
+git checkout ceph-$branch
+VIRTUALENV_PYTHON=/usr/bin/python3 ./bootstrap
+
+S3TEST_CONF=s3tests.conf.SAMPLE virtualenv/bin/python -m nose -a '!fails_on_rgw,!lifecycle_expiration,!fails_strict_rfc2616' -v
+
+cd ../..
+rm -rf $dir
+
+echo OK.
+
diff --git a/qa/workunits/rgw/s3_bucket_quota.pl b/qa/workunits/rgw/s3_bucket_quota.pl
new file mode 100755
index 000000000..7f5476ef6
--- /dev/null
+++ b/qa/workunits/rgw/s3_bucket_quota.pl
@@ -0,0 +1,393 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_bucket_quota.pl - Script to test the rgw bucket quota functionality using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_bucket_quota.pl [--help]
+
+Examples:
+ perl s3_bucket_quota.pl
+ or
+ perl s3_bucket_quota.pl --help
+
+=head1 DESCRIPTION
+
+This script intends to test the rgw bucket quota functionality using s3 interface
+and reports the test results
+
+=head1 ARGUMENTS
+
+s3_bucket_quota.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+#use strict;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+our $mytestfilename;
+my $mytestfilename1;
+my $logmsg;
+my $kruft;
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+my $rgw_user = "qa_user";
+
+# Function that deletes the user $rgw_user and write to logfile.
+sub delete_user
+{
+ my $cmd = "$radosgw_admin user rm --uid=$rgw_user";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /aborting/){
+ print "user $rgw_user deleted\n";
+ } else {
+ print "user $rgw_user NOT deleted\n";
+ return 1;
+ }
+ return 0;
+}
+
+sub quota_set_max_size {
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
+ if ($set_quota !~ /./){
+ print "quota set for the bucket: $bucketname \n";
+ } else {
+ print "quota set failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_max_size_zero {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=0`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max size as zero\n");
+ } else {
+ fail ("quota set with max size 0 failed for the bucket: $bucketname \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_max_objs_zero {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=0`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max objects as zero\n");
+ } else {
+ fail ("quota set with max objects 0 failed for the bucket: $bucketname \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_neg_size {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=-1`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max size -1\n");
+ } else {
+ fail ("quota set failed for the bucket: $bucketname with max size -1 \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_neg_objs {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=-1`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname max objects -1 \n");
+ } else {
+ fail ("quota set failed for the bucket: $bucketname \n with max objects -1");
+ }
+ delete_bucket();
+}
+
+sub quota_set_user_objs {
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
+ my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
+ if ($set_quota1 !~ /./){
+ print "bucket quota max_objs set for the given user: $bucketname \n";
+ } else {
+ print "bucket quota max_objs set failed for the given user: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_user_size {
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
+ my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
+ if ($set_quota1 !~ /./){
+ print "bucket quota max size set for the given user: $bucketname \n";
+ } else {
+ print "bucket quota max size set failed for the user: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_max_obj {
+ # set max objects
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
+ if ($set_quota !~ /./){
+ print "quota set for the bucket: $bucketname \n";
+ } else {
+ print "quota set failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_enable {
+ my $en_quota = `$radosgw_admin quota enable --bucket=$bucketname`;
+ if ($en_quota !~ /./){
+ print "quota enabled for the bucket: $bucketname \n";
+ } else {
+ print "quota enable failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_disable {
+ my $dis_quota = `$radosgw_admin quota disable --bucket=$bucketname`;
+ if ($dis_quota !~ /./){
+ print "quota disabled for the bucket: $bucketname \n";
+ } else {
+ print "quota disable failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+# upload a file to the bucket
+sub upload_file {
+ print "adding file to bucket: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ #($bucket->delete_key($mytestfilename1) and print "delete keys on bucket succeeded second time\n" ) or die $s3->err . "delete keys on bucket failed second time\n" . $s3->errstr;
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+# set bucket quota with max_objects and verify
+sub test_max_objects {
+ my $size = '10Mb';
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_obj();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0){
+ pass ( "Test max objects passed" );
+ } else {
+ fail ( "Test max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# Set bucket quota for specific user and ensure max objects set for the user is validated
+sub test_max_objects_per_user{
+ my $size = '10Mb';
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_objs();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0){
+ pass ( "Test max objects for the given user passed" );
+ } else {
+ fail ( "Test max objects for the given user failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota with max_objects and try to exceed the max_objects and verify
+sub test_beyond_max_objs {
+ my $size = "10Mb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_obj();
+ quota_enable();
+ upload_file();
+ my $ret_value = readd_file();
+ if ($ret_value == 1){
+ pass ( "set max objects and test beyond max objects passed" );
+ } else {
+ fail ( "set max objects and test beyond max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for a user with max_objects and try to exceed the max_objects and verify
+sub test_beyond_max_objs_user {
+ my $size = "10Mb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_objs();
+ quota_enable();
+ upload_file();
+ my $ret_value = readd_file();
+ if ($ret_value == 1){
+ pass ( "set max objects for a given user and test beyond max objects passed" );
+ } else {
+ fail ( "set max objects for a given user and test beyond max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for max size and ensure it is validated
+sub test_quota_size {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_size();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 1) {
+ pass ( "set max size and ensure that objects upload beyond max size is not entertained" );
+ my $retdel = delete_keys($mytestfilename);
+ if ($retdel == 0) {
+ print "delete objects successful \n";
+ my $size1 = "1Gb";
+ create_file($size1);
+ my $ret_val1 = upload_file();
+ if ($ret_val1 == 0) {
+ pass ( "set max size and ensure that the max size is in effect" );
+ } else {
+ fail ( "set max size and ensure the max size takes effect" );
+ }
+ }
+ } else {
+ fail ( "set max size and ensure that objects beyond max size is not allowed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for max size for a given user and ensure it is validated
+sub test_quota_size_user {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_size();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 1) {
+ pass ( "set max size for a given user and ensure that objects upload beyond max size is not entertained" );
+ my $retdel = delete_keys($mytestfilename);
+ if ($retdel == 0) {
+ print "delete objects successful \n";
+ my $size1 = "1Gb";
+ create_file($size1);
+ my $ret_val1 = upload_file();
+ if ($ret_val1 == 0) {
+ pass ( "set max size for a given user and ensure that the max size is in effect" );
+ } else {
+ fail ( "set max size for a given user and ensure the max size takes effect" );
+ }
+ }
+ } else {
+ fail ( "set max size for a given user and ensure that objects beyond max size is not allowed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota size but disable quota and verify
+sub test_quota_size_disabled {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_size();
+ quota_disable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0) {
+ pass ( "bucket quota size doesnt take effect when quota is disabled" );
+ } else {
+ fail ( "bucket quota size doesnt take effect when quota is disabled" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota size for a given user but disable quota and verify
+sub test_quota_size_disabled_user {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_size();
+ quota_disable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0) {
+ pass ( "bucket quota size for a given user doesnt take effect when quota is disabled" );
+ } else {
+ fail ( "bucket quota size for a given user doesnt take effect when quota is disabled" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for specified user and verify
+
+#== Main starts here===
+ceph_os_info();
+test_max_objects();
+test_max_objects_per_user();
+test_beyond_max_objs();
+test_beyond_max_objs_user();
+quota_set_max_size_zero();
+quota_set_max_objs_zero();
+quota_set_neg_objs();
+quota_set_neg_size();
+test_quota_size();
+test_quota_size_user();
+test_quota_size_disabled();
+test_quota_size_disabled_user();
+
+print "OK";
diff --git a/qa/workunits/rgw/s3_multipart_upload.pl b/qa/workunits/rgw/s3_multipart_upload.pl
new file mode 100755
index 000000000..ab29e6b03
--- /dev/null
+++ b/qa/workunits/rgw/s3_multipart_upload.pl
@@ -0,0 +1,151 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_multipart_upload.pl - Script to test rgw multipart upload using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_multipart_upload.pl [--help]
+
+Examples:
+ perl s3_multipart_upload.pl
+ or
+ perl s3_multipart_upload.pl --help
+
+=head1 DESCRIPTION
+
+This script intends to test the rgw multipart upload followed by a download
+and verify checksum using s3 interface and reports test results
+
+=head1 ARGUMENTS
+
+s3_multipart_upload.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+our $mytestfilename;
+
+# upload a file to the bucket
+sub upload_file {
+ my ($fsize, $i) = @_;
+ create_file($fsize, $i);
+ print "adding file to bucket $bucketname: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (print "upload failed\n" and return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+# Function to perform multipart upload of given file size to the user bucket via s3 interface
+sub multipart_upload
+{
+ my ($size, $parts) = @_;
+ # generate random user every time
+ my $user = rand();
+ # Divide the file size in to equal parts and upload to bucket in multiple parts
+ my $fsize = ($size/$parts);
+ my $fsize1;
+ run_s3($user);
+ if ($parts == 10){
+ $fsize1 = '100Mb';
+ } elsif ($parts == 100){
+ $fsize1 = '10Mb';
+ }
+ foreach my $i(1..$parts){
+ print "uploading file - part $i \n";
+ upload_file($fsize1, $i);
+ }
+ fetch_file_from_bucket($fsize1, $parts);
+ compare_cksum($fsize1, $parts);
+ purge_data($user);
+}
+
+# Function to download the files from bucket to verify there is no data corruption
+sub fetch_file_from_bucket
+{
+ # fetch file from the bucket
+ my ($fsize, $parts) = @_;
+ foreach my $i(1..$parts){
+ my $src_file = "$fsize.$i";
+ my $dest_file = "/tmp/downloadfile.$i";
+ print
+ "Downloading $src_file from bucket to $dest_file \n";
+ $response =
+ $bucket->get_key_filename( $src_file, GET,
+ $dest_file )
+ or die $s3->err . ": " . $s3->errstr;
+ }
+}
+
+# Compare the source file with destination file and verify checksum to ensure
+# the files are not corrupted
+sub compare_cksum
+{
+ my ($fsize, $parts)=@_;
+ my $md5 = Digest::MD5->new;
+ my $flag = 0;
+ foreach my $i (1..$parts){
+ my $src_file = "/tmp/"."$fsize".".$i";
+ my $dest_file = "/tmp/downloadfile".".$i";
+ open( FILE, $src_file )
+ or die "Error: Could not open $src_file for MD5 checksum...";
+ open( DLFILE, $dest_file )
+ or die "Error: Could not open $dest_file for MD5 checksum.";
+ binmode(FILE);
+ binmode(DLFILE);
+ my $md5sum = $md5->addfile(*FILE)->hexdigest;
+ my $md5sumdl = $md5->addfile(*DLFILE)->hexdigest;
+ close FILE;
+ close DLFILE;
+ # compare the checksums
+ if ( $md5sum eq $md5sumdl ) {
+ $flag++;
+ }
+ }
+ if ($flag == $parts){
+ pass("checksum verification for multipart upload passed" );
+ }else{
+ fail("checksum verification for multipart upload failed" );
+ }
+}
+
+#== Main starts here===
+ceph_os_info();
+check();
+# The following test runs multi part upload of file size 1Gb in 10 parts
+multipart_upload('1048576000', 10);
+# The following test runs multipart upload of 1 Gb file in 100 parts
+multipart_upload('1048576000', 100);
+print "OK";
diff --git a/qa/workunits/rgw/s3_user_quota.pl b/qa/workunits/rgw/s3_user_quota.pl
new file mode 100755
index 000000000..6d5c02a9a
--- /dev/null
+++ b/qa/workunits/rgw/s3_user_quota.pl
@@ -0,0 +1,191 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_user_quota.pl - Script to test the rgw user quota functionality using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_user_quota.pl [--help]
+
+Examples:
+ perl s3_user_quota.pl
+ or
+ perl s3_user_quota.pl --help
+
+=head1 DESCRIPTION
+
+This script intends to test the rgw user quota functionality using s3 interface
+and reports the test results
+
+=head1 ARGUMENTS
+
+s3_user_quota.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+our $mytestfilename;
+my $mytestfilename1;
+my $logmsg;
+my $kruft;
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+our $cnt;
+
+sub quota_set_max_size_per_user {
+ my ($maxsize, $size1,$rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
+ if (($set_quota !~ /./)&&($maxsize == 0)){
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 1){
+ pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
+ }
+ } elsif (($set_quota !~ /./) && ($maxsize != 0)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
+ }
+ }
+ delete_keys($mytestfilename);
+ purge_data($rgw_user);
+ return 0;
+}
+
+sub max_size_per_user {
+ my ($maxsize, $size1,$rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
+ if (($set_quota !~ /./) && ($maxsize != 0)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ $cnt++;
+ }
+ }
+ return $cnt;
+}
+
+sub quota_set_max_obj_per_user {
+ # set max objects
+ my ($maxobjs, $size1, $rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-objects=$maxobjs`;
+ if (($set_quota !~ /./) && ($maxobjs == 0)){
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 1){
+ pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
+ }
+ } elsif (($set_quota !~ /./) && ($maxobjs == 1)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
+ }
+ }
+ delete_keys($mytestfilename);
+ purge_data($rgw_user);
+}
+
+sub quota_enable_user {
+ my ($rgw_user) = @_;
+ my $en_quota = `$radosgw_admin quota enable --uid=$rgw_user --quota-scope=user`;
+ if ($en_quota !~ /./){
+ print "quota enabled for the user $rgw_user \n";
+ } else {
+ print "quota enable failed for the user $rgw_user \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_disable_user {
+ my $dis_quota = `$radosgw_admin quota disable --uid=$rgw_user --quota-scope=user`;
+ if ($dis_quota !~ /./){
+ print "quota disabled for the user $rgw_user \n";
+ } else {
+ print "quota disable failed for the user $rgw_user \n";
+ exit 1;
+ }
+ return 0;
+}
+
+# upload a file to the bucket
+sub upload_file {
+ print "adding file to bucket $bucketname: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+#Function to upload the given file size to bucket and verify
+sub test_max_objs {
+ my ($size, $rgw_user) = @_;
+ create_file($size);
+ quota_enable_user($rgw_user);
+ my $ret_value = upload_file();
+ return $ret_value;
+}
+
+# set user quota and ensure it is validated
+sub test_user_quota_max_size{
+ my ($max_buckets,$size, $fsize) = @_;
+ my $usr = rand();
+ foreach my $i (1..$max_buckets){
+ my $ret_value = max_size_per_user($size, $fsize, $usr );
+ }
+ if ($ret_value == $max_buckets){
+ fail( "user quota max size for $usr failed on $max_buckets buckets" );
+ } else {
+ pass( "user quota max size for $usr passed on $max_buckets buckets" );
+ }
+ delete_keys($mytestfilename);
+ purge_data($usr);
+}
+
+#== Main starts here===
+ceph_os_info();
+check();
+quota_set_max_obj_per_user('0', '10Mb', 'usr1');
+quota_set_max_obj_per_user('1', '10Mb', 'usr2');
+quota_set_max_size_per_user(0, '10Mb', 'usr1');
+quota_set_max_size_per_user(1048576000, '1Gb', 'usr2');
+test_user_quota_max_size(3,1048576000,'100Mb');
+test_user_quota_max_size(2,1048576000, '1Gb');
+print "OK";
diff --git a/qa/workunits/rgw/s3_utilities.pm b/qa/workunits/rgw/s3_utilities.pm
new file mode 100644
index 000000000..3c3fae900
--- /dev/null
+++ b/qa/workunits/rgw/s3_utilities.pm
@@ -0,0 +1,233 @@
+# Common subroutines shared by the s3 testing code
+my $sec;
+my $min;
+my $hour;
+my $mon;
+my $year;
+my $mday;
+my $wday;
+my $yday;
+my $isdst;
+my $PASS_CNT = 0;
+my $FAIL_CNT = 0;
+
+our $radosgw_admin = $ENV{RGW_ADMIN}||"sudo radosgw-admin";
+
+# function to get the current time stamp from the test set up
+sub get_timestamp {
+ ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
+ if ($mon < 10) { $mon = "0$mon"; }
+ if ($hour < 10) { $hour = "0$hour"; }
+ if ($min < 10) { $min = "0$min"; }
+ if ($sec < 10) { $sec = "0$sec"; }
+ $year=$year+1900;
+ return $year . '_' . $mon . '_' . $mday . '_' . $hour . '_' . $min . '_' . $sec;
+}
+
+# Function to check if radosgw is already running
+sub get_status {
+ my $service = "radosgw";
+ my $cmd = "pgrep $service";
+ my $status = get_cmd_op($cmd);
+ if ($status =~ /\d+/ ){
+ return 0;
+ }
+ return 1;
+}
+
+# function to execute the command and return output
+sub get_cmd_op
+{
+ my $cmd = shift;
+ my $excmd = `$cmd`;
+ return $excmd;
+}
+
+#Function that executes the CLI commands and returns the output of the command
+sub get_command_output {
+ my $cmd_output = shift;
+ open( FH, ">>$test_log" );
+ print FH "\"$cmd_output\"\n";
+ my $exec_cmd = `$cmd_output 2>&1`;
+ print FH "$exec_cmd\n";
+ close(FH);
+ return $exec_cmd;
+}
+
+# Function to get the hostname
+sub get_hostname
+{
+ my $cmd = "hostname";
+ my $get_host = get_command_output($cmd);
+ chomp($get_host);
+ return($get_host);
+}
+
+sub pass {
+ my ($comment) = @_;
+ print "Comment required." unless length $comment;
+ chomp $comment;
+ print_border2();
+ print "Test case: $TC_CNT PASSED - $comment \n";
+ print_border2();
+ $PASS_CNT++;
+}
+
+sub fail {
+ my ($comment) = @_;
+ print "Comment required." unless length $comment;
+ chomp $comment;
+ print_border2();
+ print "Test case: $TC_CNT FAILED - $comment \n";
+ print_border2();
+ $FAIL_CNT++;
+}
+
+sub print_border2 {
+ print "~" x 90 . "\n";
+}
+
+# Function to create the user "qa_user" and extract the user access_key and secret_key of the user
+sub get_user_info
+{
+ my ($rgw_user) = @_;
+ my $cmd = "$radosgw_admin user create --uid=$rgw_user --display-name=$rgw_user";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /keys/){
+ return (0,0);
+ }
+ my @get_user = (split/\n/,$cmd_op);
+ foreach (@get_user) {
+ if ($_ =~ /access_key/ ){
+ $get_acc_key = $_;
+ } elsif ($_ =~ /secret_key/ ){
+ $get_sec_key = $_;
+ }
+ }
+ my $access_key = $get_acc_key;
+ my $acc_key = (split /:/, $access_key)[1];
+ $acc_key =~ s/\\//g;
+ $acc_key =~ s/ //g;
+ $acc_key =~ s/"//g;
+ $acc_key =~ s/,//g;
+ my $secret_key = $get_sec_key;
+ my $sec_key = (split /:/, $secret_key)[1];
+ $sec_key =~ s/\\//g;
+ $sec_key =~ s/ //g;
+ $sec_key =~ s/"//g;
+ $sec_key =~ s/,//g;
+ return ($acc_key, $sec_key);
+}
+
+# Function that deletes the given user and all associated user data
+sub purge_data
+{
+ my ($rgw_user) = @_;
+ my $cmd = "$radosgw_admin user rm --uid=$rgw_user --purge-data";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /./){
+ print "user $rgw_user deleted\n";
+ } else {
+ print "user $rgw_user NOT deleted\n";
+ return 1;
+ }
+ return 0;
+}
+
+# Read PRETTY_NAME from /etc/os-release
+sub os_pretty_name
+{
+ open(FH, '<', '/etc/os-release') or die $!;
+ while (my $line = <FH>) {
+ chomp $line;
+ if ($line =~ /^\s*PRETTY_NAME=\"?([^"]*)\"?/) {
+ return $1;
+ }
+ }
+ close(FH);
+}
+
+
+# Function to get the Ceph and distro info
+sub ceph_os_info
+{
+ my $ceph_v = get_command_output ( "ceph -v" );
+ my @ceph_arr = split(" ",$ceph_v);
+ $ceph_v = "Ceph Version: $ceph_arr[2]";
+ my $os_distro = os_pretty_name();
+ $os_distro = "Linux Flavor:$os_distro";
+ return ($ceph_v, $os_distro);
+}
+
+# Execute the test case based on the input to the script
+sub create_file {
+ my ($file_size, $part) = @_;
+ my $cnt;
+ $mytestfilename = "$file_size.$part";
+ $testfileloc = "/tmp/".$mytestfilename;
+ if ($file_size == '10Mb'){
+ $cnt = 1;
+ } elsif ($file_size == '100Mb'){
+ $cnt = 10;
+ } elsif ($file_size == '500Mb'){
+ $cnt = 50;
+ } elsif ($file_size == '1Gb'){
+ $cnt = 100;
+ } elsif ($file_size == '2Gb'){
+ $cnt = 200;
+ }
+ my $ret = system("dd if=/dev/zero of=$testfileloc bs=10485760 count=$cnt");
+ if ($ret) { exit 1 };
+ return 0;
+}
+
+sub run_s3
+{
+# Run tests for the S3 functionality
+ # Modify access key and secret key to suit the user account
+ my ($user) = @_;
+ our ( $access_key, $secret_key ) = get_user_info($user);
+ if ( ($access_key) && ($secret_key) ) {
+ $s3 = Amazon::S3->new(
+ {
+ aws_access_key_id => $access_key,
+ aws_secret_access_key => $secret_key,
+ host => $hostname,
+ secure => 0,
+ retry => 1,
+ }
+ );
+ }
+
+our $bucketname = 'buck_'.get_timestamp();
+# create a new bucket (the test bucket)
+our $bucket = $s3->add_bucket( { bucket => $bucketname } )
+ or die $s3->err. "bucket $bucketname create failed\n". $s3->errstr;
+ print "Bucket Created: $bucketname \n";
+ return 0;
+}
+
+# delete keys
+sub delete_keys {
+ (($bucket->delete_key($_[0])) and return 0) or return 1;
+}
+
+# Read the file back to bucket
+sub readd_file {
+ system("dd if=/dev/zero of=/tmp/10MBfile1 bs=10485760 count=1");
+ $mytestfilename1 = '10MBfile1';
+ print "readding file to bucket: $mytestfilename1\n";
+ ((($bucket->add_key_filename( $mytestfilename1, $testfileloc,
+ { content_type => 'text/plain', },
+ )) and (print "readding file success\n") and return 0) or (return 1));
+}
+
+# check if rgw service is already running
+sub check
+{
+ my $state = get_status();
+ if ($state) {
+ exit 1;
+ }
+}
+1
diff --git a/qa/workunits/rgw/test_librgw_file.sh b/qa/workunits/rgw/test_librgw_file.sh
new file mode 100755
index 000000000..1371ff711
--- /dev/null
+++ b/qa/workunits/rgw/test_librgw_file.sh
@@ -0,0 +1,59 @@
+#!/bin/sh -e
+
+
+if [ -z ${AWS_ACCESS_KEY_ID} ]
+then
+ export AWS_ACCESS_KEY_ID=`openssl rand -base64 20`
+ export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 40`
+
+ radosgw-admin user create --uid ceph-test-librgw-file \
+ --access-key $AWS_ACCESS_KEY_ID \
+ --secret $AWS_SECRET_ACCESS_KEY \
+ --display-name "librgw test user" \
+ --email librgw@example.com || echo "librgw user exists"
+
+ # keyring override for teuthology env
+ KEYRING="/etc/ceph/ceph.keyring"
+ K="-k ${KEYRING}"
+fi
+
+# nfsns is the main suite
+
+# create hierarchy, and then list it
+echo "phase 1.1"
+ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --create --rename --verbose
+
+# the older librgw_file can consume the namespace
+echo "phase 1.2"
+ceph_test_librgw_file_nfsns ${K} --getattr --verbose
+
+# and delete the hierarchy
+echo "phase 1.3"
+ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --delete --verbose
+
+# bulk create/delete buckets
+echo "phase 2.1"
+ceph_test_librgw_file_cd ${K} --create --multi --verbose
+echo "phase 2.2"
+ceph_test_librgw_file_cd ${K} --delete --multi --verbose
+
+# write continuation test
+echo "phase 3.1"
+ceph_test_librgw_file_aw ${K} --create --large --verify
+echo "phase 3.2"
+ceph_test_librgw_file_aw ${K} --delete --large
+
+# continued readdir
+echo "phase 4.1"
+ceph_test_librgw_file_marker ${K} --create --marker1 --marker2 --nobjs=100 --verbose
+echo "phase 4.2"
+ceph_test_librgw_file_marker ${K} --delete --verbose
+
+# advanced i/o--but skip readv/writev for now--split delete from
+# create and stat ops to avoid fault in sysobject cache
+echo "phase 5.1"
+ceph_test_librgw_file_gp ${K} --get --stat --put --create
+echo "phase 5.2"
+ceph_test_librgw_file_gp ${K} --delete
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_gc_log.sh b/qa/workunits/rgw/test_rgw_gc_log.sh
new file mode 100755
index 000000000..ab4015aef
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_gc_log.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_rgw_gc_log
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_obj.sh b/qa/workunits/rgw/test_rgw_obj.sh
new file mode 100755
index 000000000..01dd2b5ee
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_obj.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_rgw_obj
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_orphan_list.sh b/qa/workunits/rgw/test_rgw_orphan_list.sh
new file mode 100755
index 000000000..34d550cea
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_orphan_list.sh
@@ -0,0 +1,519 @@
+#!/usr/bin/env bash
+
+# set -x
+set -e
+
+# if defined, debug messages will be displayed and prepended with the string
+# debug="DEBUG"
+
+huge_size=5100 # in megabytes
+big_size=7 # in megabytes
+
+huge_obj=/tmp/huge_obj.temp.$$
+big_obj=/tmp/big_obj.temp.$$
+empty_obj=/tmp/empty_obj.temp.$$
+
+fifo=/tmp/orphan-fifo.$$
+awscli_dir=${HOME}/awscli_temp
+export PATH=${PATH}:${awscli_dir}
+
+rgw_host=$(hostname --fqdn)
+if echo "$rgw_host" | grep -q '\.' ; then
+ :
+else
+ host_domain=".front.sepia.ceph.com"
+ echo "WARNING: rgw hostname -- $rgw_host -- does not appear to be fully qualified; PUNTING and appending $host_domain"
+ rgw_host="${rgw_host}${host_domain}"
+fi
+rgw_port=80
+
+echo "Fully Qualified Domain Name: $rgw_host"
+
+success() {
+ echo OK.
+ exit 0
+}
+
+########################################################################
+# INSTALL AND CONFIGURE TOOLING
+
+install_awscli() {
+    # NB: this does not itself verify authenticity or integrity of the
+    # downloaded file; for how to do so, see
+ # https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html
+ here="$(pwd)"
+ cd "$HOME"
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+ unzip awscliv2.zip
+ mkdir -p $awscli_dir
+ ./aws/install -i $awscli_dir
+ cd "$here"
+}
+
+uninstall_awscli() {
+ here="$(pwd)"
+ cd "$HOME"
+ rm -rf $awscli_dir ./aws awscliv2.zip
+ cd "$here"
+}
+
+sudo yum -y install s3cmd
+sudo yum -y install python3-setuptools
+sudo yum -y install python3-pip
+sudo pip3 install --upgrade setuptools
+sudo pip3 install python-swiftclient
+
+# get ready for transition from s3cmd to awscli
+if false ;then
+ install_awscli
+ aws --version
+ uninstall_awscli
+fi
+
+s3config=/tmp/s3config.$$
+
+# do not include the port when it is 80; the host base is used in the
+# v4 signature and it needs to follow this convention for signatures
+# to match
+if [ "$rgw_port" -ne 80 ] ;then
+ s3_host_base="${rgw_host}:${rgw_port}"
+else
+ s3_host_base="$rgw_host"
+fi
+
+cat >${s3config} <<EOF
+[default]
+host_base = $s3_host_base
+access_key = 0555b35654ad1656d804
+secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+bucket_location = us-east-1
+check_ssl_certificate = True
+check_ssl_hostname = True
+default_mime_type = binary/octet-stream
+delete_removed = False
+dry_run = False
+enable_multipart = True
+encoding = UTF-8
+encrypt = False
+follow_symlinks = False
+force = False
+guess_mime_type = True
+host_bucket = anything.with.three.dots
+multipart_chunk_size_mb = 15
+multipart_max_chunks = 10000
+recursive = False
+recv_chunk = 65536
+send_chunk = 65536
+signature_v2 = False
+socket_timeout = 300
+use_https = False
+use_mime_magic = True
+verbosity = WARNING
+EOF
+
+
+# set up swift authentication
+export ST_AUTH=http://${rgw_host}:${rgw_port}/auth/v1.0
+export ST_USER=test:tester
+export ST_KEY=testing
+
+create_users() {
+ # Create S3 user
+ local akey='0555b35654ad1656d804'
+ local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+ radosgw-admin user create --uid testid \
+ --access-key $akey --secret $skey \
+ --display-name 'M. Tester' --email tester@ceph.com
+
+ # Create Swift user
+ radosgw-admin user create --subuser=test:tester \
+ --display-name=Tester-Subuser --key-type=swift \
+ --secret=testing --access=full
+}
+
+myswift() {
+ if [ -n "$debug" ] ;then
+ echo "${debug}: swift --verbose --debug $@"
+ fi
+ swift --verbose --debug "$@"
+ local code=$?
+ if [ $code -ne 0 ] ;then
+	echo "ERROR: code = $code ; command = swift --verbose --debug "$@""
+ exit $code
+ fi
+}
+
+mys3cmd() {
+ if [ -n "$debug" ] ;then
+ echo "${debug}: s3cmd --config=${s3config} --verbose --debug $@"
+ fi
+ s3cmd --config=${s3config} --verbose --debug "$@"
+ local code=$?
+ if [ $code -ne 0 ] ;then
+ echo "ERROR: code = $code ; command = s3cmd --config=${s3config} --verbose --debug "$@""
+ exit $code
+ fi
+}
+
+mys3uploadkill() {
+ if [ $# -ne 5 ] ;then
+ echo "$0: error expecting 5 arguments"
+ exit 1
+ fi
+
+ local_file="$1"
+ remote_bkt="$2"
+ remote_obj="$3"
+ fifo="$4"
+ stop_part="$5"
+
+ mkfifo $fifo
+
+ s3cmd --config=${s3config} put $local_file \
+ s3://${remote_bkt}/${remote_obj} \
+ --progress \
+ --multipart-chunk-size-mb=5 >$fifo &
+ set +e # don't allow errors to stop script
+ while read line ;do
+ echo "$line" | grep --quiet "part $stop_part "
+ if [ ${PIPESTATUS[1]} -eq 0 ] ;then
+ kill -9 $(jobs -p)
+ break
+ fi
+ done <$fifo
+ set -e
+
+ rm -f $fifo
+}
+
+mys3upload() {
+ obj=$1
+ bucket=$2
+ dest_obj=$3
+
+ mys3cmd put -q $obj s3://${bucket}/$dest_obj
+}
+
+########################################################################
+# PREP
+
+create_users
+dd if=/dev/urandom of=$big_obj bs=1M count=${big_size}
+dd if=/dev/urandom of=$huge_obj bs=1M count=${huge_size}
+touch $empty_obj
+
+quick_tests() {
+ echo TRY A SWIFT COMMAND
+ myswift upload swift-plain-ctr $big_obj --object-name swift-obj-test
+ myswift list
+ myswift list swift-plain-ctr
+
+ echo TRY A RADOSGW-ADMIN COMMAND
+ radosgw-admin bucket list # make sure rgw is up and running
+}
+
+########################################################################
+# S3 TESTS
+
+####################################
+# regular multipart test
+
+mys3cmd mb s3://multipart-bkt
+mys3upload $huge_obj multipart-bkt multipart-obj
+mys3cmd ls
+mys3cmd ls s3://multipart-bkt
+
+####################################
+# multipart test with incomplete uploads
+
+bkt="incomplete-mp-bkt-1"
+
+mys3cmd mb s3://$bkt
+
+mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c $fifo 20
+
+# generate an incomplete multipart with more than 1,000 parts
+mys3uploadkill $huge_obj $bkt incomplete-mp-obj-b $fifo 1005
+
+# generate more than 1000 incomplete multiparts
+for c in $(seq 1005) ;do
+ mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c-$c $fifo 3
+done
+
+####################################
+# resharded bucket
+
+bkt=resharded-bkt-1
+
+mys3cmd mb s3://$bkt
+
+for f in $(seq 8) ; do
+ dest_obj="reshard-obj-${f}"
+ mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
+done
+
+radosgw-admin bucket reshard --num-shards 3 --bucket=$bkt --yes-i-really-mean-it
+radosgw-admin bucket reshard --num-shards 5 --bucket=$bkt --yes-i-really-mean-it
+
+####################################
+# versioned bucket
+
+if true ;then
+ echo "WARNING: versioned bucket test currently turned off"
+else
+ bkt=versioned-bkt-1
+
+ mys3cmd mb s3://$bkt
+
+ # bucket-enable-versioning $bkt
+
+ for f in $(seq 3) ;do
+ for g in $(seq 10) ;do
+ dest_obj="versioned-obj-${g}"
+ mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
+ done
+ done
+
+ for g in $(seq 1 2 10) ;do
+ dest_obj="versioned-obj-${g}"
+ mys3cmd rm s3://${bkt}/$dest_obj
+ done
+fi
+
+############################################################
+# copy small objects
+
+o_bkt="orig-bkt-1"
+d_bkt="copy-bkt-1"
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 4) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
+mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
+
+for f in $(seq 5 6) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
+done
+
+############################################################
+# copy small objects and delete original
+
+o_bkt="orig-bkt-2"
+d_bkt="copy-bkt-2"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 4) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
+mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
+
+for f in $(seq 5 6) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
+done
+
+mys3cmd rb --recursive s3://${o_bkt}
+
+############################################################
+# copy multipart objects
+
+o_bkt="orig-mp-bkt-3"
+d_bkt="copy-mp-bkt-3"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 2) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
+ s3://${d_bkt}/copied-multipart-obj-1
+
+for f in $(seq 5 5) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
+done
+
+
+############################################################
+# copy multipart objects and delete original
+
+o_bkt="orig-mp-bkt-4"
+d_bkt="copy-mp-bkt-4"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 2) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
+ s3://${d_bkt}/copied-multipart-obj-1
+
+for f in $(seq 5 5) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
+done
+
+mys3cmd rb --recursive s3://$o_bkt
+
+########################################################################
+# SWIFT TESTS
+
+# 600MB
+segment_size=629145600
+
+############################################################
+# plain test
+
+for f in $(seq 4) ;do
+ myswift upload swift-plain-ctr $big_obj --object-name swift-obj-$f
+done
+
+############################################################
+# zero-len test
+
+myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/
+myswift upload swift-zerolen-ctr $big_obj --object-name subdir/abc1
+myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/empty1
+myswift upload swift-zerolen-ctr $big_obj --object-name subdir/xyz1
+
+############################################################
+# dlo test
+
+# upload in 600MB segments
+myswift upload swift-dlo-ctr $huge_obj --object-name dlo-obj-1 \
+ -S $segment_size
+
+############################################################
+# slo test
+
+# upload in 600MB segments
+myswift upload swift-slo-ctr $huge_obj --object-name slo-obj-1 \
+ -S $segment_size --use-slo
+
+############################################################
+# large object copy test
+
+# upload a small (unsegmented) object
+o_ctr=swift-orig-ctr
+o_obj=slo-orig-obj-1
+d_ctr=swift-copy-ctr
+d_obj=slo-copy-obj-1
+myswift upload $o_ctr $big_obj --object-name $o_obj
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+############################################################
+# huge dlo object copy test
+
+o_ctr=swift-orig-dlo-ctr-1
+o_obj=dlo-orig-dlo-obj-1
+d_ctr=swift-copy-dlo-ctr-1
+d_obj=dlo-copy-dlo-obj-1
+
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+############################################################
+# huge dlo object copy and orig delete
+
+o_ctr=swift-orig-dlo-ctr-2
+o_obj=dlo-orig-dlo-obj-2
+d_ctr=swift-copy-dlo-ctr-2
+d_obj=dlo-copy-dlo-obj-2
+
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+############################################################
+# huge slo object copy test
+
+o_ctr=swift-orig-slo-ctr-1
+o_obj=slo-orig-slo-obj-1
+d_ctr=swift-copy-slo-ctr-1
+d_obj=slo-copy-slo-obj-1
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size --use-slo
+
+myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
+
+############################################################
+# huge slo object copy test and orig delete
+
+o_ctr=swift-orig-slo-ctr-2
+o_obj=slo-orig-slo-obj-2
+d_ctr=swift-copy-slo-ctr-2
+d_obj=slo-copy-slo-obj-2
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size --use-slo
+
+myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+########################################################################
+# FORCE GARBAGE COLLECTION
+
+sleep 6 # since for testing age at which gc can happen is 5 secs
+radosgw-admin gc process --include-all
+
+
+########################################
+# DO ORPHAN LIST
+
+pool="default.rgw.buckets.data"
+
+rgw-orphan-list $pool
+
+# we only expect there to be one output file, but loop just in case
+ol_error=""
+for f in orphan-list-*.out ; do
+ if [ -s "$f" ] ;then # if file non-empty
+ ol_error="${ol_error}:$f"
+        echo "One or more orphans found in $f:"
+ cat "$f"
+ fi
+done
+
+if [ -n "$ol_error" ] ;then
+ echo "ERROR: orphans found when none expected"
+ exit 1
+fi
+
+########################################################################
+# CLEAN UP
+
+rm -f $empty_obj $big_obj $huge_obj $s3config
+
+success
diff --git a/qa/workunits/rgw/test_rgw_reshard.py b/qa/workunits/rgw/test_rgw_reshard.py
new file mode 100755
index 000000000..400994e47
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_reshard.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python3
+
+import logging as log
+import time
+import subprocess
+import json
+import boto3
+import botocore.exceptions
+import os
+
+"""
+Rgw manual and dynamic resharding testing against a running instance
+"""
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
+#
+
+log.basicConfig(level=log.DEBUG)
+log.getLogger('botocore').setLevel(log.CRITICAL)
+log.getLogger('boto3').setLevel(log.CRITICAL)
+log.getLogger('urllib3').setLevel(log.CRITICAL)
+
+""" Constants """
+USER = 'tester'
+DISPLAY_NAME = 'Testing'
+ACCESS_KEY = 'NX5QOQKC6BH2IDN8HC7A'
+SECRET_KEY = 'LnEsqNNqZIpkzauboDcLXLcYaWwLQ3Kop0zAnKIn'
+BUCKET_NAME1 = 'myfoo'
+BUCKET_NAME2 = 'mybar'
+VER_BUCKET_NAME = 'myver'
+INDEX_POOL = 'default.rgw.buckets.index'
+
+
+def exec_cmd(cmd):
+ try:
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ out, err = proc.communicate()
+ if proc.returncode == 0:
+ log.info('command succeeded')
+ if out is not None: log.info(out)
+ return out
+ else:
+ raise Exception("error: %s \nreturncode: %s" % (err, proc.returncode))
+ except Exception as e:
+ log.error('command failed')
+ log.error(e)
+ return False
+
+
+class BucketStats:
+ def __init__(self, bucket_name, bucket_id, num_objs=0, size_kb=0, num_shards=0):
+ self.bucket_name = bucket_name
+ self.bucket_id = bucket_id
+ self.num_objs = num_objs
+ self.size_kb = size_kb
+ self.num_shards = num_shards if num_shards > 0 else 1
+
+ def get_num_shards(self):
+ self.num_shards = get_bucket_num_shards(self.bucket_name, self.bucket_id)
+
+
+def get_bucket_stats(bucket_name):
+ """
+ function to get bucket stats
+ """
+ cmd = exec_cmd("radosgw-admin bucket stats --bucket %s" % bucket_name)
+ json_op = json.loads(cmd)
+ bucket_id = json_op['id']
+ num_shards_op = json_op['num_shards']
+ if len(json_op['usage']) > 0:
+ num_objects = json_op['usage']['rgw.main']['num_objects']
+ size_kb = json_op['usage']['rgw.main']['size_kb']
+ else:
+ num_objects = 0
+ size_kb = 0
+ log.debug("bucket %s id %s num_objects %d size_kb %d num_shards %d", bucket_name, bucket_id,
+ num_objects, size_kb, num_shards_op)
+ return BucketStats(bucket_name, bucket_id, num_objects, size_kb, num_shards_op)
+
+
+def get_bucket_num_shards(bucket_name, bucket_id):
+ """
+ function to get bucket num shards
+ """
+ metadata = 'bucket.instance:' + bucket_name + ':' + bucket_id
+ log.debug("metadata %s", metadata)
+ cmd = exec_cmd('radosgw-admin metadata get %s' % metadata)
+ json_op = json.loads(cmd)
+ num_shards = json_op['data']['bucket_info']['num_shards']
+ log.debug("bucket %s id %s num_shards %d", bucket_name, bucket_id, num_shards)
+ return num_shards
+
+
+def main():
+ """
+ execute manual and dynamic resharding commands
+ """
+ # create user
+ exec_cmd('radosgw-admin user create --uid %s --display-name %s --access-key %s --secret %s'
+ % (USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY))
+
+ def boto_connect(portnum, ssl, proto):
+ endpoint = proto + '://localhost:' + portnum
+ conn = boto3.resource('s3',
+ aws_access_key_id=ACCESS_KEY,
+ aws_secret_access_key=SECRET_KEY,
+ use_ssl=ssl,
+ endpoint_url=endpoint,
+ verify=False,
+ config=None,
+ )
+ try:
+ list(conn.buckets.limit(1)) # just verify we can list buckets
+ except botocore.exceptions.ConnectionError as e:
+ print(e)
+ raise
+ print('connected to', endpoint)
+ return conn
+
+ try:
+ connection = boto_connect('80', False, 'http')
+ except botocore.exceptions.ConnectionError:
+ try: # retry on non-privileged http port
+ connection = boto_connect('8000', False, 'http')
+ except botocore.exceptions.ConnectionError:
+ # retry with ssl
+ connection = boto_connect('443', True, 'https')
+
+ # create a bucket
+ bucket1 = connection.create_bucket(Bucket=BUCKET_NAME1)
+ bucket2 = connection.create_bucket(Bucket=BUCKET_NAME2)
+ ver_bucket = connection.create_bucket(Bucket=VER_BUCKET_NAME)
+    connection.BucketVersioning(VER_BUCKET_NAME)
+
+ bucket_stats1 = get_bucket_stats(BUCKET_NAME1)
+ bucket_stats2 = get_bucket_stats(BUCKET_NAME2)
+ ver_bucket_stats = get_bucket_stats(VER_BUCKET_NAME)
+
+ bucket1_acl = connection.BucketAcl(BUCKET_NAME1).load()
+ bucket2_acl = connection.BucketAcl(BUCKET_NAME2).load()
+ ver_bucket_acl = connection.BucketAcl(VER_BUCKET_NAME).load()
+
+ # TESTCASE 'reshard-add','reshard','add','add bucket to resharding queue','succeeds'
+ log.debug(' test: reshard add')
+ num_shards_expected = bucket_stats1.num_shards + 1
+ cmd = exec_cmd('radosgw-admin reshard add --bucket %s --num-shards %s' % (BUCKET_NAME1, num_shards_expected))
+ cmd = exec_cmd('radosgw-admin reshard list')
+ json_op = json.loads(cmd)
+ log.debug('bucket name %s', json_op[0]['bucket_name'])
+ assert json_op[0]['bucket_name'] == BUCKET_NAME1
+ assert json_op[0]['tentative_new_num_shards'] == num_shards_expected
+
+ # TESTCASE 'reshard-process','reshard','','process bucket resharding','succeeds'
+ log.debug(' test: reshard process')
+ cmd = exec_cmd('radosgw-admin reshard process')
+ time.sleep(5)
+ # check bucket shards num
+ bucket_stats1 = get_bucket_stats(BUCKET_NAME1)
+ bucket_stats1.get_num_shards()
+ if bucket_stats1.num_shards != num_shards_expected:
+ log.error("Resharding failed on bucket %s. Expected number of shards are not created" % BUCKET_NAME1)
+
+ # TESTCASE 'reshard-add','reshard','add','add non empty bucket to resharding queue','succeeds'
+ log.debug(' test: reshard add non empty bucket')
+ # create objs
+ num_objs = 8
+ for i in range(0, num_objs):
+ connection.Object(BUCKET_NAME1, ('key'+str(i))).put(Body=b"some_data")
+
+ num_shards_expected = bucket_stats1.num_shards + 1
+ cmd = exec_cmd('radosgw-admin reshard add --bucket %s --num-shards %s' % (BUCKET_NAME1, num_shards_expected))
+ cmd = exec_cmd('radosgw-admin reshard list')
+ json_op = json.loads(cmd)
+ log.debug('bucket name %s', json_op[0]['bucket_name'])
+ assert json_op[0]['bucket_name'] == BUCKET_NAME1
+ assert json_op[0]['tentative_new_num_shards'] == num_shards_expected
+
+ # TESTCASE 'reshard process ,'reshard','process','reshard non empty bucket','succeeds'
+ log.debug(' test: reshard process non empty bucket')
+ cmd = exec_cmd('radosgw-admin reshard process')
+ # check bucket shards num
+ bucket_stats1 = get_bucket_stats(BUCKET_NAME1)
+ bucket_stats1.get_num_shards()
+ if bucket_stats1.num_shards != num_shards_expected:
+ log.error("Resharding failed on bucket %s. Expected number of shards are not created" % BUCKET_NAME1)
+
+ # TESTCASE 'manual resharding','bucket', 'reshard','','manual bucket resharding','succeeds'
+ log.debug(' test: manual reshard bucket')
+ # create objs
+ num_objs = 11
+ for i in range(0, num_objs):
+ connection.Object(BUCKET_NAME2, ('key' + str(i))).put(Body=b"some_data")
+
+ time.sleep(10)
+ num_shards_expected = bucket_stats2.num_shards + 1
+ cmd = exec_cmd('radosgw-admin bucket reshard --bucket %s --num-shards %s' % (BUCKET_NAME2,
+ num_shards_expected))
+ # check bucket shards num
+ bucket_stats2 = get_bucket_stats(BUCKET_NAME2)
+ bucket_stats2.get_num_shards()
+ if bucket_stats2.num_shards != num_shards_expected:
+ log.error("Resharding failed on bucket %s. Expected number of shards are not created" % BUCKET_NAME2)
+
+ # TESTCASE 'versioning reshard-','bucket', reshard','versioning reshard','succeeds'
+ log.debug(' test: reshard versioned bucket')
+ num_shards_expected = ver_bucket_stats.num_shards + 1
+ cmd = exec_cmd('radosgw-admin bucket reshard --bucket %s --num-shards %s' % (VER_BUCKET_NAME,
+ num_shards_expected))
+ # check bucket shards num
+ ver_bucket_stats = get_bucket_stats(VER_BUCKET_NAME)
+ assert ver_bucket_stats.num_shards == num_shards_expected
+
+ # TESTCASE 'check acl'
+ new_bucket1_acl = connection.BucketAcl(BUCKET_NAME1).load()
+ assert new_bucket1_acl == bucket1_acl
+ new_bucket2_acl = connection.BucketAcl(BUCKET_NAME2).load()
+ assert new_bucket2_acl == bucket2_acl
+ new_ver_bucket_acl = connection.BucketAcl(VER_BUCKET_NAME).load()
+ assert new_ver_bucket_acl == ver_bucket_acl
+
+ # TESTCASE 'check reshard removes olh entries with empty name'
+ log.debug(' test: reshard removes olh entries with empty name')
+ bucket1.objects.all().delete()
+
+ # get name of shard 0 object, add a bogus olh entry with empty name
+ bucket_shard0 = '.dir.%s.0' % get_bucket_stats(BUCKET_NAME1).bucket_id
+ if 'CEPH_ROOT' in os.environ:
+ k = '%s/qa/workunits/rgw/olh_noname_key' % os.environ['CEPH_ROOT']
+ v = '%s/qa/workunits/rgw/olh_noname_val' % os.environ['CEPH_ROOT']
+ else:
+ k = 'olh_noname_key'
+ v = 'olh_noname_val'
+ exec_cmd('rados -p %s setomapval %s --omap-key-file %s < %s' % (INDEX_POOL, bucket_shard0, k, v))
+
+ # check that bi list has one entry with empty name
+ cmd = exec_cmd('radosgw-admin bi list --bucket %s' % BUCKET_NAME1)
+ json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore utf-8 can't decode 0x80
+ assert len(json_op) == 1
+ assert json_op[0]['entry']['key']['name'] == ''
+
+ # reshard to prune the bogus olh
+ cmd = exec_cmd('radosgw-admin bucket reshard --bucket %s --num-shards %s --yes-i-really-mean-it' % (BUCKET_NAME1, 1))
+
+ # get new name of shard 0 object, check that bi list has zero entries
+ bucket_shard0 = '.dir.%s.0' % get_bucket_stats(BUCKET_NAME1).bucket_id
+ cmd = exec_cmd('radosgw-admin bi list --bucket %s' % BUCKET_NAME1)
+ json_op = json.loads(cmd)
+ assert len(json_op) == 0
+
+ # Clean up
+ log.debug("Deleting bucket %s", BUCKET_NAME1)
+ bucket1.objects.all().delete()
+ bucket1.delete()
+ log.debug("Deleting bucket %s", BUCKET_NAME2)
+ bucket2.objects.all().delete()
+ bucket2.delete()
+ log.debug("Deleting bucket %s", VER_BUCKET_NAME)
+ ver_bucket.delete()
+
+
+main()
+log.info("Completed resharding tests")
diff --git a/qa/workunits/rgw/test_rgw_throttle.sh b/qa/workunits/rgw/test_rgw_throttle.sh
new file mode 100755
index 000000000..f637b8f08
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_throttle.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_rgw_throttle
+
+exit 0