path: root/qa/workunits/rgw
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit  e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree    64f88b554b444a49f656b6c656111a145cbbaa28 /qa/workunits/rgw
parent  Initial commit. (diff)
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/workunits/rgw')
-rwxr-xr-x  qa/workunits/rgw/common.py                        103
-rwxr-xr-x  qa/workunits/rgw/keystone-fake-server.py          208
-rwxr-xr-x  qa/workunits/rgw/keystone-service-token.sh         34
-rw-r--r--  qa/workunits/rgw/olh_noname_key                     1
-rw-r--r--  qa/workunits/rgw/olh_noname_val                   bin 0 -> 71 bytes
-rwxr-xr-x  qa/workunits/rgw/run-bucket-check.sh               19
-rwxr-xr-x  qa/workunits/rgw/run-datacache.sh                  19
-rwxr-xr-x  qa/workunits/rgw/run-reshard.sh                    23
-rwxr-xr-x  qa/workunits/rgw/run-s3tests.sh                    39
-rwxr-xr-x  qa/workunits/rgw/run-versioning.sh                 19
-rwxr-xr-x  qa/workunits/rgw/s3_bucket_quota.pl               393
-rwxr-xr-x  qa/workunits/rgw/s3_multipart_upload.pl           151
-rwxr-xr-x  qa/workunits/rgw/s3_user_quota.pl                 191
-rw-r--r--  qa/workunits/rgw/s3_utilities.pm                  233
-rwxr-xr-x  qa/workunits/rgw/test-keystone-service-token.py   189
-rwxr-xr-x  qa/workunits/rgw/test_librgw_file.sh               59
-rwxr-xr-x  qa/workunits/rgw/test_rgw_bucket_check.py         194
-rwxr-xr-x  qa/workunits/rgw/test_rgw_datacache.py            209
-rwxr-xr-x  qa/workunits/rgw/test_rgw_gc_log.sh                 5
-rwxr-xr-x  qa/workunits/rgw/test_rgw_obj.sh                    5
-rwxr-xr-x  qa/workunits/rgw/test_rgw_orphan_list.sh          519
-rwxr-xr-x  qa/workunits/rgw/test_rgw_reshard.py              311
-rwxr-xr-x  qa/workunits/rgw/test_rgw_s3_mp_reupload.py       121
-rwxr-xr-x  qa/workunits/rgw/test_rgw_s3_mp_reupload.sh       110
-rwxr-xr-x  qa/workunits/rgw/test_rgw_throttle.sh               5
-rwxr-xr-x  qa/workunits/rgw/test_rgw_versioning.py           110
26 files changed, 3270 insertions, 0 deletions
diff --git a/qa/workunits/rgw/common.py b/qa/workunits/rgw/common.py
new file mode 100755
index 000000000..2c9c5d035
--- /dev/null
+++ b/qa/workunits/rgw/common.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+
+import errno
+import subprocess
+import logging as log
+import boto3
+import botocore.exceptions
+import random
+import json
+from time import sleep
+
+log.basicConfig(format = '%(message)s', level=log.DEBUG)
+log.getLogger('botocore').setLevel(log.CRITICAL)
+log.getLogger('boto3').setLevel(log.CRITICAL)
+log.getLogger('urllib3').setLevel(log.CRITICAL)
+
+def exec_cmd(cmd, wait = True, **kwargs):
+ check_retcode = kwargs.pop('check_retcode', True)
+ kwargs['shell'] = True
+ kwargs['stdout'] = subprocess.PIPE
+ proc = subprocess.Popen(cmd, **kwargs)
+ log.info(proc.args)
+ if wait:
+ out, _ = proc.communicate()
+ if check_retcode:
+ assert(proc.returncode == 0)
+ return out
+ return (out, proc.returncode)
+ return ''
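+# Illustrative usage (mirrors the test scripts added in this commit): callers
+# pass a radosgw-admin command string and parse its JSON output, e.g.
+#   out = exec_cmd(f'radosgw-admin bucket stats --bucket {name}')
+#   stats = json.loads(out)
+# and with check_retcode=False an (output, returncode) tuple is returned.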
+
+def create_user(uid, display_name, access_key, secret_key):
+ _, ret = exec_cmd(f'radosgw-admin user create --uid {uid} --display-name "{display_name}" --access-key {access_key} --secret {secret_key}', check_retcode=False)
+ assert(ret == 0 or ret == errno.EEXIST)
+
+def boto_connect(access_key, secret_key, config=None):
+ def try_connect(portnum, ssl, proto):
+ endpoint = proto + '://localhost:' + portnum
+ conn = boto3.resource('s3',
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ use_ssl=ssl,
+ endpoint_url=endpoint,
+ verify=False,
+ config=config,
+ )
+ try:
+ list(conn.buckets.limit(1)) # just verify we can list buckets
+ except botocore.exceptions.ConnectionError as e:
+ print(e)
+ raise
+ print('connected to', endpoint)
+ return conn
+ try:
+ return try_connect('80', False, 'http')
+ except botocore.exceptions.ConnectionError:
+ try: # retry on non-privileged http port
+ return try_connect('8000', False, 'http')
+ except botocore.exceptions.ConnectionError:
+ # retry with ssl
+ return try_connect('443', True, 'https')
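+# Typical call pattern (taken from the bucket-check test below): botocore
+# retries are disabled so connection failures surface immediately, e.g.
+#   conn = boto_connect(access_key, secret_key,
+#                       Config(retries={'total_max_attempts': 1}))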
+
+def put_objects(bucket, key_list):
+ objs = []
+ for key in key_list:
+ o = bucket.put_object(Key=key, Body=b"some_data")
+ objs.append((o.key, o.version_id))
+ return objs
+
+def create_unlinked_objects(conn, bucket, key_list):
+ # creates an unlinked/unlistable object for each key in key_list
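+ # (this works by injecting OLH errors through the rgw_debug_inject_* config
+ # options, so each PUT writes an object instance that never gets linked into
+ # the bucket listing; the matching instance id is then recovered from the
+ # 'radosgw-admin bi list' output below)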
+
+ object_versions = []
+ try:
+ exec_cmd('ceph config set client rgw_debug_inject_set_olh_err 2')
+ exec_cmd('ceph config set client rgw_debug_inject_olh_cancel_modification_err true')
+ sleep(1)
+ for key in key_list:
+ tag = str(random.randint(0, 1_000_000))
+ try:
+ bucket.put_object(Key=key, Body=b"some_data", Metadata = {
+ 'tag': tag,
+ })
+ except Exception as e:
+ log.debug(e)
+ out = exec_cmd(f'radosgw-admin bi list --bucket {bucket.name} --object {key}')
+ instance_entries = filter(
+ lambda x: x['type'] == 'instance',
+ json.loads(out.replace(b'\x80', b'0x80')))
+ found = False
+ for ie in instance_entries:
+ instance_id = ie['entry']['instance']
+ ov = conn.ObjectVersion(bucket.name, key, instance_id).head()
+ if ov['Metadata'] and ov['Metadata']['tag'] == tag:
+ object_versions.append((key, instance_id))
+ found = True
+ break
+ if not found:
+ raise Exception(f'failed to create unlinked object for key={key}')
+ finally:
+ exec_cmd('ceph config rm client rgw_debug_inject_set_olh_err')
+ exec_cmd('ceph config rm client rgw_debug_inject_olh_cancel_modification_err')
+ return object_versions
+
diff --git a/qa/workunits/rgw/keystone-fake-server.py b/qa/workunits/rgw/keystone-fake-server.py
new file mode 100755
index 000000000..c05ad7bfd
--- /dev/null
+++ b/qa/workunits/rgw/keystone-fake-server.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Binero
+#
+# Author: Tobias Urdin <tobias.urdin@binero.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+
+from datetime import datetime, timedelta
+import logging
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+
+DEFAULT_DOMAIN = {
+ 'id': 'default',
+ 'name': 'Default',
+}
+
+
+PROJECTS = {
+ 'admin': {
+ 'domain': DEFAULT_DOMAIN,
+ 'id': 'a6944d763bf64ee6a275f1263fae0352',
+ 'name': 'admin',
+ },
+ 'deadbeef': {
+ 'domain': DEFAULT_DOMAIN,
+ 'id': 'b4221c214dd64ee6a464g2153fae3813',
+ 'name': 'deadbeef',
+ },
+}
+
+
+USERS = {
+ 'admin': {
+ 'domain': DEFAULT_DOMAIN,
+ 'id': '51cc68287d524c759f47c811e6463340',
+ 'name': 'admin',
+ },
+ 'deadbeef': {
+ 'domain': DEFAULT_DOMAIN,
+ 'id': '99gg485738df758349jf8d848g774392',
+ 'name': 'deadbeef',
+ },
+}
+
+
+USERROLES = {
+ 'admin': [
+ {
+ 'id': '51cc68287d524c759f47c811e6463340',
+ 'name': 'admin',
+ }
+ ],
+ 'deadbeef': [
+ {
+ 'id': '98bd32184f854f393a72b932g5334124',
+ 'name': 'Member',
+ }
+ ],
+}
+
+
+TOKENS = {
+ 'admin-token-1': {
+ 'username': 'admin',
+ 'project': 'admin',
+ 'expired': False,
+ },
+ 'user-token-1': {
+ 'username': 'deadbeef',
+ 'project': 'deadbeef',
+ 'expired': False,
+ },
+ 'user-token-2': {
+ 'username': 'deadbeef',
+ 'project': 'deadbeef',
+ 'expired': True,
+ },
+}
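+# 'admin-token-1' is what the fake POST /v3/auth/tokens handler issues for the
+# admin/ADMIN credentials below, while 'user-token-2' simulates an expired
+# token that only validates when the request carries allow_expired=1.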
+
+
+def _generate_token_result(username, project, expired=False):
+ userdata = USERS[username]
+ projectdata = PROJECTS[project]
+ userroles = USERROLES[username]
+
+ if expired:
+ then = datetime.now() - timedelta(hours=2)
+ issued_at = then.strftime('%Y-%m-%dT%H:%M:%SZ')
+ expires_at = (then + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M:%SZ')
+ else:
+ now = datetime.now()
+ issued_at = now.strftime('%Y-%m-%dT%H:%M:%SZ')
+ expires_at = (now + timedelta(seconds=10)).strftime('%Y-%m-%dT%H:%M:%SZ')
+
+ result = {
+ 'token': {
+ 'audit_ids': ['3T2dc1CGQxyJsHdDu1xkcw'],
+ 'catalog': [],
+ 'expires_at': expires_at,
+ 'is_domain': False,
+ 'issued_at': issued_at,
+ 'methods': ['password'],
+ 'project': projectdata,
+ 'roles': userroles,
+ 'user': userdata,
+ }
+ }
+
+ return result
+
+
+COUNTERS = {
+ 'get_total': 0,
+ 'post_total': 0,
+}
+
+
+class HTTPRequestHandler(BaseHTTPRequestHandler):
+ def do_GET(self):
+ # This is not part of the Keystone API
+ if self.path == '/stats':
+ self._handle_stats()
+ return
+
+ if str(self.path).startswith('/v3/auth/tokens'):
+ self._handle_get_auth()
+ else:
+ self.send_response(403)
+ self.end_headers()
+
+ def do_POST(self):
+ if self.path == '/v3/auth/tokens':
+ self._handle_post_auth()
+ else:
+ self.send_response(400)
+ self.end_headers()
+
+ def _get_data(self):
+ length = int(self.headers.get('content-length'))
+ data = self.rfile.read(length).decode('utf8')
+ return json.loads(data)
+
+ def _set_data(self, data):
+ jdata = json.dumps(data)
+ self.wfile.write(jdata.encode('utf8'))
+
+ def _handle_stats(self):
+ self.send_response(200)
+ self.end_headers()
+ self._set_data(COUNTERS)
+
+ def _handle_get_auth(self):
+ logging.info('Increasing get_total counter from %d -> %d' % (COUNTERS['get_total'], COUNTERS['get_total']+1))
+ COUNTERS['get_total'] += 1
+ auth_token = self.headers.get('X-Subject-Token', None)
+ if auth_token and auth_token in TOKENS:
+ tokendata = TOKENS[auth_token]
+ if tokendata['expired'] and 'allow_expired=1' not in self.path:
+ self.send_response(404)
+ self.end_headers()
+ else:
+ self.send_response(200)
+ self.send_header('Content-Type', 'application/json')
+ self.end_headers()
+ result = _generate_token_result(tokendata['username'], tokendata['project'], tokendata['expired'])
+ self._set_data(result)
+ else:
+ self.send_response(404)
+ self.end_headers()
+
+ def _handle_post_auth(self):
+ logging.info('Increasing post_total counter from %d -> %d' % (COUNTERS['post_total'], COUNTERS['post_total']+1))
+ COUNTERS['post_total'] += 1
+ data = self._get_data()
+ user = data['auth']['identity']['password']['user']
+ if user['name'] == 'admin' and user['password'] == 'ADMIN':
+ self.send_response(201)
+ self.send_header('Content-Type', 'application/json')
+ self.send_header('X-Subject-Token', 'admin-token-1')
+ self.end_headers()
+ tokendata = TOKENS['admin-token-1']
+ result = _generate_token_result(tokendata['username'], tokendata['project'], tokendata['expired'])
+ self._set_data(result)
+ else:
+ self.send_response(401)
+ self.end_headers()
+
+
+def main():
+ logging.basicConfig(level=logging.DEBUG)
+ logging.info('Starting keystone-fake-server')
+ server = HTTPServer(('localhost', 5000), HTTPRequestHandler)
+ server.serve_forever()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/qa/workunits/rgw/keystone-service-token.sh b/qa/workunits/rgw/keystone-service-token.sh
new file mode 100755
index 000000000..fc39731ca
--- /dev/null
+++ b/qa/workunits/rgw/keystone-service-token.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2022 Binero
+#
+# Author: Tobias Urdin <tobias.urdin@binero.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+trap cleanup EXIT
+
+function cleanup() {
+ kill $KEYSTONE_FAKE_SERVER_PID
+ wait
+}
+
+function run() {
+ $CEPH_ROOT/qa/workunits/rgw/keystone-fake-server.py &
+ KEYSTONE_FAKE_SERVER_PID=$!
+ # Give the fake Keystone server a few seconds to start up
+ sleep 5
+ $CEPH_ROOT/qa/workunits/rgw/test-keystone-service-token.py
+}
+
+main keystone-service-token "$@"
diff --git a/qa/workunits/rgw/olh_noname_key b/qa/workunits/rgw/olh_noname_key
new file mode 100644
index 000000000..6138c57cd
--- /dev/null
+++ b/qa/workunits/rgw/olh_noname_key
@@ -0,0 +1 @@
+€1001_04/57/0457f727ec113e418d5b16d206b200ed068c0533554883ce811df7c932a3df68/2018_12_11/2889999/3386469/metadata.gz \ No newline at end of file
diff --git a/qa/workunits/rgw/olh_noname_val b/qa/workunits/rgw/olh_noname_val
new file mode 100644
index 000000000..ff442e137
--- /dev/null
+++ b/qa/workunits/rgw/olh_noname_val
Binary files differ
diff --git a/qa/workunits/rgw/run-bucket-check.sh b/qa/workunits/rgw/run-bucket-check.sh
new file mode 100755
index 000000000..85e02db5e
--- /dev/null
+++ b/qa/workunits/rgw/run-bucket-check.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -ex
+
+# assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
+# localhost:443 for ssl
+
+mydir=`dirname $0`
+
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install boto3
+
+## run test
+$mydir/bin/python3 $mydir/test_rgw_bucket_check.py
+
+deactivate
+echo OK.
+
diff --git a/qa/workunits/rgw/run-datacache.sh b/qa/workunits/rgw/run-datacache.sh
new file mode 100755
index 000000000..5c00da1da
--- /dev/null
+++ b/qa/workunits/rgw/run-datacache.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -ex
+
+# assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
+# localhost:443 for ssl
+
+mydir=`dirname $0`
+
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install configobj
+
+## run test
+$mydir/bin/python3 $mydir/test_rgw_datacache.py
+
+deactivate
+echo OK.
+
diff --git a/qa/workunits/rgw/run-reshard.sh b/qa/workunits/rgw/run-reshard.sh
new file mode 100755
index 000000000..bdab0aabb
--- /dev/null
+++ b/qa/workunits/rgw/run-reshard.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+set -ex
+
+# this test uses fault injection to abort during 'radosgw-admin bucket reshard'
+# disable coredumps so teuthology won't mark a failure
+ulimit -c 0
+
+# assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
+# localhost:443 for ssl
+
+mydir=`dirname $0`
+
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install boto3
+
+## run test
+$mydir/bin/python3 $mydir/test_rgw_reshard.py
+
+deactivate
+echo OK.
+
diff --git a/qa/workunits/rgw/run-s3tests.sh b/qa/workunits/rgw/run-s3tests.sh
new file mode 100755
index 000000000..727bef9eb
--- /dev/null
+++ b/qa/workunits/rgw/run-s3tests.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+set -ex
+
+# run s3-tests from current directory. assume working
+# ceph environment (radosgw-admin in path) and rgw on localhost:8000
+# (the vstart default).
+
+branch=$1
+[ -z "$1" ] && branch=master
+port=$2
+[ -z "$2" ] && port=8000 # this is vstart's default
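+# usage: run-s3tests.sh [s3-tests branch] [rgw port]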
+
+##
+
+[ -z "$BUILD_DIR" ] && BUILD_DIR=build
+
+if [ -e CMakeCache.txt ]; then
+ BIN_PATH=$PWD/bin
+elif [ -e $root_path/../${BUILD_DIR}/CMakeCache.txt ]; then
+ cd $root_path/../${BUILD_DIR}
+ BIN_PATH=$PWD/bin
+fi
+PATH=$PATH:$BIN_PATH
+
+dir=tmp.s3-tests.$$
+
+# clone and bootstrap
+mkdir $dir
+cd $dir
+git clone https://github.com/ceph/s3-tests
+cd s3-tests
+git checkout ceph-$branch
+S3TEST_CONF=s3tests.conf.SAMPLE tox -- -m "not fails_on_rgw and not sse_s3 and not lifecycle_expiration and not test_of_sts and not webidentity_test" -v
+
+cd ../..
+rm -rf $dir
+
+echo OK.
+
diff --git a/qa/workunits/rgw/run-versioning.sh b/qa/workunits/rgw/run-versioning.sh
new file mode 100755
index 000000000..df60b7b03
--- /dev/null
+++ b/qa/workunits/rgw/run-versioning.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -ex
+
+# assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
+# localhost:443 for ssl
+
+mydir=`dirname $0`
+
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install boto3
+
+## run test
+$mydir/bin/python3 $mydir/test_rgw_versioning.py
+
+deactivate
+echo OK.
+
diff --git a/qa/workunits/rgw/s3_bucket_quota.pl b/qa/workunits/rgw/s3_bucket_quota.pl
new file mode 100755
index 000000000..7f5476ef6
--- /dev/null
+++ b/qa/workunits/rgw/s3_bucket_quota.pl
@@ -0,0 +1,393 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_bucket_quota.pl - Script to test the rgw bucket quota functionality using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_bucket_quota.pl [--help]
+
+Examples:
+ perl s3_bucket_quota.pl
+ or
+ perl s3_bucket_quota.pl --help
+
+=head1 DESCRIPTION
+
+This script tests the rgw bucket quota functionality using the S3 interface
+and reports the test results.
+
+=head1 ARGUMENTS
+
+s3_bucket_quota.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+#use strict;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+our $mytestfilename;
+my $mytestfilename1;
+my $logmsg;
+my $kruft;
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+my $rgw_user = "qa_user";
+
+# Function that deletes the user $rgw_user and writes to the logfile.
+sub delete_user
+{
+ my $cmd = "$radosgw_admin user rm --uid=$rgw_user";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /aborting/){
+ print "user $rgw_user deleted\n";
+ } else {
+ print "user $rgw_user NOT deleted\n";
+ return 1;
+ }
+ return 0;
+}
+
+sub quota_set_max_size {
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
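+ # radosgw-admin prints nothing on success, so an empty result (nothing
+ # matching /./) means the quota was applied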
+ if ($set_quota !~ /./){
+ print "quota set for the bucket: $bucketname \n";
+ } else {
+ print "quota set failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_max_size_zero {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=0`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max size as zero\n");
+ } else {
+ fail ("quota set with max size 0 failed for the bucket: $bucketname \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_max_objs_zero {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=0`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max objects as zero\n");
+ } else {
+ fail ("quota set with max objects 0 failed for the bucket: $bucketname \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_neg_size {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=-1`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max size -1\n");
+ } else {
+ fail ("quota set failed for the bucket: $bucketname with max size -1 \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_neg_objs {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=-1`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname max objects -1 \n");
+ } else {
+ fail ("quota set failed for the bucket: $bucketname with max objects -1 \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_user_objs {
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
+ my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
+ if ($set_quota1 !~ /./){
+ print "bucket quota max_objs set for the given user: $bucketname \n";
+ } else {
+ print "bucket quota max_objs set failed for the given user: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_user_size {
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
+ my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
+ if ($set_quota1 !~ /./){
+ print "bucket quota max size set for the given user: $bucketname \n";
+ } else {
+ print "bucket quota max size set failed for the user: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_max_obj {
+ # set max objects
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
+ if ($set_quota !~ /./){
+ print "quota set for the bucket: $bucketname \n";
+ } else {
+ print "quota set failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_enable {
+ my $en_quota = `$radosgw_admin quota enable --bucket=$bucketname`;
+ if ($en_quota !~ /./){
+ print "quota enabled for the bucket: $bucketname \n";
+ } else {
+ print "quota enable failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_disable {
+ my $dis_quota = `$radosgw_admin quota disable --bucket=$bucketname`;
+ if ($dis_quota !~ /./){
+ print "quota disabled for the bucket: $bucketname \n";
+ } else {
+ print "quota disable failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+# upload a file to the bucket
+sub upload_file {
+ print "adding file to bucket: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ #($bucket->delete_key($mytestfilename1) and print "delete keys on bucket succeeded second time\n" ) or die $s3->err . "delete keys on bucket failed second time\n" . $s3->errstr;
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+# set bucket quota with max_objects and verify
+sub test_max_objects {
+ my $size = '10Mb';
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_obj();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0){
+ pass ( "Test max objects passed" );
+ } else {
+ fail ( "Test max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# Set bucket quota for specific user and ensure max objects set for the user is validated
+sub test_max_objects_per_user{
+ my $size = '10Mb';
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_objs();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0){
+ pass ( "Test max objects for the given user passed" );
+ } else {
+ fail ( "Test max objects for the given user failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota with max_objects and try to exceed the max_objects and verify
+sub test_beyond_max_objs {
+ my $size = "10Mb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_obj();
+ quota_enable();
+ upload_file();
+ my $ret_value = readd_file();
+ if ($ret_value == 1){
+ pass ( "set max objects and test beyond max objects passed" );
+ } else {
+ fail ( "set max objects and test beyond max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for a user with max_objects and try to exceed the max_objects and verify
+sub test_beyond_max_objs_user {
+ my $size = "10Mb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_objs();
+ quota_enable();
+ upload_file();
+ my $ret_value = readd_file();
+ if ($ret_value == 1){
+ pass ( "set max objects for a given user and test beyond max objects passed" );
+ } else {
+ fail ( "set max objects for a given user and test beyond max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for max size and ensure it is validated
+sub test_quota_size {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_size();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 1) {
+ pass ( "set max size and ensure that uploads beyond the max size are rejected" );
+ my $retdel = delete_keys($mytestfilename);
+ if ($retdel == 0) {
+ print "delete objects successful \n";
+ my $size1 = "1Gb";
+ create_file($size1);
+ my $ret_val1 = upload_file();
+ if ($ret_val1 == 0) {
+ pass ( "set max size and ensure that the max size is in effect" );
+ } else {
+ fail ( "set max size and ensure the max size takes effect" );
+ }
+ }
+ } else {
+ fail ( "set max size and ensure that uploads beyond the max size are rejected" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for max size for a given user and ensure it is validated
+sub test_quota_size_user {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_size();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 1) {
+ pass ( "set max size for a given user and ensure that uploads beyond the max size are rejected" );
+ my $retdel = delete_keys($mytestfilename);
+ if ($retdel == 0) {
+ print "delete objects successful \n";
+ my $size1 = "1Gb";
+ create_file($size1);
+ my $ret_val1 = upload_file();
+ if ($ret_val1 == 0) {
+ pass ( "set max size for a given user and ensure that the max size is in effect" );
+ } else {
+ fail ( "set max size for a given user and ensure the max size takes effect" );
+ }
+ }
+ } else {
+ fail ( "set max size for a given user and ensure that uploads beyond the max size are rejected" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota size but disable quota and verify
+sub test_quota_size_disabled {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_size();
+ quota_disable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0) {
+ pass ( "bucket quota size doesn't take effect when quota is disabled" );
+ } else {
+ fail ( "bucket quota size doesn't take effect when quota is disabled" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota size for a given user but disable quota and verify
+sub test_quota_size_disabled_user {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_size();
+ quota_disable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0) {
+ pass ( "bucket quota size for a given user doesn't take effect when quota is disabled" );
+ } else {
+ fail ( "bucket quota size for a given user doesn't take effect when quota is disabled" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for specified user and verify
+
+#== Main starts here===
+ceph_os_info();
+test_max_objects();
+test_max_objects_per_user();
+test_beyond_max_objs();
+test_beyond_max_objs_user();
+quota_set_max_size_zero();
+quota_set_max_objs_zero();
+quota_set_neg_objs();
+quota_set_neg_size();
+test_quota_size();
+test_quota_size_user();
+test_quota_size_disabled();
+test_quota_size_disabled_user();
+
+print "OK";
diff --git a/qa/workunits/rgw/s3_multipart_upload.pl b/qa/workunits/rgw/s3_multipart_upload.pl
new file mode 100755
index 000000000..ab29e6b03
--- /dev/null
+++ b/qa/workunits/rgw/s3_multipart_upload.pl
@@ -0,0 +1,151 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_multipart_upload.pl - Script to test rgw multipart upload using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_multipart_upload.pl [--help]
+
+Examples:
+ perl s3_multipart_upload.pl
+ or
+ perl s3_multipart_upload.pl --help
+
+=head1 DESCRIPTION
+
+This script tests rgw multipart upload followed by a download, verifies the
+checksum using the S3 interface, and reports the test results.
+
+=head1 ARGUMENTS
+
+s3_multipart_upload.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+our $mytestfilename;
+
+# upload a file to the bucket
+sub upload_file {
+ my ($fsize, $i) = @_;
+ create_file($fsize, $i);
+ print "adding file to bucket $bucketname: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (print "upload failed\n" and return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+# Function to perform multipart upload of given file size to the user bucket via s3 interface
+sub multipart_upload
+{
+ my ($size, $parts) = @_;
+ # generate random user every time
+ my $user = rand();
+ # Divide the file size into equal parts and upload to the bucket in multiple parts
+ my $fsize = ($size/$parts);
+ my $fsize1;
+ run_s3($user);
+ if ($parts == 10){
+ $fsize1 = '100Mb';
+ } elsif ($parts == 100){
+ $fsize1 = '10Mb';
+ }
+ foreach my $i(1..$parts){
+ print "uploading file - part $i \n";
+ upload_file($fsize1, $i);
+ }
+ fetch_file_from_bucket($fsize1, $parts);
+ compare_cksum($fsize1, $parts);
+ purge_data($user);
+}
+
+# Function to download the files from bucket to verify there is no data corruption
+sub fetch_file_from_bucket
+{
+ # fetch file from the bucket
+ my ($fsize, $parts) = @_;
+ foreach my $i(1..$parts){
+ my $src_file = "$fsize.$i";
+ my $dest_file = "/tmp/downloadfile.$i";
+ print
+ "Downloading $src_file from bucket to $dest_file \n";
+ $response =
+ $bucket->get_key_filename( $src_file, GET,
+ $dest_file )
+ or die $s3->err . ": " . $s3->errstr;
+ }
+}
+
+# Compare the source file with destination file and verify checksum to ensure
+# the files are not corrupted
+sub compare_cksum
+{
+ my ($fsize, $parts)=@_;
+ my $md5 = Digest::MD5->new;
+ my $flag = 0;
+ foreach my $i (1..$parts){
+ my $src_file = "/tmp/"."$fsize".".$i";
+ my $dest_file = "/tmp/downloadfile".".$i";
+ open( FILE, $src_file )
+ or die "Error: Could not open $src_file for MD5 checksum...";
+ open( DLFILE, $dest_file )
+ or die "Error: Could not open $dest_file for MD5 checksum.";
+ binmode(FILE);
+ binmode(DLFILE);
+ my $md5sum = $md5->addfile(*FILE)->hexdigest;
+ my $md5sumdl = $md5->addfile(*DLFILE)->hexdigest;
+ close FILE;
+ close DLFILE;
+ # compare the checksums
+ if ( $md5sum eq $md5sumdl ) {
+ $flag++;
+ }
+ }
+ if ($flag == $parts){
+ pass("checksum verification for multipart upload passed" );
+ }else{
+ fail("checksum verification for multipart upload failed" );
+ }
+}
+
+#== Main starts here===
+ceph_os_info();
+check();
+# The following test runs a multipart upload of a 1 Gb file in 10 parts
+multipart_upload('1048576000', 10);
+# The following test runs a multipart upload of a 1 Gb file in 100 parts
+multipart_upload('1048576000', 100);
+print "OK";
diff --git a/qa/workunits/rgw/s3_user_quota.pl b/qa/workunits/rgw/s3_user_quota.pl
new file mode 100755
index 000000000..6d5c02a9a
--- /dev/null
+++ b/qa/workunits/rgw/s3_user_quota.pl
@@ -0,0 +1,191 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_user_quota.pl - Script to test the rgw user quota functionality using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_user_quota.pl [--help]
+
+Examples:
+ perl s3_user_quota.pl
+ or
+ perl s3_user_quota.pl --help
+
+=head1 DESCRIPTION
+
+This script tests the rgw user quota functionality using the S3 interface
+and reports the test results.
+
+=head1 ARGUMENTS
+
+s3_user_quota.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+our $mytestfilename;
+my $mytestfilename1;
+my $logmsg;
+my $kruft;
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+our $cnt;
+
+sub quota_set_max_size_per_user {
+ my ($maxsize, $size1,$rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
+ if (($set_quota !~ /./)&&($maxsize == 0)){
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 1){
+ pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
+ }
+ } elsif (($set_quota !~ /./) && ($maxsize != 0)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
+ }
+ }
+ delete_keys($mytestfilename);
+ purge_data($rgw_user);
+ return 0;
+}
+
+sub max_size_per_user {
+ my ($maxsize, $size1,$rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
+ if (($set_quota !~ /./) && ($maxsize != 0)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ $cnt++;
+ }
+ }
+ return $cnt;
+}
+
+sub quota_set_max_obj_per_user {
+ # set max objects
+ my ($maxobjs, $size1, $rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-objects=$maxobjs`;
+ if (($set_quota !~ /./) && ($maxobjs == 0)){
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 1){
+ pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
+ }
+ } elsif (($set_quota !~ /./) && ($maxobjs == 1)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
+ }
+ }
+ delete_keys($mytestfilename);
+ purge_data($rgw_user);
+}
+
+sub quota_enable_user {
+ my ($rgw_user) = @_;
+ my $en_quota = `$radosgw_admin quota enable --uid=$rgw_user --quota-scope=user`;
+ if ($en_quota !~ /./){
+ print "quota enabled for the user $rgw_user \n";
+ } else {
+ print "quota enable failed for the user $rgw_user \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_disable_user {
+ my $dis_quota = `$radosgw_admin quota disable --uid=$rgw_user --quota-scope=user`;
+ if ($dis_quota !~ /./){
+ print "quota disabled for the user $rgw_user \n";
+ } else {
+ print "quota disable failed for the user $rgw_user \n";
+ exit 1;
+ }
+ return 0;
+}
+
+# upload a file to the bucket
+sub upload_file {
+ print "adding file to bucket $bucketname: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+#Function to upload the given file size to bucket and verify
+sub test_max_objs {
+ my ($size, $rgw_user) = @_;
+ create_file($size);
+ quota_enable_user($rgw_user);
+ my $ret_value = upload_file();
+ return $ret_value;
+}
+
+# set user quota and ensure it is validated
+sub test_user_quota_max_size{
+ my ($max_buckets,$size, $fsize) = @_;
+ my $usr = rand();
+ my $ret_value;
+ foreach my $i (1..$max_buckets){
+ $ret_value = max_size_per_user($size, $fsize, $usr );
+ }
+ if ($ret_value == $max_buckets){
+ fail( "user quota max size for $usr failed on $max_buckets buckets" );
+ } else {
+ pass( "user quota max size for $usr passed on $max_buckets buckets" );
+ }
+ delete_keys($mytestfilename);
+ purge_data($usr);
+}
+
+#== Main starts here===
+ceph_os_info();
+check();
+quota_set_max_obj_per_user('0', '10Mb', 'usr1');
+quota_set_max_obj_per_user('1', '10Mb', 'usr2');
+quota_set_max_size_per_user(0, '10Mb', 'usr1');
+quota_set_max_size_per_user(1048576000, '1Gb', 'usr2');
+test_user_quota_max_size(3,1048576000,'100Mb');
+test_user_quota_max_size(2,1048576000, '1Gb');
+print "OK";
diff --git a/qa/workunits/rgw/s3_utilities.pm b/qa/workunits/rgw/s3_utilities.pm
new file mode 100644
index 000000000..3c3fae900
--- /dev/null
+++ b/qa/workunits/rgw/s3_utilities.pm
@@ -0,0 +1,233 @@
+# Common subroutines shared by the s3 testing code
+my $sec;
+my $min;
+my $hour;
+my $mon;
+my $year;
+my $mday;
+my $wday;
+my $yday;
+my $isdst;
+my $PASS_CNT = 0;
+my $FAIL_CNT = 0;
+
+our $radosgw_admin = $ENV{RGW_ADMIN}||"sudo radosgw-admin";
+
+# function to get the current time stamp from the test set up
+sub get_timestamp {
+ ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
+ if ($mon < 10) { $mon = "0$mon"; }
+ if ($hour < 10) { $hour = "0$hour"; }
+ if ($min < 10) { $min = "0$min"; }
+ if ($sec < 10) { $sec = "0$sec"; }
+ $year=$year+1900;
+ return $year . '_' . $mon . '_' . $mday . '_' . $hour . '_' . $min . '_' . $sec;
+}
+
+# Function to check if radosgw is already running
+sub get_status {
+ my $service = "radosgw";
+ my $cmd = "pgrep $service";
+ my $status = get_cmd_op($cmd);
+ if ($status =~ /\d+/ ){
+ return 0;
+ }
+ return 1;
+}
+
+# function to execute the command and return output
+sub get_cmd_op
+{
+ my $cmd = shift;
+ my $excmd = `$cmd`;
+ return $excmd;
+}
+
+#Function that executes the CLI commands and returns the output of the command
+sub get_command_output {
+ my $cmd_output = shift;
+ open( FH, ">>$test_log" );
+ print FH "\"$cmd_output\"\n";
+ my $exec_cmd = `$cmd_output 2>&1`;
+ print FH "$exec_cmd\n";
+ close(FH);
+ return $exec_cmd;
+}
+
+# Function to get the hostname
+sub get_hostname
+{
+ my $cmd = "hostname";
+ my $get_host = get_command_output($cmd);
+ chomp($get_host);
+ return($get_host);
+}
+
+sub pass {
+ my ($comment) = @_;
+ print "Comment required." unless length $comment;
+ chomp $comment;
+ print_border2();
+ print "Test case: $TC_CNT PASSED - $comment \n";
+ print_border2();
+ $PASS_CNT++;
+}
+
+sub fail {
+ my ($comment) = @_;
+ print "Comment required." unless length $comment;
+ chomp $comment;
+ print_border2();
+ print "Test case: $TC_CNT FAILED - $comment \n";
+ print_border2();
+ $FAIL_CNT++;
+}
+
+sub print_border2 {
+ print "~" x 90 . "\n";
+}
+
+# Function to create the given user and extract that user's access_key and secret_key
+sub get_user_info
+{
+ my ($rgw_user) = @_;
+ my $cmd = "$radosgw_admin user create --uid=$rgw_user --display-name=$rgw_user";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /keys/){
+ return (0,0);
+ }
+ my @get_user = (split/\n/,$cmd_op);
+ foreach (@get_user) {
+ if ($_ =~ /access_key/ ){
+ $get_acc_key = $_;
+ } elsif ($_ =~ /secret_key/ ){
+ $get_sec_key = $_;
+ }
+ }
+ my $access_key = $get_acc_key;
+ my $acc_key = (split /:/, $access_key)[1];
+ $acc_key =~ s/\\//g;
+ $acc_key =~ s/ //g;
+ $acc_key =~ s/"//g;
+ $acc_key =~ s/,//g;
+ my $secret_key = $get_sec_key;
+ my $sec_key = (split /:/, $secret_key)[1];
+ $sec_key =~ s/\\//g;
+ $sec_key =~ s/ //g;
+ $sec_key =~ s/"//g;
+ $sec_key =~ s/,//g;
+ return ($acc_key, $sec_key);
+}
+
+# Function that deletes the given user and all associated user data
+sub purge_data
+{
+ my ($rgw_user) = @_;
+ my $cmd = "$radosgw_admin user rm --uid=$rgw_user --purge-data";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /./){
+ print "user $rgw_user deleted\n";
+ } else {
+ print "user $rgw_user NOT deleted\n";
+ return 1;
+ }
+ return 0;
+}
+
+# Read PRETTY_NAME from /etc/os-release
+sub os_pretty_name
+{
+ open(FH, '<', '/etc/os-release') or die $!;
+ while (my $line = <FH>) {
+ chomp $line;
+ if ($line =~ /^\s*PRETTY_NAME=\"?([^"]*)\"?/) {
+ return $1;
+ }
+ }
+ close(FH);
+}
+
+
+# Function to get the Ceph and distro info
+sub ceph_os_info
+{
+ my $ceph_v = get_command_output ( "ceph -v" );
+ my @ceph_arr = split(" ",$ceph_v);
+ $ceph_v = "Ceph Version: $ceph_arr[2]";
+ my $os_distro = os_pretty_name();
+ $os_distro = "Linux Flavor:$os_distro";
+ return ($ceph_v, $os_distro);
+}
+
+# Execute the test case based on the input to the script
+sub create_file {
+ my ($file_size, $part) = @_;
+ my $cnt;
+ $mytestfilename = "$file_size.$part";
+ $testfileloc = "/tmp/".$mytestfilename;
+ if ($file_size eq '10Mb'){
+ $cnt = 1;
+ } elsif ($file_size eq '100Mb'){
+ $cnt = 10;
+ } elsif ($file_size eq '500Mb'){
+ $cnt = 50;
+ } elsif ($file_size eq '1Gb'){
+ $cnt = 100;
+ } elsif ($file_size eq '2Gb'){
+ $cnt = 200;
+ }
+ my $ret = system("dd if=/dev/zero of=$testfileloc bs=10485760 count=$cnt");
+ if ($ret) { exit 1 };
+ return 0;
+}
+
+sub run_s3
+{
+# Run tests for the S3 functionality
+ # Modify access key and secret key to suit the user account
+ my ($user) = @_;
+ our ( $access_key, $secret_key ) = get_user_info($user);
+ if ( ($access_key) && ($secret_key) ) {
+ $s3 = Amazon::S3->new(
+ {
+ aws_access_key_id => $access_key,
+ aws_secret_access_key => $secret_key,
+ host => $hostname,
+ secure => 0,
+ retry => 1,
+ }
+ );
+ }
+
+our $bucketname = 'buck_'.get_timestamp();
+# create a new bucket (the test bucket)
+our $bucket = $s3->add_bucket( { bucket => $bucketname } )
+ or die $s3->err. "bucket $bucketname create failed\n". $s3->errstr;
+ print "Bucket Created: $bucketname \n";
+ return 0;
+}
+
+# delete keys
+sub delete_keys {
+ (($bucket->delete_key($_[0])) and return 0) or return 1;
+}
+
+# Re-add (upload) another file to the bucket; used to push the bucket past its quota
+sub readd_file {
+ system("dd if=/dev/zero of=/tmp/10MBfile1 bs=10485760 count=1");
+ $mytestfilename1 = '10MBfile1';
+ print "re-adding file to bucket: $mytestfilename1\n";
+ ((($bucket->add_key_filename( $mytestfilename1, $testfileloc,
+ { content_type => 'text/plain', },
+ )) and (print "re-adding file success\n") and return 0) or (return 1));
+}
+
+# check if rgw service is already running
+sub check
+{
+ my $state = get_status();
+ if ($state) {
+ exit 1;
+ }
+}
+1;
diff --git a/qa/workunits/rgw/test-keystone-service-token.py b/qa/workunits/rgw/test-keystone-service-token.py
new file mode 100755
index 000000000..2c7f21e93
--- /dev/null
+++ b/qa/workunits/rgw/test-keystone-service-token.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Binero
+#
+# Author: Tobias Urdin <tobias.urdin@binero.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+
+import sys
+import requests
+import time
+
+
+# b4221c214dd64ee6a464g2153fae3813 is the ID of the 'deadbeef' project
+SWIFT_URL = 'http://localhost:8000/swift/v1/AUTH_b4221c214dd64ee6a464g2153fae3813'
+KEYSTONE_URL = 'http://localhost:5000'
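+# Assumed test setup: rgw serves its Swift API on localhost:8000 and validates
+# tokens against the fake Keystone server on localhost:5000; the sleep(11)
+# calls below wait out the 10-second expiry the fake server puts on its
+# tokens, so rgw's cached entries go stale.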
+
+
+def get_stats():
+ stats_url = '%s/stats' % KEYSTONE_URL
+ return requests.get(stats_url)
+
+
+def test_list_containers():
+ # Loop five list container requests with same token
+ for i in range(0, 5):
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-1'})
+ if r.status_code != 204:
+ print('FAILED, status code is %d not 204' % r.status_code)
+ sys.exit(1)
+
+ # Get stats from fake Keystone server
+ r = get_stats()
+ if r.status_code != 200:
+ print('FAILED, status code is %d not 200' % r.status_code)
+ sys.exit(1)
+ stats = r.json()
+
+ # Verify admin token was cached
+ if stats['post_total'] != 1:
+ print('FAILED, post_total stat is %d not 1' % stats['post_total'])
+ sys.exit(1)
+
+ # Verify user token was cached
+ if stats['get_total'] != 1:
+ print('FAILED, get_total stat is %d not 1' % stats['get_total'])
+ sys.exit(1)
+
+ print('Wait for cache to be invalid')
+ time.sleep(11)
+
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-1'})
+ if r.status_code != 204:
+ print('FAILED, status code is %d not 204' % r.status_code)
+ sys.exit(1)
+
+ # Get stats from fake Keystone server
+ r = get_stats()
+ if r.status_code != 200:
+ print('FAILED, status code is %d not 200' % r.status_code)
+ sys.exit(1)
+ stats = r.json()
+
+ if stats['post_total'] != 2:
+ print('FAILED, post_total stat is %d not 2' % stats['post_total'])
+ sys.exit(1)
+
+ if stats['get_total'] != 2:
+ print('FAILED, get_total stat is %d not 2' % stats['get_total'])
+ sys.exit(1)
+
+
+def test_expired_token():
+ # Try listing containers with an expired token
+ for i in range(0, 3):
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2'})
+ if r.status_code != 401:
+ print('FAILED, status code is %d not 401' % r.status_code)
+ sys.exit(1)
+
+ # Get stats from fake Keystone server
+ r = get_stats()
+ if r.status_code != 200:
+ print('FAILED, status code is %d not 200' % r.status_code)
+ sys.exit(1)
+ stats = r.json()
+
+ # Verify admin token was cached
+ if stats['post_total'] != 2:
+ print('FAILED, post_total stat is %d not 2' % stats['post_total'])
+ sys.exit(1)
+
+ # Verify the requests reached the fake Keystone server, since expired tokens are not cached
+ if stats['get_total'] != 5:
+ print('FAILED, get_total stat is %d not 5' % stats['get_total'])
+ sys.exit(1)
+
+
+def test_expired_token_with_service_token():
+ # Try listing containers with an expired token but with a service token
+ for i in range(0, 3):
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'admin-token-1'})
+ if r.status_code != 204:
+ print('FAILED, status code is %d not 204' % r.status_code)
+ sys.exit(1)
+
+ # Get stats from fake Keystone server
+ r = get_stats()
+ if r.status_code != 200:
+ print('FAILED, status code is %d not 200' % r.status_code)
+ sys.exit(1)
+ stats = r.json()
+
+ # Verify admin token was cached
+ if stats['post_total'] != 2:
+ print('FAILED, post_total stat is %d not 2' % stats['post_total'])
+ sys.exit(1)
+
+ # Verify the requests reached the fake Keystone server, since expired tokens are not cached
+ if stats['get_total'] != 7:
+ print('FAILED, get_total stat is %d not 7' % stats['get_total'])
+ sys.exit(1)
+
+ print('Wait for cache to be invalid')
+ time.sleep(11)
+
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'admin-token-1'})
+ if r.status_code != 204:
+ print('FAILED, status code is %d not 204' % r.status_code)
+ sys.exit(1)
+
+ # Get stats from fake Keystone server
+ r = get_stats()
+ if r.status_code != 200:
+ print('FAILED, status code is %d not 200' % r.status_code)
+ sys.exit(1)
+ stats = r.json()
+
+ if stats['post_total'] != 3:
+ print('FAILED, post_total stat is %d not 3' % stats['post_total'])
+ sys.exit(1)
+
+ if stats['get_total'] != 9:
+ print('FAILED, get_total stat is %d not 9' % stats['get_total'])
+ sys.exit(1)
+
+
+def test_expired_token_with_invalid_service_token():
+ print('Wait for cache to be invalid')
+ time.sleep(11)
+
+ # Test with a token that doesn't have allowed role as service token
+ for i in range(0, 3):
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'user-token-1'})
+ if r.status_code != 401:
+ print('FAILED, status code is %d not 401' % r.status_code)
+ sys.exit(1)
+
+ # Make sure we get user-token-1 cached
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-1'})
+ if r.status_code != 204:
+ print('FAILED, status code is %d not 204' % r.status_code)
+ sys.exit(1)
+
+ # Test that a cached token (that is invalid as service token) cannot be used as service token
+ for i in range(0, 3):
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'user-token-1'})
+ if r.status_code != 401:
+ print('FAILED, status code is %d not 401' % r.status_code)
+ sys.exit(1)
+
+
+def main():
+ test_list_containers()
+ test_expired_token()
+ test_expired_token_with_service_token()
+ test_expired_token_with_invalid_service_token()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/qa/workunits/rgw/test_librgw_file.sh b/qa/workunits/rgw/test_librgw_file.sh
new file mode 100755
index 000000000..1371ff711
--- /dev/null
+++ b/qa/workunits/rgw/test_librgw_file.sh
@@ -0,0 +1,59 @@
+#!/bin/sh -e
+
+
+if [ -z ${AWS_ACCESS_KEY_ID} ]
+then
+ export AWS_ACCESS_KEY_ID=`openssl rand -base64 20`
+ export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 40`
+
+ radosgw-admin user create --uid ceph-test-librgw-file \
+ --access-key $AWS_ACCESS_KEY_ID \
+ --secret $AWS_SECRET_ACCESS_KEY \
+ --display-name "librgw test user" \
+ --email librgw@example.com || echo "librgw user exists"
+
+ # keyring override for teuthology env
+ KEYRING="/etc/ceph/ceph.keyring"
+ K="-k ${KEYRING}"
+fi
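+# ${K} stays empty when credentials come from the environment, so the
+# ceph_test_librgw_file_* invocations below then run without an explicit
+# keyring argument.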
+
+# nfsns is the main suite
+
+# create hierarchy, and then list it
+echo "phase 1.1"
+ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --create --rename --verbose
+
+# the older librgw_file can consume the namespace
+echo "phase 1.2"
+ceph_test_librgw_file_nfsns ${K} --getattr --verbose
+
+# and delete the hierarchy
+echo "phase 1.3"
+ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --delete --verbose
+
+# bulk create/delete buckets
+echo "phase 2.1"
+ceph_test_librgw_file_cd ${K} --create --multi --verbose
+echo "phase 2.2"
+ceph_test_librgw_file_cd ${K} --delete --multi --verbose
+
+# write continuation test
+echo "phase 3.1"
+ceph_test_librgw_file_aw ${K} --create --large --verify
+echo "phase 3.2"
+ceph_test_librgw_file_aw ${K} --delete --large
+
+# continued readdir
+echo "phase 4.1"
+ceph_test_librgw_file_marker ${K} --create --marker1 --marker2 --nobjs=100 --verbose
+echo "phase 4.2"
+ceph_test_librgw_file_marker ${K} --delete --verbose
+
+# advanced i/o--but skip readv/writev for now--split delete from
+# create and stat ops to avoid fault in sysobject cache
+echo "phase 5.1"
+ceph_test_librgw_file_gp ${K} --get --stat --put --create
+echo "phase 5.2"
+ceph_test_librgw_file_gp ${K} --delete
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_bucket_check.py b/qa/workunits/rgw/test_rgw_bucket_check.py
new file mode 100755
index 000000000..bfa6d65d6
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_bucket_check.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python3
+
+import logging as log
+import json
+import botocore
+from common import exec_cmd, create_user, boto_connect, put_objects, create_unlinked_objects
+from botocore.config import Config
+
+"""
+Tests behavior of radosgw-admin bucket check commands.
+"""
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
+#
+
+""" Constants """
+USER = 'check-tester'
+DISPLAY_NAME = 'Check Testing'
+ACCESS_KEY = 'OJODXSLNX4LUNHQG99PA'
+SECRET_KEY = '3l6ffld34qaymfomuh832j94738aie2x4p2o8h6n'
+BUCKET_NAME = 'check-bucket'
+
+def main():
+ """
+ execute bucket check commands
+ """
+ create_user(USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY)
+
+ connection = boto_connect(ACCESS_KEY, SECRET_KEY, Config(retries = {
+ 'total_max_attempts': 1,
+ }))
+
+ # pre-test cleanup
+ try:
+ bucket = connection.Bucket(BUCKET_NAME)
+ bucket.objects.all().delete()
+ bucket.object_versions.all().delete()
+ bucket.delete()
+ except botocore.exceptions.ClientError as e:
+ if not e.response['Error']['Code'] == 'NoSuchBucket':
+ raise
+
+ bucket = connection.create_bucket(Bucket=BUCKET_NAME)
+
+ null_version_keys = ['a', 'z']
+ null_version_objs = put_objects(bucket, null_version_keys)
+
+ connection.BucketVersioning(BUCKET_NAME).enable()
+
+ ok_keys = ['a', 'b', 'c', 'd']
+ unlinked_keys = ['c', 'd', 'e', 'f']
+ ok_objs = put_objects(bucket, ok_keys)
+
+ # TESTCASE 'recalculated bucket check stats are correct'
+ log.debug('TEST: recalculated bucket check stats are correct\n')
+ exec_cmd(f'radosgw-admin bucket check --fix --bucket {BUCKET_NAME}')
+ out = exec_cmd(f'radosgw-admin bucket stats --bucket {BUCKET_NAME}')
+ json_out = json.loads(out)
+ log.debug(json_out['usage'])
+ assert json_out['usage']['rgw.main']['num_objects'] == 6
+
+ # TESTCASE 'bucket check unlinked does not report normal entries'
+ log.debug('TEST: bucket check unlinked does not report normal entries\n')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --min-age-hours 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == 0
+
+ unlinked_objs = create_unlinked_objects(connection, bucket, unlinked_keys)
+
+ # TESTCASE 'bucket check unlinked finds unlistable entries'
+ log.debug('TEST: bucket check unlinked finds unlistable entries\n')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --min-age-hours 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(unlinked_keys)
+
+ # TESTCASE 'unlinked entries are not listable'
+ log.debug('TEST: unlinked entries are not listable\n')
+ for ov in bucket.object_versions.all():
+ assert (ov.key, ov.version_id) not in unlinked_objs, f'object "{ov.key}:{ov.version_id}" was found in bucket listing'
+
+ # TESTCASE 'GET returns 404 for unlinked entry keys that have no other versions'
+ log.debug('TEST: GET returns 404 for unlinked entry keys that have no other versions\n')
+ noent_keys = set(unlinked_keys) - set(ok_keys)
+ for key in noent_keys:
+ try:
+ bucket.Object(key).get()
+ assert False, f'GET did not return 404 for key={key} with no prior successful PUT'
+ except botocore.exceptions.ClientError as e:
+ assert e.response['ResponseMetadata']['HTTPStatusCode'] == 404
+
+ # TESTCASE 'bucket check unlinked fixes unlistable entries'
+ log.debug('TEST: bucket check unlinked fixes unlistable entries\n')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --fix --min-age-hours 0 --rgw-olh-pending-timeout-sec 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(unlinked_keys)
+ for o in unlinked_objs:
+ try:
+ connection.ObjectVersion(bucket.name, o[0], o[1]).head()
+ assert False, f'head for unlistable object {o[0]}:{o[1]} succeeded after fix'
+ except botocore.exceptions.ClientError as e:
+ assert e.response['ResponseMetadata']['HTTPStatusCode'] == 404
+
+ # TESTCASE 'bucket check unlinked fix does not affect normal entries'
+ log.debug('TEST: bucket check unlinked fix does not affect normal entries\n')
+ all_listable = list(bucket.object_versions.all())
+ assert len(all_listable) == len(ok_keys) + len(null_version_keys), 'some normal objects were not accounted for in object listing after unlinked fix'
+ for o in ok_objs:
+ assert o in map(lambda x: (x.key, x.version_id), all_listable), "normal object not listable after fix"
+ connection.ObjectVersion(bucket.name, o[0], o[1]).head()
+
+ # TESTCASE 'bucket check unlinked does not find new unlistable entries after fix'
+ log.debug('TEST: bucket check unlinked does not find new unlistable entries after fix\n')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --min-age-hours 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == 0
+
+ # for this set of keys we can produce leftover OLH object/entries by
+ # deleting the normal object instance since we should already have a leftover
+ # pending xattr on the OLH object due to the errors associated with the
+ # prior unlinked entries that were created for the same keys
+ leftover_pending_xattr_keys = set(ok_keys).intersection(unlinked_keys)
+ objs_to_delete = filter(lambda x: x[0] in leftover_pending_xattr_keys, ok_objs)
+
+ for o in objs_to_delete:
+ connection.ObjectVersion(bucket.name, o[0], o[1]).delete()
+
+ for key in leftover_pending_xattr_keys:
+ out = exec_cmd(f'radosgw-admin bi list --bucket {BUCKET_NAME} --object {key}')
+ idx_entries = json.loads(out.replace(b'\x80', b'0x80'))
+ assert len(idx_entries) > 0, f'failed to create leftover OLH entries for key {key}'
+
+ # TESTCASE 'bucket check olh finds leftover OLH entries'
+ log.debug('TEST: bucket check olh finds leftover OLH entries\n')
+ out = exec_cmd(f'radosgw-admin bucket check olh --bucket {BUCKET_NAME} --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(leftover_pending_xattr_keys)
+
+ # TESTCASE 'bucket check olh fixes leftover OLH entries'
+ log.debug('TEST: bucket check olh fixes leftover OLH entries\n')
+ out = exec_cmd(f'radosgw-admin bucket check olh --bucket {BUCKET_NAME} --fix --rgw-olh-pending-timeout-sec 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(leftover_pending_xattr_keys)
+
+ for key in leftover_pending_xattr_keys:
+ out = exec_cmd(f'radosgw-admin bi list --bucket {BUCKET_NAME} --object {key}')
+ idx_entries = json.loads(out.replace(b'\x80', b'0x80'))
+ assert len(idx_entries) == 0, f'index entries still exist for key={key} after olh fix'
+
+ # TESTCASE 'bucket check olh does not find new leftover OLH entries after fix'
+ log.debug('TEST: bucket check olh does not find new leftover OLH entries after fix\n')
+ out = exec_cmd(f'radosgw-admin bucket check olh --bucket {BUCKET_NAME} --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == 0
+
+ # TESTCASE 'bucket check fixes do not affect null version objects'
+ log.debug('TEST: verify that bucket check fixes do not affect null version objects\n')
+ for o in null_version_objs:
+ connection.ObjectVersion(bucket.name, o[0], 'null').head()
+
+ all_versions = list(map(lambda x: (x.key, x.version_id), bucket.object_versions.all()))
+ for key in null_version_keys:
+ assert (key, 'null') in all_versions
+
+ # TESTCASE 'bucket check stats are correct in the presence of unlinked entries'
+ log.debug('TEST: bucket check stats are correct in the presence of unlinked entries\n')
+ bucket.object_versions.all().delete()
+ null_version_objs = put_objects(bucket, null_version_keys)
+ ok_objs = put_objects(bucket, ok_keys)
+ unlinked_objs = create_unlinked_objects(connection, bucket, unlinked_keys)
+ exec_cmd(f'radosgw-admin bucket check --fix --bucket {BUCKET_NAME}')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --fix --min-age-hours 0 --rgw-olh-pending-timeout-sec 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(unlinked_keys)
+ bucket.object_versions.all().delete()
+ out = exec_cmd(f'radosgw-admin bucket stats --bucket {BUCKET_NAME}')
+ json_out = json.loads(out)
+ log.debug(json_out['usage'])
+ assert json_out['usage']['rgw.main']['size'] == 0
+ assert json_out['usage']['rgw.main']['num_objects'] == 0
+ assert json_out['usage']['rgw.main']['size_actual'] == 0
+ assert json_out['usage']['rgw.main']['size_kb'] == 0
+ assert json_out['usage']['rgw.main']['size_kb_actual'] == 0
+ assert json_out['usage']['rgw.main']['size_kb_utilized'] == 0
+
+ # Clean up
+ log.debug("Deleting bucket {}".format(BUCKET_NAME))
+ bucket.object_versions.all().delete()
+ bucket.delete()
+
+main()
+log.info("Completed bucket check tests")
diff --git a/qa/workunits/rgw/test_rgw_datacache.py b/qa/workunits/rgw/test_rgw_datacache.py
new file mode 100755
index 000000000..f070ec0f1
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_datacache.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python3
+
+import logging as log
+from configobj import ConfigObj
+import subprocess
+import json
+import os
+
+"""
+Runs a test against a rgw with the data cache enabled. A client must be
+set in the config for this task. This client must be the same client
+that is in the config for the `rgw` task.
+
+In the `overrides` section, `datacache` and `datacache_path` must be configured for
+the `rgw` task and the ceph conf overrides must contain the below config
+variables in the client section.
+
+`s3cmd` must be added as an extra_package to the install task.
+
+In the `workunit` task, `- rgw/run-datacache.sh` must be set for the client that
+is in the config for the `rgw` task. The `RGW_DATACACHE_PATH` variable must be
+set in the workunit's `env` and it must match the `datacache_path` given to the
+`rgw` task in `overrides`.
+Ex:
+- install:
+ extra_packages:
+ deb: ['s3cmd']
+ rpm: ['s3cmd']
+- overrides:
+ rgw:
+ datacache: true
+ datacache_path: /tmp/rgw_datacache
+ install:
+ extra_packages:
+ deb: ['s3cmd']
+ rpm: ['s3cmd']
+ ceph:
+ conf:
+ client:
+ rgw d3n l1 datacache persistent path: /tmp/rgw_datacache/
+ rgw d3n l1 datacache size: 10737417240
+ rgw d3n l1 local datacache enabled: true
+ rgw enable ops log: true
+- rgw:
+ client.0:
+- workunit:
+ clients:
+ client.0:
+ - rgw/run-datacache.sh
+ env:
+ RGW_DATACACHE_PATH: /tmp/rgw_datacache
+ cleanup: true
+"""
+
+log.basicConfig(level=log.DEBUG)
+
+""" Constants """
+USER = 'rgw_datacache_user'
+DISPLAY_NAME = 'DatacacheUser'
+ACCESS_KEY = 'NX5QOQKC6BH2IDN8HC7A'
+SECRET_KEY = 'LnEsqNNqZIpkzauboDcLXLcYaWwLQ3Kop0zAnKIn'
+BUCKET_NAME = 'datacachebucket'
+FILE_NAME = '7M.dat'
+GET_FILE_NAME = '7M-get.dat'
+
+def exec_cmd(cmd):
+ log.debug("exec_cmd(%s)", cmd)
+ try:
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ out, err = proc.communicate()
+ if proc.returncode == 0:
+ log.info('command succeeded')
+ if out is not None: log.info(out)
+ return out
+ else:
+ raise Exception("error: %s \nreturncode: %s" % (err, proc.returncode))
+ except Exception as e:
+ log.error('command failed')
+ log.error(e)
+ return False
+
+def get_radosgw_endpoint():
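+ """
+ Find the port the locally running radosgw (or radosgw under valgrind) is
+ listening on via netstat, derive the protocol from it, and return the
+ endpoint hostname and protocol.
+ """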
+ out = exec_cmd('sudo netstat -nltp | egrep "rados|valgr"') # short for radosgw/valgrind
+ x = out.decode('utf8').split(" ")
+ port = [i for i in x if ':' in i][0].split(':')[1]
+ log.info('radosgw port: %s' % port)
+ proto = "http"
+ hostname = '127.0.0.1'
+
+ if port == '443':
+ proto = "https"
+
+ endpoint = hostname
+
+ log.info("radosgw endpoint is: %s", endpoint)
+ return endpoint, proto
+
+def create_s3cmd_config(path, proto):
+ """
+ Creates a minimal config file for s3cmd
+ """
+ log.info("Creating s3cmd config...")
+
+ use_https_config = "False"
+ log.info("proto for s3cmd config is %s", proto)
+ if proto == "https":
+ use_https_config = "True"
+
+ s3cmd_config = ConfigObj(
+ indent_type='',
+ infile={
+ 'default':
+ {
+ 'host_bucket': 'no.way.in.hell',
+ 'use_https': use_https_config,
+ },
+ }
+ )
+
+ f = open(path, 'wb')
+ s3cmd_config.write(f)
+ f.close()
+ log.info("s3cmd config written")
+
+def get_cmd_output(cmd_out):
+ out = cmd_out.decode('utf8')
+ out = out.strip('\n')
+ return out
+
+def main():
+ """
+ execute the datacache test
+ """
+ # setup for test
+ cache_dir = os.environ['RGW_DATACACHE_PATH']
+ log.debug("datacache dir from config is: %s", cache_dir)
+
+ out = exec_cmd('pwd')
+ pwd = get_cmd_output(out)
+ log.debug("pwd is: %s", pwd)
+
+ endpoint, proto = get_radosgw_endpoint()
+
+ # create 7M file to put
+ outfile = pwd + '/' + FILE_NAME
+ exec_cmd('dd if=/dev/urandom of=%s bs=1M count=7' % (outfile))
+
+ # create user
+ exec_cmd('radosgw-admin user create --uid %s --display-name %s --access-key %s --secret %s'
+ % (USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY))
+
+ # create s3cmd config
+ s3cmd_config_path = pwd + '/s3cfg'
+ create_s3cmd_config(s3cmd_config_path, proto)
+
+ # create a bucket
+ exec_cmd('s3cmd --access_key=%s --secret_key=%s --config=%s --no-check-hostname --host=%s mb s3://%s'
+ % (ACCESS_KEY, SECRET_KEY, s3cmd_config_path, endpoint, BUCKET_NAME))
+
+ # put an object in the bucket
+ exec_cmd('s3cmd --access_key=%s --secret_key=%s --config=%s --no-check-hostname --host=%s put %s s3://%s'
+ % (ACCESS_KEY, SECRET_KEY, s3cmd_config_path, endpoint, outfile, BUCKET_NAME))
+
+ # get object from bucket
+ get_file_path = pwd + '/' + GET_FILE_NAME
+ exec_cmd('s3cmd --access_key=%s --secret_key=%s --config=%s --no-check-hostname --host=%s get s3://%s/%s %s --force'
+ % (ACCESS_KEY, SECRET_KEY, s3cmd_config_path, endpoint, BUCKET_NAME, FILE_NAME, get_file_path))
+
+ # get info of object
+ out = exec_cmd('radosgw-admin object stat --bucket=%s --object=%s' % (BUCKET_NAME, FILE_NAME))
+
+ json_op = json.loads(out)
+ cached_object_name = json_op['manifest']['prefix']
+ log.debug("Cached object name is: %s", cached_object_name)
+
+ # check that the cache is enabled (i.e. that the cache directory is not empty)
+ out = exec_cmd('find %s -type f | wc -l' % (cache_dir))
+ chk_cache_dir = int(get_cmd_output(out))
+ log.debug("Check cache dir content: %s", chk_cache_dir)
+ if chk_cache_dir == 0:
+ log.info("NOTICE: datacache test object not found; check whether the datacache was bypassed or disabled during this run.")
+ return
+
+ # list the files in the cache dir for troubleshooting
+ out = exec_cmd('ls -l %s' % (cache_dir))
+ # get name of cached object and check if it exists in the cache
+ out = exec_cmd('find %s -name "*%s1"' % (cache_dir, cached_object_name))
+ cached_object_path = get_cmd_output(out)
+ log.debug("Path of file in datacache is: %s", cached_object_path)
+ out = exec_cmd('basename %s' % (cached_object_path))
+ basename_cmd_out = get_cmd_output(out)
+ log.debug("Name of file in datacache is: %s", basename_cmd_out)
+
+ # check to see if the cached object is in Ceph
+ out = exec_cmd('rados ls -p default.rgw.buckets.data')
+ rados_ls_out = get_cmd_output(out)
+ log.debug("rados ls output is: %s", rados_ls_out)
+
+ assert(basename_cmd_out in rados_ls_out)
+ log.debug("RGW Datacache test SUCCESS")
+
+ # remove datacache dir
+ #cmd = exec_cmd('rm -rf %s' % (cache_dir))
+ #log.debug("RGW Datacache dir deleted")
+ #^ kept commented out for future reference - the workunit will continue running tests, and if the
+ # cache_dir is removed, all writes to the cache will fail with errno 2 (ENOENT, No such file or directory).
+
+main()
+log.info("Completed Datacache tests")
diff --git a/qa/workunits/rgw/test_rgw_gc_log.sh b/qa/workunits/rgw/test_rgw_gc_log.sh
new file mode 100755
index 000000000..ab4015aef
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_gc_log.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_rgw_gc_log
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_obj.sh b/qa/workunits/rgw/test_rgw_obj.sh
new file mode 100755
index 000000000..01dd2b5ee
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_obj.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_rgw_obj
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_orphan_list.sh b/qa/workunits/rgw/test_rgw_orphan_list.sh
new file mode 100755
index 000000000..34d550cea
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_orphan_list.sh
@@ -0,0 +1,519 @@
+#!/usr/bin/env bash
+
+# set -x
+set -e
+
+# if defined, debug messages will be displayed and prepended with the string
+# debug="DEBUG"
+
+huge_size=5100 # in megabytes
+big_size=7 # in megabytes
+
+huge_obj=/tmp/huge_obj.temp.$$
+big_obj=/tmp/big_obj.temp.$$
+empty_obj=/tmp/empty_obj.temp.$$
+
+fifo=/tmp/orphan-fifo.$$
+awscli_dir=${HOME}/awscli_temp
+export PATH=${PATH}:${awscli_dir}
+
+rgw_host=$(hostname --fqdn)
+if echo "$rgw_host" | grep -q '\.' ; then
+ :
+else
+ host_domain=".front.sepia.ceph.com"
+ echo "WARNING: rgw hostname -- $rgw_host -- does not appear to be fully qualified; PUNTING and appending $host_domain"
+ rgw_host="${rgw_host}${host_domain}"
+fi
+rgw_port=80
+
+echo "Fully Qualified Domain Name: $rgw_host"
+
+success() {
+ echo OK.
+ exit 0
+}
+
+########################################################################
+# INSTALL AND CONFIGURE TOOLING
+
+install_awscli() {
+ # NB: this does not verify the authenticity and integrity of the downloaded
+ # file; see
+ # https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html
+ here="$(pwd)"
+ cd "$HOME"
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+ unzip awscliv2.zip
+ mkdir -p $awscli_dir
+ ./aws/install -i $awscli_dir
+ cd "$here"
+}
+
+uninstall_awscli() {
+ here="$(pwd)"
+ cd "$HOME"
+ rm -rf $awscli_dir ./aws awscliv2.zip
+ cd "$here"
+}
+
+sudo yum -y install s3cmd
+sudo yum -y install python3-setuptools
+sudo yum -y install python3-pip
+sudo pip3 install --upgrade setuptools
+sudo pip3 install python-swiftclient
+
+# get ready for transition from s3cmd to awscli
+if false ;then
+ install_awscli
+ aws --version
+ uninstall_awscli
+fi
+
+s3config=/tmp/s3config.$$
+
+# do not include the port when it is 80; the host base is used in the
+# v4 signature and it needs to follow this convention for signatures
+# to match
+if [ "$rgw_port" -ne 80 ] ;then
+ s3_host_base="${rgw_host}:${rgw_port}"
+else
+ s3_host_base="$rgw_host"
+fi
+
+cat >${s3config} <<EOF
+[default]
+host_base = $s3_host_base
+access_key = 0555b35654ad1656d804
+secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+bucket_location = us-east-1
+check_ssl_certificate = True
+check_ssl_hostname = True
+default_mime_type = binary/octet-stream
+delete_removed = False
+dry_run = False
+enable_multipart = True
+encoding = UTF-8
+encrypt = False
+follow_symlinks = False
+force = False
+guess_mime_type = True
+host_bucket = anything.with.three.dots
+multipart_chunk_size_mb = 15
+multipart_max_chunks = 10000
+recursive = False
+recv_chunk = 65536
+send_chunk = 65536
+signature_v2 = False
+socket_timeout = 300
+use_https = False
+use_mime_magic = True
+verbosity = WARNING
+EOF
+
+
+# set up swift authentication
+export ST_AUTH=http://${rgw_host}:${rgw_port}/auth/v1.0
+export ST_USER=test:tester
+export ST_KEY=testing
+
+create_users() {
+ # Create S3 user
+ local akey='0555b35654ad1656d804'
+ local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+ radosgw-admin user create --uid testid \
+ --access-key $akey --secret $skey \
+ --display-name 'M. Tester' --email tester@ceph.com
+
+ # Create Swift user
+ radosgw-admin user create --subuser=test:tester \
+ --display-name=Tester-Subuser --key-type=swift \
+ --secret=testing --access=full
+}
+
+myswift() {
+ if [ -n "$debug" ] ;then
+ echo "${debug}: swift --verbose --debug $@"
+ fi
+ swift --verbose --debug "$@"
+ local code=$?
+ if [ $code -ne 0 ] ;then
+ echo "ERROR: code = $code ; command = swift --verbose --debug "$@""
+ exit $code
+ fi
+}
+
+mys3cmd() {
+ if [ -n "$debug" ] ;then
+ echo "${debug}: s3cmd --config=${s3config} --verbose --debug $@"
+ fi
+ s3cmd --config=${s3config} --verbose --debug "$@"
+ local code=$?
+ if [ $code -ne 0 ] ;then
+ echo "ERROR: code = $code ; command = s3cmd --config=${s3config} --verbose --debug "$@""
+ exit $code
+ fi
+}
+
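+# start a multipart upload of a local file (5MB chunks) and kill it as soon as
+# the given part number shows up in the progress output, deliberately leaving
+# an incomplete multipart upload behind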
+mys3uploadkill() {
+ if [ $# -ne 5 ] ;then
+ echo "$0: error expecting 5 arguments"
+ exit 1
+ fi
+
+ local_file="$1"
+ remote_bkt="$2"
+ remote_obj="$3"
+ fifo="$4"
+ stop_part="$5"
+
+ mkfifo $fifo
+
+ s3cmd --config=${s3config} put $local_file \
+ s3://${remote_bkt}/${remote_obj} \
+ --progress \
+ --multipart-chunk-size-mb=5 >$fifo &
+ set +e # don't allow errors to stop script
+ while read line ;do
+ echo "$line" | grep --quiet "part $stop_part "
+ if [ ${PIPESTATUS[1]} -eq 0 ] ;then
+ kill -9 $(jobs -p)
+ break
+ fi
+ done <$fifo
+ set -e
+
+ rm -f $fifo
+}
+
+mys3upload() {
+ obj=$1
+ bucket=$2
+ dest_obj=$3
+
+ mys3cmd put -q $obj s3://${bucket}/$dest_obj
+}
+
+########################################################################
+# PREP
+
+create_users
+dd if=/dev/urandom of=$big_obj bs=1M count=${big_size}
+dd if=/dev/urandom of=$huge_obj bs=1M count=${huge_size}
+touch $empty_obj
+
+quick_tests() {
+ echo TRY A SWIFT COMMAND
+ myswift upload swift-plain-ctr $big_obj --object-name swift-obj-test
+ myswift list
+ myswift list swift-plain-ctr
+
+ echo TRY A RADOSGW-ADMIN COMMAND
+ radosgw-admin bucket list # make sure rgw is up and running
+}
+
+########################################################################
+# S3 TESTS
+
+####################################
+# regular multipart test
+
+mys3cmd mb s3://multipart-bkt
+mys3upload $huge_obj multipart-bkt multipart-obj
+mys3cmd ls
+mys3cmd ls s3://multipart-bkt
+
+####################################
+# multipart test with incomplete uploads
+
+bkt="incomplete-mp-bkt-1"
+
+mys3cmd mb s3://$bkt
+
+mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c $fifo 20
+
+# generate an incomplete multipart with more than 1,000 parts
+mys3uploadkill $huge_obj $bkt incomplete-mp-obj-b $fifo 1005
+
+# generate more than 1,000 incomplete multipart uploads
+for c in $(seq 1005) ;do
+ mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c-$c $fifo 3
+done
+
+####################################
+# resharded bucket
+
+bkt=resharded-bkt-1
+
+mys3cmd mb s3://$bkt
+
+for f in $(seq 8) ; do
+ dest_obj="reshard-obj-${f}"
+ mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
+done
+
+radosgw-admin bucket reshard --num-shards 3 --bucket=$bkt --yes-i-really-mean-it
+radosgw-admin bucket reshard --num-shards 5 --bucket=$bkt --yes-i-really-mean-it
+
+####################################
+# versioned bucket
+
+if true ;then
+ echo "WARNING: versioned bucket test currently turned off"
+else
+ bkt=versioned-bkt-1
+
+ mys3cmd mb s3://$bkt
+
+ # bucket-enable-versioning $bkt
+
+ for f in $(seq 3) ;do
+ for g in $(seq 10) ;do
+ dest_obj="versioned-obj-${g}"
+ mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
+ done
+ done
+
+ for g in $(seq 1 2 10) ;do
+ dest_obj="versioned-obj-${g}"
+ mys3cmd rm s3://${bkt}/$dest_obj
+ done
+fi
+
+############################################################
+# copy small objects
+
+o_bkt="orig-bkt-1"
+d_bkt="copy-bkt-1"
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 4) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
+mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
+
+for f in $(seq 5 6) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
+done
+
+############################################################
+# copy small objects and delete original
+
+o_bkt="orig-bkt-2"
+d_bkt="copy-bkt-2"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 4) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
+mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
+
+for f in $(seq 5 6) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
+done
+
+mys3cmd rb --recursive s3://${o_bkt}
+
+############################################################
+# copy multipart objects
+
+o_bkt="orig-mp-bkt-3"
+d_bkt="copy-mp-bkt-3"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 2) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
+ s3://${d_bkt}/copied-multipart-obj-1
+
+for f in $(seq 5 5) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
+done
+
+
+############################################################
+# copy multipart objects and delete original
+
+o_bkt="orig-mp-bkt-4"
+d_bkt="copy-mp-bkt-4"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 2) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
+ s3://${d_bkt}/copied-multipart-obj-1
+
+for f in $(seq 5 5) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
+done
+
+mys3cmd rb --recursive s3://$o_bkt
+
+########################################################################
+# SWIFT TESTS
+
+# 600MB
+segment_size=629145600
+
+############################################################
+# plain test
+
+for f in $(seq 4) ;do
+ myswift upload swift-plain-ctr $big_obj --object-name swift-obj-$f
+done
+
+############################################################
+# zero-len test
+
+myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/
+myswift upload swift-zerolen-ctr $big_obj --object-name subdir/abc1
+myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/empty1
+myswift upload swift-zerolen-ctr $big_obj --object-name subdir/xyz1
+
+############################################################
+# dlo test
+
+# upload in 600MB segments
+myswift upload swift-dlo-ctr $huge_obj --object-name dlo-obj-1 \
+ -S $segment_size
+
+############################################################
+# slo test
+
+# upload in 600MB segments
+myswift upload swift-slo-ctr $huge_obj --object-name slo-obj-1 \
+ -S $segment_size --use-slo
+
+############################################################
+# large object copy test
+
+o_ctr=swift-orig-ctr
+o_obj=slo-orig-obj-1
+d_ctr=swift-copy-ctr
+d_obj=slo-copy-obj-1
+myswift upload $o_ctr $big_obj --object-name $o_obj
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+############################################################
+# huge dlo object copy test
+
+o_ctr=swift-orig-dlo-ctr-1
+o_obj=dlo-orig-dlo-obj-1
+d_ctr=swift-copy-dlo-ctr-1
+d_obj=dlo-copy-dlo-obj-1
+
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+############################################################
+# huge dlo object copy and orig delete
+
+o_ctr=swift-orig-dlo-ctr-2
+o_obj=dlo-orig-dlo-obj-2
+d_ctr=swift-copy-dlo-ctr-2
+d_obj=dlo-copy-dlo-obj-2
+
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+############################################################
+# huge slo object copy test
+
+o_ctr=swift-orig-slo-ctr-1
+o_obj=slo-orig-slo-obj-1
+d_ctr=swift-copy-slo-ctr-1
+d_obj=slo-copy-slo-obj-1
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size --use-slo
+
+myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
+
+############################################################
+# huge slo object copy test and orig delete
+
+o_ctr=swift-orig-slo-ctr-2
+o_obj=slo-orig-slo-obj-2
+d_ctr=swift-copy-slo-ctr-2
+d_obj=slo-copy-slo-obj-2
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size --use-slo
+
+myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+########################################################################
+# FORCE GARBAGE COLLECTION
+
+sleep 6 # for testing, the age at which gc can happen is configured to 5 seconds
+radosgw-admin gc process --include-all
+
+
+########################################
+# DO ORPHAN LIST
+
+pool="default.rgw.buckets.data"
+
+rgw-orphan-list $pool
+
+# we only expect there to be one output file, but loop just in case
+ol_error=""
+for f in orphan-list-*.out ; do
+ if [ -s "$f" ] ;then # if file non-empty
+ ol_error="${ol_error}:$f"
+ echo "One or more orphans found in $f:"
+ cat "$f"
+ fi
+done
+
+if [ -n "$ol_error" ] ;then
+ echo "ERROR: orphans found when none expected"
+ exit 1
+fi
+
+########################################################################
+# CLEAN UP
+
+rm -f $empty_obj $big_obj $huge_obj $s3config
+
+success
diff --git a/qa/workunits/rgw/test_rgw_reshard.py b/qa/workunits/rgw/test_rgw_reshard.py
new file mode 100755
index 000000000..6326e7b17
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_reshard.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python3
+
+import errno
+import time
+import logging as log
+import json
+import os
+from common import exec_cmd, boto_connect, create_user, put_objects, create_unlinked_objects
+
+"""
+Rgw manual and dynamic resharding testing against a running instance
+"""
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
+#
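+# For example, run from this directory (a sketch; adjust the path as needed):
+#
+# grep '^ *# TESTCASE' test_rgw_reshard.py | sed 's/^ *# TESTCASE //'
+#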
+
+""" Constants """
+USER = 'tester'
+DISPLAY_NAME = 'Testing'
+ACCESS_KEY = 'NX5QOQKC6BH2IDN8HC7A'
+SECRET_KEY = 'LnEsqNNqZIpkzauboDcLXLcYaWwLQ3Kop0zAnKIn'
+BUCKET_NAME = 'a-bucket'
+VER_BUCKET_NAME = 'myver'
+INDEX_POOL = 'default.rgw.buckets.index'
+
+class BucketStats:
+ def __init__(self, bucket_name, bucket_id, num_objs=0, size_kb=0, num_shards=0):
+ self.bucket_name = bucket_name
+ self.bucket_id = bucket_id
+ self.num_objs = num_objs
+ self.size_kb = size_kb
+ self.num_shards = num_shards if num_shards > 0 else 1
+
+ def get_num_shards(self):
+ self.num_shards = get_bucket_num_shards(self.bucket_name, self.bucket_id)
+
+
+def get_bucket_stats(bucket_name):
+ """
+ function to get bucket stats
+ """
+ cmd = exec_cmd("radosgw-admin bucket stats --bucket {}".format(bucket_name))
+ json_op = json.loads(cmd)
+ #print(json.dumps(json_op, indent = 4, sort_keys=True))
+ bucket_id = json_op['id']
+ num_shards = json_op['num_shards']
+ if len(json_op['usage']) > 0:
+ num_objects = json_op['usage']['rgw.main']['num_objects']
+ size_kb = json_op['usage']['rgw.main']['size_kb']
+ else:
+ num_objects = 0
+ size_kb = 0
+ log.debug(" \nBUCKET_STATS: \nbucket: {} id: {} num_objects: {} size_kb: {} num_shards: {}\n".format(bucket_name, bucket_id,
+ num_objects, size_kb, num_shards))
+ return BucketStats(bucket_name, bucket_id, num_objects, size_kb, num_shards)
+
+def get_bucket_layout(bucket_name):
+ res = exec_cmd("radosgw-admin bucket layout --bucket {}".format(bucket_name))
+ return json.loads(res)
+
+def get_bucket_shard0(bucket_name):
+ bucket_id = get_bucket_stats(bucket_name).bucket_id
+ index_gen = get_bucket_layout(bucket_name)['layout']['current_index']['gen']
+ return '.dir.%s.%d.0' % (bucket_id, index_gen)
+
+def get_bucket_num_shards(bucket_name, bucket_id):
+ """
+ function to get bucket num shards
+ """
+ metadata = 'bucket.instance:' + bucket_name + ':' + bucket_id
+ cmd = exec_cmd('radosgw-admin metadata get {}'.format(metadata))
+ json_op = json.loads(cmd)
+ num_shards = json_op['data']['bucket_info']['num_shards']
+ return num_shards
+
+def run_bucket_reshard_cmd(bucket_name, num_shards, **kwargs):
+ cmd = 'radosgw-admin bucket reshard --bucket {} --num-shards {}'.format(bucket_name, num_shards)
+ cmd += ' --rgw-reshard-bucket-lock-duration 30' # reduce to minimum
+ if 'error_at' in kwargs:
+ cmd += ' --inject-error-at {}'.format(kwargs.pop('error_at'))
+ elif 'abort_at' in kwargs:
+ cmd += ' --inject-abort-at {}'.format(kwargs.pop('abort_at'))
+ if 'error_code' in kwargs:
+ cmd += ' --inject-error-code {}'.format(kwargs.pop('error_code'))
+ return exec_cmd(cmd, **kwargs)
+
+def test_bucket_reshard(conn, name, **fault):
+ # create a bucket with non-default ACLs to verify that reshard preserves them
+ bucket = conn.create_bucket(Bucket=name, ACL='authenticated-read')
+ grants = bucket.Acl().grants
+
+ objs = []
+ try:
+ # create objs
+ for i in range(0, 20):
+ objs += [bucket.put_object(Key='key' + str(i), Body=b"some_data")]
+
+ old_shard_count = get_bucket_stats(name).num_shards
+ num_shards_expected = old_shard_count + 1
+
+ # try reshard with fault injection
+ _, ret = run_bucket_reshard_cmd(name, num_shards_expected, check_retcode=False, **fault)
+
+ if fault.get('error_code') == errno.ECANCELED:
+ assert(ret == 0) # expect ECANCELED to retry and succeed
+ else:
+ assert(ret != 0 and ret != errno.EBUSY)
+
+ # check shard count
+ cur_shard_count = get_bucket_stats(name).num_shards
+ assert(cur_shard_count == old_shard_count)
+
+ # verify that the bucket is writeable by deleting an object
+ objs.pop().delete()
+
+ assert grants == bucket.Acl().grants # recheck grants after cancel
+
+ # retry reshard without fault injection. if radosgw-admin aborted,
+ # we'll have to retry until the reshard lock expires
+ while True:
+ _, ret = run_bucket_reshard_cmd(name, num_shards_expected, check_retcode=False)
+ if ret == errno.EBUSY:
+ log.info('waiting 30 seconds for reshard lock to expire...')
+ time.sleep(30)
+ continue
+ assert(ret == 0)
+ break
+
+ # recheck shard count
+ final_shard_count = get_bucket_stats(name).num_shards
+ assert(final_shard_count == num_shards_expected)
+
+ assert grants == bucket.Acl().grants # recheck grants after commit
+ finally:
+ # cleanup on resharded bucket must succeed
+ bucket.delete_objects(Delete={'Objects':[{'Key':o.key} for o in objs]})
+ bucket.delete()
+
+
+def main():
+ """
+ execute manual and dynamic resharding commands
+ """
+ create_user(USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY)
+
+ connection = boto_connect(ACCESS_KEY, SECRET_KEY)
+
+ # create a bucket
+ bucket = connection.create_bucket(Bucket=BUCKET_NAME)
+ ver_bucket = connection.create_bucket(Bucket=VER_BUCKET_NAME)
+ connection.BucketVersioning(VER_BUCKET_NAME).enable()
+
+ bucket_acl = connection.BucketAcl(BUCKET_NAME).load()
+ ver_bucket_acl = connection.BucketAcl(VER_BUCKET_NAME).load()
+
+ # TESTCASE 'reshard-add','reshard','add','add bucket to resharding queue','succeeds'
+ log.debug('TEST: reshard add\n')
+
+ num_shards_expected = get_bucket_stats(BUCKET_NAME).num_shards + 1
+ cmd = exec_cmd('radosgw-admin reshard add --bucket {} --num-shards {}'.format(BUCKET_NAME, num_shards_expected))
+ cmd = exec_cmd('radosgw-admin reshard list')
+ json_op = json.loads(cmd)
+ log.debug('bucket name {}'.format(json_op[0]['bucket_name']))
+ assert json_op[0]['bucket_name'] == BUCKET_NAME
+ assert json_op[0]['tentative_new_num_shards'] == num_shards_expected
+
+ # TESTCASE 'reshard-process','reshard','','process bucket resharding','succeeds'
+ log.debug('TEST: reshard process\n')
+ cmd = exec_cmd('radosgw-admin reshard process')
+ time.sleep(5)
+ # check bucket shards num
+ bucket_stats1 = get_bucket_stats(BUCKET_NAME)
+ if bucket_stats1.num_shards != num_shards_expected:
+ log.error("Resharding failed on bucket {}. The expected number of shards was not created\n".format(BUCKET_NAME))
+
+ # TESTCASE 'reshard-add','reshard','add','add non empty bucket to resharding queue','succeeds'
+ log.debug('TEST: reshard add non empty bucket\n')
+ # create objs
+ num_objs = 8
+ for i in range(0, num_objs):
+ connection.Object(BUCKET_NAME, ('key'+str(i))).put(Body=b"some_data")
+
+ num_shards_expected = get_bucket_stats(BUCKET_NAME).num_shards + 1
+ cmd = exec_cmd('radosgw-admin reshard add --bucket {} --num-shards {}'.format(BUCKET_NAME, num_shards_expected))
+ cmd = exec_cmd('radosgw-admin reshard list')
+ json_op = json.loads(cmd)
+ assert json_op[0]['bucket_name'] == BUCKET_NAME
+ assert json_op[0]['tentative_new_num_shards'] == num_shards_expected
+
+ # TESTCASE 'reshard-process','reshard','process','reshard non empty bucket','succeeds'
+ log.debug('TEST: reshard process non empty bucket\n')
+ cmd = exec_cmd('radosgw-admin reshard process')
+ # check bucket shards num
+ bucket_stats1 = get_bucket_stats(BUCKET_NAME)
+ if bucket_stats1.num_shards != num_shards_expected:
+ log.error("Resharding failed on bucket {}. The expected number of shards was not created\n".format(BUCKET_NAME))
+
+ # TESTCASE 'manual bucket resharding','inject error','fail','check bucket accessibility', 'retry reshard'
+ log.debug('TEST: reshard bucket with EIO injected at set_target_layout\n')
+ test_bucket_reshard(connection, 'error-at-set-target-layout', error_at='set_target_layout')
+ log.debug('TEST: reshard bucket with ECANCELED injected at set_target_layout\n')
+ test_bucket_reshard(connection, 'error-at-set-target-layout', error_at='set_target_layout', error_code=errno.ECANCELED)
+ log.debug('TEST: reshard bucket with abort at set_target_layout\n')
+ test_bucket_reshard(connection, 'abort-at-set-target-layout', abort_at='set_target_layout')
+
+ log.debug('TEST: reshard bucket with EIO injected at block_writes\n')
+ test_bucket_reshard(connection, 'error-at-block-writes', error_at='block_writes')
+ log.debug('TEST: reshard bucket with abort at block_writes\n')
+ test_bucket_reshard(connection, 'abort-at-block-writes', abort_at='block_writes')
+
+ log.debug('TEST: reshard bucket with EIO injected at commit_target_layout\n')
+ test_bucket_reshard(connection, 'error-at-commit-target-layout', error_at='commit_target_layout')
+ log.debug('TEST: reshard bucket with ECANCELED injected at commit_target_layout\n')
+ test_bucket_reshard(connection, 'error-at-commit-target-layout', error_at='commit_target_layout', error_code=errno.ECANCELED)
+ log.debug('TEST: reshard bucket with abort at commit_target_layout\n')
+ test_bucket_reshard(connection, 'abort-at-commit-target-layout', abort_at='commit_target_layout')
+
+ log.debug('TEST: reshard bucket with EIO injected at do_reshard\n')
+ test_bucket_reshard(connection, 'error-at-do-reshard', error_at='do_reshard')
+ log.debug('TEST: reshard bucket with abort at do_reshard\n')
+ test_bucket_reshard(connection, 'abort-at-do-reshard', abort_at='do_reshard')
+
+ # TESTCASE 'versioning-reshard','bucket','reshard','versioning reshard','succeeds'
+ log.debug(' test: reshard versioned bucket')
+ num_shards_expected = get_bucket_stats(VER_BUCKET_NAME).num_shards + 1
+ cmd = exec_cmd('radosgw-admin bucket reshard --bucket {} --num-shards {}'.format(VER_BUCKET_NAME,
+ num_shards_expected))
+ # check bucket shards num
+ ver_bucket_stats = get_bucket_stats(VER_BUCKET_NAME)
+ assert ver_bucket_stats.num_shards == num_shards_expected
+
+ # TESTCASE 'check acl'
+ new_bucket_acl = connection.BucketAcl(BUCKET_NAME).load()
+ assert new_bucket_acl == bucket_acl
+ new_ver_bucket_acl = connection.BucketAcl(VER_BUCKET_NAME).load()
+ assert new_ver_bucket_acl == ver_bucket_acl
+
+ # TESTCASE 'check reshard removes olh entries with empty name'
+ log.debug(' test: reshard removes olh entries with empty name')
+ bucket.objects.all().delete()
+
+
+ # get name of shard 0 object, add a bogus olh entry with empty name
+ bucket_shard0 = get_bucket_shard0(BUCKET_NAME)
+ if 'CEPH_ROOT' in os.environ:
+ k = '%s/qa/workunits/rgw/olh_noname_key' % os.environ['CEPH_ROOT']
+ v = '%s/qa/workunits/rgw/olh_noname_val' % os.environ['CEPH_ROOT']
+ else:
+ k = 'olh_noname_key'
+ v = 'olh_noname_val'
+ exec_cmd('rados -p %s setomapval %s --omap-key-file %s < %s' % (INDEX_POOL, bucket_shard0, k, v))
+
+ # check that bi list has one entry with empty name
+ cmd = exec_cmd('radosgw-admin bi list --bucket %s' % BUCKET_NAME)
+ json_op = json.loads(cmd.decode('utf-8', 'ignore')) # 'ignore' drops bytes (e.g. 0x80) that can't be decoded as utf-8
+ assert len(json_op) == 1
+ assert json_op[0]['entry']['key']['name'] == ''
+
+ # reshard to prune the bogus olh
+ cmd = exec_cmd('radosgw-admin bucket reshard --bucket %s --num-shards %s --yes-i-really-mean-it' % (BUCKET_NAME, 1))
+
+ # check that bi list now has zero entries
+ cmd = exec_cmd('radosgw-admin bi list --bucket %s' % BUCKET_NAME)
+ json_op = json.loads(cmd.decode('utf-8', 'ignore')) # 'ignore' drops bytes (e.g. 0x80) that can't be decoded as utf-8
+ assert len(json_op) == 0
+
+ # TESTCASE 'check that PUT succeeds during reshard'
+ log.debug(' test: PUT succeeds during reshard')
+ num_shards = get_bucket_stats(VER_BUCKET_NAME).num_shards
+ exec_cmd('''radosgw-admin --inject-delay-at=do_reshard --inject-delay-ms=5000 \
+ bucket reshard --bucket {} --num-shards {}'''
+ .format(VER_BUCKET_NAME, num_shards + 1), wait = False)
+ time.sleep(1)
+ ver_bucket.put_object(Key='put_during_reshard', Body=b"some_data")
+ log.debug('put object successful')
+
+ # TESTCASE 'check that bucket stats are correct after reshard with unlinked entries'
+ log.debug('TEST: check that bucket stats are correct after reshard with unlinked entries\n')
+ ver_bucket.object_versions.all().delete()
+ ok_keys = ['a', 'b', 'c']
+ unlinked_keys = ['x', 'y', 'z']
+ put_objects(ver_bucket, ok_keys)
+ create_unlinked_objects(connection, ver_bucket, unlinked_keys)
+ cmd = exec_cmd(f'radosgw-admin bucket reshard --bucket {VER_BUCKET_NAME} --num-shards 17 --yes-i-really-mean-it')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {VER_BUCKET_NAME} --fix --min-age-hours 0 --rgw-olh-pending-timeout-sec 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(unlinked_keys)
+ ver_bucket.object_versions.all().delete()
+ out = exec_cmd(f'radosgw-admin bucket stats --bucket {VER_BUCKET_NAME}')
+ json_out = json.loads(out)
+ log.debug(json_out['usage'])
+ assert json_out['usage']['rgw.main']['size'] == 0
+ assert json_out['usage']['rgw.main']['num_objects'] == 0
+ assert json_out['usage']['rgw.main']['size_actual'] == 0
+ assert json_out['usage']['rgw.main']['size_kb'] == 0
+ assert json_out['usage']['rgw.main']['size_kb_actual'] == 0
+ assert json_out['usage']['rgw.main']['size_kb_utilized'] == 0
+
+ # Clean up
+ log.debug("Deleting bucket {}".format(BUCKET_NAME))
+ bucket.objects.all().delete()
+ bucket.delete()
+ log.debug("Deleting bucket {}".format(VER_BUCKET_NAME))
+ ver_bucket.object_versions.all().delete()
+ ver_bucket.delete()
+
+main()
+log.info("Completed resharding tests")
diff --git a/qa/workunits/rgw/test_rgw_s3_mp_reupload.py b/qa/workunits/rgw/test_rgw_s3_mp_reupload.py
new file mode 100755
index 000000000..b3cb2d5ab
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_s3_mp_reupload.py
@@ -0,0 +1,121 @@
+import boto3
+import botocore.exceptions
+import sys
+import os
+import subprocess
+
+#boto3.set_stream_logger(name='botocore')
+
+# handles three optional command-line arguments:
+# <bucket-name> : default is "bkt314738362229"
+# <0 or 1> : 0 -> upload aborted, 1 -> completed; default is completed
+# <0 or 1> : 0 -> non-versioned bucket, 1 -> versioned bucket; default is non-versioned
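+#
+# example invocation (hypothetical bucket name and flags):
+# python3 test_rgw_s3_mp_reupload.py my-test-bucket 1 0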
+
+if len(sys.argv) >= 2:
+ bucket_name = sys.argv[1]
+else:
+ bucket_name = "bkt314738362229"
+print("bucket nams is %s" % bucket_name)
+
+complete_mpu = True
+if len(sys.argv) >= 3:
+ complete_mpu = int(sys.argv[2]) > 0
+
+versioned_bucket = False
+if len(sys.argv) >= 4:
+ versioned_bucket = int(sys.argv[3]) > 0
+
+rgw_host = os.environ['RGW_HOST']
+access_key = os.environ['RGW_ACCESS_KEY']
+secret_key = os.environ['RGW_SECRET_KEY']
+
+try:
+ endpoint='http://%s:%d' % (rgw_host, 80)
+ client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+ res = client.create_bucket(Bucket=bucket_name)
+except botocore.exceptions.EndpointConnectionError:
+ try:
+ endpoint='https://%s:%d' % (rgw_host, 443)
+ client = boto3.client('s3',
+ endpoint_url=endpoint,
+ verify=False,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+ res = client.create_bucket(Bucket=bucket_name)
+ except botocore.exceptions.EndpointConnectionError:
+ endpoint='http://%s:%d' % (rgw_host, 8000)
+ client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+ res = client.create_bucket(Bucket=bucket_name)
+
+print("endpoint is %s" % endpoint)
+
+if versioned_bucket:
+ res = client.put_bucket_versioning(
+ Bucket=bucket_name,
+ VersioningConfiguration={
+ 'MFADelete': 'Disabled',
+ 'Status': 'Enabled'}
+ )
+
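+# test shape: upload nparts parts normally, then (if do_reupload) re-upload
+# part 1 ndups times before completing or aborting the multipart upload, to
+# exercise cleanup of the replaced parts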
+key = "mpu_test4"
+nparts = 2
+ndups = 11
+do_reupload = True
+
+part_path = "/tmp/mp_part_5m"
+subprocess.run(["dd", "if=/dev/urandom", "of=" + part_path, "bs=1M", "count=5"], check=True)
+
+f = open(part_path, 'rb')
+
+res = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+mpu_id = res["UploadId"]
+
+print("start UploadId=%s" % (mpu_id))
+
+parts = []
+parts2 = []
+
+for ix in range(0,nparts):
+ part_num = ix + 1
+ f.seek(0)
+ res = client.upload_part(Body=f, Bucket=bucket_name, Key=key,
+ UploadId=mpu_id, PartNumber=part_num)
+ # save
+ etag = res['ETag']
+ part = {'ETag': etag, 'PartNumber': part_num}
+ print("phase 1 uploaded part %s" % part)
+ parts.append(part)
+
+if do_reupload:
+ # just re-upload part 1
+ part_num = 1
+ for ix in range(0,ndups):
+ f.seek(0)
+ res = client.upload_part(Body=f, Bucket=bucket_name, Key=key,
+ UploadId=mpu_id, PartNumber=part_num)
+ etag = res['ETag']
+ part = {'ETag': etag, 'PartNumber': part_num}
+ print ("phase 2 uploaded part %s" % part)
+
+ # save
+ etag = res['ETag']
+ part = {'ETag': etag, 'PartNumber': part_num}
+ parts2.append(part)
+
+if complete_mpu:
+ print("completing multipart upload, parts=%s" % parts)
+ res = client.complete_multipart_upload(
+ Bucket=bucket_name, Key=key, UploadId=mpu_id,
+ MultipartUpload={'Parts': parts})
+else:
+ print("aborting multipart upload, parts=%s" % parts)
+ res = client.abort_multipart_upload(
+ Bucket=bucket_name, Key=key, UploadId=mpu_id)
+
+# clean up
+subprocess.run(["rm", "-f", part_path], check=True)
diff --git a/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh b/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh
new file mode 100755
index 000000000..5d73fd048
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh
@@ -0,0 +1,110 @@
+#!/usr/bin/env bash
+
+# INITIALIZATION
+
+mydir=$(dirname $0)
+data_pool=default.rgw.buckets.data
+orphan_list_out=/tmp/orphan_list.$$
+radoslist_out=/tmp/radoslist.$$
+rados_ls_out=/tmp/rados_ls.$$
+diff_out=/tmp/diff.$$
+
+rgw_host="$(hostname --fqdn)"
+echo "INFO: fully qualified domain name: $rgw_host"
+
+export RGW_ACCESS_KEY="0555b35654ad1656d804"
+export RGW_SECRET_KEY="h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
+export RGW_HOST="${RGW_HOST:-$rgw_host}"
+
+# random argument determines if multipart is aborted or completed 50/50
+outcome=$((RANDOM % 2))
+if [ $outcome -eq 0 ] ;then
+ echo "== TESTING *ABORTING* MULTIPART UPLOAD WITH RE-UPLOADS =="
+else
+ echo "== TESTING *COMPLETING* MULTIPART UPLOAD WITH RE-UPLOADS =="
+fi
+
+# random argument determines if the bucket is versioned or not 50/50
+versioning=$((RANDOM % 2))
+if [ $versioning -eq 0 ] ;then
+ echo "== TESTING NON-VERSIONED BUCKET =="
+else
+ echo "== TESTING VERSIONED BUCKET =="
+fi
+
+# create a randomized bucket name
+bucket="reupload-bkt-$((RANDOM % 899999 + 100000))"
+
+
+# SET UP PYTHON VIRTUAL ENVIRONMENT
+
+# install boto3
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install boto3
+
+
+# CREATE RGW USER IF NECESSARY
+
+if radosgw-admin user info --access-key $RGW_ACCESS_KEY 2>/dev/null ;then
+ echo INFO: user already exists
+else
+ echo INFO: creating user
+ radosgw-admin user create --uid testid \
+ --access-key $RGW_ACCESS_KEY \
+ --secret $RGW_SECRET_KEY \
+ --display-name 'M. Tester' \
+ --email tester@ceph.com 2>/dev/null
+fi
+
+
+# RUN REUPLOAD TEST
+
+$mydir/bin/python3 ${mydir}/test_rgw_s3_mp_reupload.py $bucket $outcome $versioning
+
+
+# ANALYZE FOR ERRORS
+# (NOTE: for now we're choosing not to use the rgw-orphan-list tool)
+
+# force garbage collection to remove extra parts
+radosgw-admin gc process --include-all 2>/dev/null
+
+marker=$(radosgw-admin metadata get bucket:$bucket 2>/dev/null | grep bucket_id | sed 's/.*: "\(.*\)".*/\1/')
+
+# determine expected rados objects
+radosgw-admin bucket radoslist --bucket=$bucket 2>/dev/null | sort >$radoslist_out
+echo "radosgw-admin bucket radoslist:"
+cat $radoslist_out
+
+# determine found rados objects
+rados ls -p $data_pool 2>/dev/null | grep "^$marker" | sort >$rados_ls_out
+echo "rados ls:"
+cat $rados_ls_out
+
+# compare expected and found
+diff $radoslist_out $rados_ls_out >$diff_out
+if [ $(cat $diff_out | wc -l) -ne 0 ] ;then
+ error=1
+ echo "ERROR: Found differences between expected and actual rados objects for test bucket."
+ echo " note: indicators: '>' found but not expected; '<' expected but not found."
+ cat $diff_out
+fi
+
+
+# CLEAN UP
+
+deactivate
+
+rm -f $orphan_list_out $radoslist_out $rados_ls_out $diff_out
+
+
+# PRODUCE FINAL RESULTS
+
+if [ -n "$error" ] ;then
+ echo "== FAILED =="
+ exit 1
+fi
+
+echo "== PASSED =="
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_throttle.sh b/qa/workunits/rgw/test_rgw_throttle.sh
new file mode 100755
index 000000000..f637b8f08
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_throttle.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_rgw_throttle
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_versioning.py b/qa/workunits/rgw/test_rgw_versioning.py
new file mode 100755
index 000000000..fc69e138d
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_versioning.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+
+import logging as log
+import json
+import uuid
+import botocore
+import time
+from common import exec_cmd, create_user, boto_connect
+from botocore.config import Config
+
+"""
+Tests behavior of bucket versioning.
+"""
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
+#
+
+""" Constants """
+USER = 'versioning-tester'
+DISPLAY_NAME = 'Versioning Testing'
+ACCESS_KEY = 'LTA662PVVDTDWX6M2AB0'
+SECRET_KEY = 'pvtchqajgzqx5581t6qbddbkj0bgf3a69qdkjcea'
+BUCKET_NAME = 'versioning-bucket'
+DATA_POOL = 'default.rgw.buckets.data'
+
+def main():
+ """
+ execute versioning tests
+ """
+ create_user(USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY)
+
+ connection = boto_connect(ACCESS_KEY, SECRET_KEY, Config(retries = {
+ 'total_max_attempts': 1,
+ }))
+
+ # pre-test cleanup
+ try:
+ bucket = connection.Bucket(BUCKET_NAME)
+ bucket.objects.all().delete()
+ bucket.object_versions.all().delete()
+ bucket.delete()
+ except botocore.exceptions.ClientError as e:
+ if not e.response['Error']['Code'] == 'NoSuchBucket':
+ raise
+
+ bucket = connection.create_bucket(Bucket=BUCKET_NAME)
+ connection.BucketVersioning(BUCKET_NAME).enable()
+
+ # reproducer for bug from https://tracker.ceph.com/issues/59663
+ # TESTCASE 'verify that index entries and OLH objects are cleaned up after redundant deletes'
+ log.debug('TEST: verify that index entries and OLH objects are cleaned up after redundant deletes\n')
+ key = str(uuid.uuid4())
+ resp = bucket.Object(key).delete()
+ assert 'DeleteMarker' in resp, 'DeleteMarker key not present in response'
+ assert resp['DeleteMarker'], 'DeleteMarker value not True in response'
+ assert 'VersionId' in resp, 'VersionId key not present in response'
+ version_id = resp['VersionId']
+ bucket.Object(key).delete()
+ connection.ObjectVersion(bucket.name, key, version_id).delete()
+ # bucket index should now be empty
+ out = exec_cmd(f'radosgw-admin bi list --bucket {BUCKET_NAME}')
+ json_out = json.loads(out.replace(b'\x80', b'0x80'))
+ assert len(json_out) == 0, 'bucket index was not empty after all objects were deleted'
+
+ (_out, ret) = exec_cmd(f'rados -p {DATA_POOL} ls | grep {key}', check_retcode=False)
+ assert ret != 0, 'olh object was not cleaned up'
+
+ # TESTCASE 'verify that index entries and OLH objects are cleaned up after index linking error'
+ log.debug('TEST: verify that index entries and OLH objects are cleaned up after index linking error\n')
+ key = str(uuid.uuid4())
+ try:
+ exec_cmd('ceph config set client rgw_debug_inject_set_olh_err 2')
+ time.sleep(1)
+ bucket.Object(key).delete()
+ finally:
+ exec_cmd('ceph config rm client rgw_debug_inject_set_olh_err')
+ out = exec_cmd(f'radosgw-admin bi list --bucket {BUCKET_NAME}')
+ json_out = json.loads(out.replace(b'\x80', b'0x80'))
+ assert len(json_out) == 0, 'bucket index was not empty after op failed'
+ (_out, ret) = exec_cmd(f'rados -p {DATA_POOL} ls | grep {key}', check_retcode=False)
+ assert ret != 0, 'olh object was not cleaned up'
+
+ # TESTCASE 'verify that original null object version is intact after failed olh upgrade'
+ log.debug('TEST: verify that original null object version is intact after failed olh upgrade\n')
+ connection.BucketVersioning(BUCKET_NAME).suspend()
+ key = str(uuid.uuid4())
+ put_resp = bucket.put_object(Key=key, Body=b"data")
+ connection.BucketVersioning(BUCKET_NAME).enable()
+ try:
+ exec_cmd('ceph config set client rgw_debug_inject_set_olh_err 2')
+ time.sleep(1)
+ # expected to fail due to the above error injection
+ bucket.put_object(Key=key, Body=b"new data")
+ except Exception as e:
+ log.debug(e)
+ finally:
+ exec_cmd('ceph config rm client rgw_debug_inject_set_olh_err')
+ get_resp = bucket.Object(key).get()
+ assert put_resp.e_tag == get_resp['ETag'], 'get did not return null version with correct etag'
+
+ # Clean up
+ log.debug("Deleting bucket {}".format(BUCKET_NAME))
+ bucket.object_versions.all().delete()
+ bucket.delete()
+
+main()
+log.info("Completed bucket versioning tests")