Diffstat (limited to 'src/test/rgw/rgw_multi')
-rw-r--r--  src/test/rgw/rgw_multi/__init__.py    |    0
-rw-r--r--  src/test/rgw/rgw_multi/conn.py        |   30
-rw-r--r--  src/test/rgw/rgw_multi/multisite.py   |  395
-rw-r--r--  src/test/rgw/rgw_multi/tests.py       | 1316
-rw-r--r--  src/test/rgw/rgw_multi/tests_az.py    |  597
-rw-r--r--  src/test/rgw/rgw_multi/tests_es.py    |  276
-rw-r--r--  src/test/rgw/rgw_multi/tests_ps.py    | 4958
-rw-r--r--  src/test/rgw/rgw_multi/tools.py       |   97
-rw-r--r--  src/test/rgw/rgw_multi/zone_az.py     |   40
-rw-r--r--  src/test/rgw/rgw_multi/zone_cloud.py  |  320
-rw-r--r--  src/test/rgw/rgw_multi/zone_es.py     |  250
-rw-r--r--  src/test/rgw/rgw_multi/zone_ps.py     |  428
-rw-r--r--  src/test/rgw/rgw_multi/zone_rados.py  |  109
13 files changed, 8816 insertions, 0 deletions
diff --git a/src/test/rgw/rgw_multi/__init__.py b/src/test/rgw/rgw_multi/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/test/rgw/rgw_multi/__init__.py
diff --git a/src/test/rgw/rgw_multi/conn.py b/src/test/rgw/rgw_multi/conn.py
new file mode 100644
index 000000000..b03db3673
--- /dev/null
+++ b/src/test/rgw/rgw_multi/conn.py
@@ -0,0 +1,30 @@
+import boto
+import boto.s3.connection
+
+
+def get_gateway_connection(gateway, credentials):
+ """ connect to the given gateway """
+ if gateway.connection is None:
+ gateway.connection = boto.connect_s3(
+ aws_access_key_id = credentials.access_key,
+ aws_secret_access_key = credentials.secret,
+ host = gateway.host,
+ port = gateway.port,
+ is_secure = False,
+ calling_format = boto.s3.connection.OrdinaryCallingFormat())
+ return gateway.connection
+
+def get_gateway_secure_connection(gateway, credentials):
+ """ secure connect to the given gateway """
+ if gateway.ssl_port == 0:
+ return None
+ if gateway.secure_connection is None:
+ gateway.secure_connection = boto.connect_s3(
+ aws_access_key_id = credentials.access_key,
+ aws_secret_access_key = credentials.secret,
+ host = gateway.host,
+ port = gateway.ssl_port,
+ is_secure = True,
+ validate_certs=False,
+ calling_format = boto.s3.connection.OrdinaryCallingFormat())
+ return gateway.secure_connection
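+
+# A minimal usage sketch (not part of the module); the host, port and
+# credentials below are hypothetical, for illustration only:
+#
+#   from rgw_multi.multisite import Gateway, Credentials
+#   from rgw_multi.conn import get_gateway_connection
+#
+#   gw = Gateway(host='localhost', port=8000)        # assumed endpoint
+#   creds = Credentials('ACCESS_KEY', 'SECRET_KEY')  # assumed keys
+#   conn = get_gateway_connection(gw, creds)         # cached on gw.connection
+#   print([b.name for b in conn.get_all_buckets()])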
diff --git a/src/test/rgw/rgw_multi/multisite.py b/src/test/rgw/rgw_multi/multisite.py
new file mode 100644
index 000000000..dfcde085e
--- /dev/null
+++ b/src/test/rgw/rgw_multi/multisite.py
@@ -0,0 +1,395 @@
+from abc import ABCMeta, abstractmethod
+from io import StringIO
+
+import json
+
+from .conn import get_gateway_connection, get_gateway_secure_connection
+
+class Cluster:
+ """ interface to run commands against a distinct ceph cluster """
+ __metaclass__ = ABCMeta
+
+ @abstractmethod
+ def admin(self, args = None, **kwargs):
+ """ execute a radosgw-admin command """
+ pass
+
+class Gateway:
+ """ interface to control a single radosgw instance """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, host = None, port = None, cluster = None, zone = None, ssl_port = 0):
+ self.host = host
+ self.port = port
+ self.cluster = cluster
+ self.zone = zone
+ self.connection = None
+ self.secure_connection = None
+ self.ssl_port = ssl_port
+
+ @abstractmethod
+ def start(self, args = []):
+ """ start the gateway with the given args """
+ pass
+
+ @abstractmethod
+ def stop(self):
+ """ stop the gateway """
+ pass
+
+ def endpoint(self):
+ return 'http://%s:%d' % (self.host, self.port)
+
+class SystemObject:
+ """ interface for system objects, represented in json format and
+ manipulated with radosgw-admin commands """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, data = None, uuid = None):
+ self.data = data
+ self.id = uuid
+ if data:
+ self.load_from_json(data)
+
+ @abstractmethod
+ def build_command(self, command):
+ """ return the command line for the given command, including arguments
+ to specify this object """
+ pass
+
+ @abstractmethod
+ def load_from_json(self, data):
+ """ update internal state based on json data """
+ pass
+
+ def command(self, cluster, cmd, args = None, **kwargs):
+ """ run the given command and return the output and retcode """
+ args = self.build_command(cmd) + (args or [])
+ return cluster.admin(args, **kwargs)
+
+ def json_command(self, cluster, cmd, args = None, **kwargs):
+ """ run the given command, parse the output and return the resulting
+ data and retcode """
+ s, r = self.command(cluster, cmd, args or [], **kwargs)
+ if r == 0:
+ data = json.loads(s)
+ self.load_from_json(data)
+ self.data = data
+ return self.data, r
+
+ # mixins for supported commands
+ class Create(object):
+ def create(self, cluster, args = None, **kwargs):
+ """ create the object with the given arguments """
+ return self.json_command(cluster, 'create', args, **kwargs)
+
+ class Delete(object):
+ def delete(self, cluster, args = None, **kwargs):
+ """ delete the object """
+ # not json_command() because delete has no output
+ _, r = self.command(cluster, 'delete', args, **kwargs)
+ if r == 0:
+ self.data = None
+ return r
+
+ class Get(object):
+ def get(self, cluster, args = None, **kwargs):
+ """ read the object from storage """
+ kwargs['read_only'] = True
+ return self.json_command(cluster, 'get', args, **kwargs)
+
+ class Set(object):
+ def set(self, cluster, data, args = None, **kwargs):
+ """ set the object by json """
+ kwargs['stdin'] = StringIO(json.dumps(data))
+ return self.json_command(cluster, 'set', args, **kwargs)
+
+ class Modify(object):
+ def modify(self, cluster, args = None, **kwargs):
+ """ modify the object with the given arguments """
+ return self.json_command(cluster, 'modify', args, **kwargs)
+
+ class CreateDelete(Create, Delete): pass
+ class GetSet(Get, Set): pass
+
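+# Sketch of how the SystemObject mixins compose (illustrative, not executed):
+# a subclass only supplies build_command() and load_from_json(), and the mixins
+# turn that into radosgw-admin calls. For a hypothetical zone named 'us-east',
+# Zone('us-east').get(cluster) runs roughly
+#
+#   radosgw-admin zone get --rgw-zone us-east
+#
+# parses the JSON output, and refreshes the object's id/name from it.
+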
+class Zone(SystemObject, SystemObject.CreateDelete, SystemObject.GetSet, SystemObject.Modify):
+ def __init__(self, name, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None):
+ self.name = name
+ self.zonegroup = zonegroup
+ self.cluster = cluster
+ self.gateways = gateways or []
+ super(Zone, self).__init__(data, zone_id)
+
+ def zone_arg(self):
+ """ command-line argument to specify this zone """
+ return ['--rgw-zone', self.name]
+
+ def zone_args(self):
+ """ command-line arguments to specify this zone/zonegroup/realm """
+ args = self.zone_arg()
+ if self.zonegroup:
+ args += self.zonegroup.zonegroup_args()
+ return args
+
+ def build_command(self, command):
+ """ build a command line for the given command and args """
+ return ['zone', command] + self.zone_args()
+
+ def load_from_json(self, data):
+ """ load the zone from json """
+ self.id = data['id']
+ self.name = data['name']
+
+ def start(self, args = None):
+ """ start all gateways """
+ for g in self.gateways:
+ g.start(args)
+
+ def stop(self):
+ """ stop all gateways """
+ for g in self.gateways:
+ g.stop()
+
+ def period(self):
+ return self.zonegroup.period if self.zonegroup else None
+
+ def realm(self):
+ return self.zonegroup.realm() if self.zonegroup else None
+
+ def is_read_only(self):
+ return False
+
+ def tier_type(self):
+ raise NotImplementedError
+
+ def syncs_from(self, zone_name):
+ return zone_name != self.name
+
+ def has_buckets(self):
+ return True
+
+ def get_conn(self, credentials):
+ return ZoneConn(self, credentials) # not implemented, but can be used
+
+class ZoneConn(object):
+ def __init__(self, zone, credentials):
+ """ connect to the zone's first gateway """
+ self.zone = zone
+ self.name = zone.name
+ if isinstance(credentials, list):
+ self.credentials = credentials[0]
+ else:
+ self.credentials = credentials
+
+ if self.zone.gateways is not None:
+ self.conn = get_gateway_connection(self.zone.gateways[0], self.credentials)
+ self.secure_conn = get_gateway_secure_connection(self.zone.gateways[0], self.credentials)
+ # create (cached) connections for the remaining gateways, if any
+ for gw in list(self.zone.gateways):
+ get_gateway_connection(gw, self.credentials)
+ get_gateway_secure_connection(gw, self.credentials)
+
+
+ def get_connection(self):
+ return self.conn
+
+ def get_bucket(self, bucket_name, credentials):
+ raise NotImplementedError
+
+ def check_bucket_eq(self, zone, bucket_name):
+ raise NotImplementedError
+
+class ZoneGroup(SystemObject, SystemObject.CreateDelete, SystemObject.GetSet, SystemObject.Modify):
+ def __init__(self, name, period = None, data = None, zonegroup_id = None, zones = None, master_zone = None):
+ self.name = name
+ self.period = period
+ self.zones = zones or []
+ self.master_zone = master_zone
+ super(ZoneGroup, self).__init__(data, zonegroup_id)
+ self.rw_zones = []
+ self.ro_zones = []
+ self.zones_by_type = {}
+ for z in self.zones:
+ if z.is_read_only():
+ self.ro_zones.append(z)
+ else:
+ self.rw_zones.append(z)
+
+ def zonegroup_arg(self):
+ """ command-line argument to specify this zonegroup """
+ return ['--rgw-zonegroup', self.name]
+
+ def zonegroup_args(self):
+ """ command-line arguments to specify this zonegroup/realm """
+ args = self.zonegroup_arg()
+ realm = self.realm()
+ if realm:
+ args += realm.realm_arg()
+ return args
+
+ def build_command(self, command):
+ """ build a command line for the given command and args """
+ return ['zonegroup', command] + self.zonegroup_args()
+
+ def zone_by_id(self, zone_id):
+ """ return the matching zone by id """
+ for zone in self.zones:
+ if zone.id == zone_id:
+ return zone
+ return None
+
+ def load_from_json(self, data):
+ """ load the zonegroup from json """
+ self.id = data['id']
+ self.name = data['name']
+ master_id = data['master_zone']
+ if not self.master_zone or master_id != self.master_zone.id:
+ self.master_zone = self.zone_by_id(master_id)
+
+ def add(self, cluster, zone, args = None, **kwargs):
+ """ add an existing zone to the zonegroup """
+ args = zone.zone_arg() + (args or [])
+ data, r = self.json_command(cluster, 'add', args, **kwargs)
+ if r == 0:
+ zone.zonegroup = self
+ self.zones.append(zone)
+ return data, r
+
+ def remove(self, cluster, zone, args = None, **kwargs):
+ """ remove an existing zone from the zonegroup """
+ args = zone.zone_arg() + (args or [])
+ data, r = self.json_command(cluster, 'remove', args, **kwargs)
+ if r == 0:
+ zone.zonegroup = None
+ self.zones.remove(zone)
+ return data, r
+
+ def realm(self):
+ return self.period.realm if self.period else None
+
+class Period(SystemObject, SystemObject.Get):
+ def __init__(self, realm = None, data = None, period_id = None, zonegroups = None, master_zonegroup = None):
+ self.realm = realm
+ self.zonegroups = zonegroups or []
+ self.master_zonegroup = master_zonegroup
+ super(Period, self).__init__(data, period_id)
+
+ def zonegroup_by_id(self, zonegroup_id):
+ """ return the matching zonegroup by id """
+ for zonegroup in self.zonegroups:
+ if zonegroup.id == zonegroup_id:
+ return zonegroup
+ return None
+
+ def build_command(self, command):
+ """ build a command line for the given command and args """
+ return ['period', command]
+
+ def load_from_json(self, data):
+ """ load the period from json """
+ self.id = data['id']
+ master_id = data['master_zonegroup']
+ if not self.master_zonegroup or master_id != self.master_zonegroup.id:
+ self.master_zonegroup = self.zonegroup_by_id(master_id)
+
+ def update(self, zone, args = None, **kwargs):
+ """ run 'radosgw-admin period update' on the given zone """
+ assert(zone.cluster)
+ args = zone.zone_args() + (args or [])
+ if kwargs.pop('commit', False):
+ args.append('--commit')
+ return self.json_command(zone.cluster, 'update', args, **kwargs)
+
+ def commit(self, zone, args = None, **kwargs):
+ """ run 'radosgw-admin period commit' on the given zone """
+ assert(zone.cluster)
+ args = zone.zone_args() + (args or [])
+ return self.json_command(zone.cluster, 'commit', args, **kwargs)
+
+class Realm(SystemObject, SystemObject.CreateDelete, SystemObject.GetSet):
+ def __init__(self, name, period = None, data = None, realm_id = None):
+ self.name = name
+ self.current_period = period
+ super(Realm, self).__init__(data, realm_id)
+
+ def realm_arg(self):
+ """ return the command-line arguments that specify this realm """
+ return ['--rgw-realm', self.name]
+
+ def build_command(self, command):
+ """ build a command line for the given command and args """
+ return ['realm', command] + self.realm_arg()
+
+ def load_from_json(self, data):
+ """ load the realm from json """
+ self.id = data['id']
+
+ def pull(self, cluster, gateway, credentials, args = [], **kwargs):
+ """ pull an existing realm from the given gateway """
+ args += ['--url', gateway.endpoint()]
+ args += credentials.credential_args()
+ return self.json_command(cluster, 'pull', args, **kwargs)
+
+ def master_zonegroup(self):
+ """ return the current period's master zonegroup """
+ if self.current_period is None:
+ return None
+ return self.current_period.master_zonegroup
+
+ def meta_master_zone(self):
+ """ return the current period's metadata master zone """
+ zonegroup = self.master_zonegroup()
+ if zonegroup is None:
+ return None
+ return zonegroup.master_zone
+
+class Credentials:
+ def __init__(self, access_key, secret):
+ self.access_key = access_key
+ self.secret = secret
+
+ def credential_args(self):
+ return ['--access-key', self.access_key, '--secret', self.secret]
+
+class User(SystemObject):
+ def __init__(self, uid, data = None, name = None, credentials = None, tenant = None):
+ self.name = name
+ self.credentials = credentials or []
+ self.tenant = tenant
+ super(User, self).__init__(data, uid)
+
+ def user_arg(self):
+ """ command-line argument to specify this user """
+ args = ['--uid', self.id]
+ if self.tenant:
+ args += ['--tenant', self.tenant]
+ return args
+
+ def build_command(self, command):
+ """ build a command line for the given command and args """
+ return ['user', command] + self.user_arg()
+
+ def load_from_json(self, data):
+ """ load the user from json """
+ self.id = data['user_id']
+ self.name = data['display_name']
+ self.credentials = [Credentials(k['access_key'], k['secret_key']) for k in data['keys']]
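+
+ # Example of the 'user info' JSON shape that load_from_json() expects; the
+ # values are illustrative only:
+ #
+ # {
+ #   "user_id": "testid",
+ #   "display_name": "M. Tester",
+ #   "keys": [{"access_key": "ACCESS", "secret_key": "SECRET"}]
+ # }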
+
+ def create(self, zone, args = None, **kwargs):
+ """ create the user with the given arguments """
+ assert(zone.cluster)
+ args = zone.zone_args() + (args or [])
+ return self.json_command(zone.cluster, 'create', args, **kwargs)
+
+ def info(self, zone, args = None, **kwargs):
+ """ read the user from storage """
+ assert(zone.cluster)
+ args = zone.zone_args() + (args or [])
+ kwargs['read_only'] = True
+ return self.json_command(zone.cluster, 'info', args, **kwargs)
+
+ def delete(self, zone, args = None, **kwargs):
+ """ delete the user """
+ assert(zone.cluster)
+ args = zone.zone_args() + (args or [])
+ return self.command(zone.cluster, 'delete', args, **kwargs)
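+
+# Hedged sketch of how these pieces fit together; 'cluster' and 'gateway' stand
+# for concrete Cluster/Gateway implementations supplied by the test driver, and
+# all names/keys are hypothetical:
+#
+#   creds = Credentials('ACCESS_KEY', 'SECRET_KEY')
+#   realm = Realm('gold')
+#   period = Period(realm=realm)
+#   zg = ZoneGroup('us', period=period)
+#   zone = Zone('us-east', zonegroup=zg, cluster=cluster, gateways=[gateway])
+#   user = User('testid', credentials=[creds])
+#
+#   realm.create(cluster)             # radosgw-admin realm create --rgw-realm gold
+#   zg.create(cluster)                # radosgw-admin zonegroup create ...
+#   zone.create(cluster)              # radosgw-admin zone create ...
+#   period.update(zone, commit=True)  # radosgw-admin period update --commit
+#   user.create(zone, creds.credential_args())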
diff --git a/src/test/rgw/rgw_multi/tests.py b/src/test/rgw/rgw_multi/tests.py
new file mode 100644
index 000000000..ae45714cd
--- /dev/null
+++ b/src/test/rgw/rgw_multi/tests.py
@@ -0,0 +1,1316 @@
+import json
+import random
+import string
+import sys
+import time
+import logging
+import errno
+import dateutil.parser
+
+from itertools import combinations
+from io import StringIO
+
+import boto
+import boto.s3.connection
+from boto.s3.website import WebsiteConfiguration
+from boto.s3.cors import CORSConfiguration
+
+from nose.tools import eq_ as eq
+from nose.plugins.attrib import attr
+from nose.plugins.skip import SkipTest
+
+from .multisite import Zone, ZoneGroup, Credentials
+
+from .conn import get_gateway_connection
+from .tools import assert_raises
+
+class Config:
+ """ test configuration """
+ def __init__(self, **kwargs):
+ # by default, wait up to 5 minutes before giving up on a sync checkpoint
+ self.checkpoint_retries = kwargs.get('checkpoint_retries', 60)
+ self.checkpoint_delay = kwargs.get('checkpoint_delay', 5)
+ # allow some time for realm reconfiguration after changing master zone
+ self.reconfigure_delay = kwargs.get('reconfigure_delay', 5)
+ self.tenant = kwargs.get('tenant', '')
+
+# rgw multisite tests, written against the interfaces provided in rgw_multi.
+# these tests must be initialized and run by another module that provides
+# implementations of these interfaces by calling init_multi()
+realm = None
+user = None
+config = None
+def init_multi(_realm, _user, _config=None):
+ global realm
+ realm = _realm
+ global user
+ user = _user
+ global config
+ config = _config or Config()
+ realm_meta_checkpoint(realm)
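+
+# Hedged sketch of the expected driver: another module (e.g. a vstart- or
+# teuthology-based harness) builds the realm and user objects and calls
+# init_multi(). The setup helper named below is hypothetical:
+#
+#   from rgw_multi import multisite, tests
+#
+#   realm = build_realm_from_running_clusters()   # hypothetical setup helper
+#   user = multisite.User('testid', credentials=[multisite.Credentials('AK', 'SK')])
+#   tests.init_multi(realm, user, tests.Config(checkpoint_retries=60))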
+
+def get_user():
+ return user.id if user is not None else ''
+
+def get_tenant():
+ return config.tenant if config is not None and config.tenant is not None else ''
+
+def get_realm():
+ return realm
+
+log = logging.getLogger('rgw_multi.tests')
+
+num_buckets = 0
+run_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6))
+
+def get_zone_connection(zone, credentials):
+ """ connect to the zone's first gateway """
+ if isinstance(credentials, list):
+ credentials = credentials[0]
+ return get_gateway_connection(zone.gateways[0], credentials)
+
+def mdlog_list(zone, period = None):
+ cmd = ['mdlog', 'list']
+ if period:
+ cmd += ['--period', period]
+ (mdlog_json, _) = zone.cluster.admin(cmd, read_only=True)
+ return json.loads(mdlog_json)
+
+def mdlog_autotrim(zone):
+ zone.cluster.admin(['mdlog', 'autotrim'])
+
+def datalog_list(zone, args = None):
+ cmd = ['datalog', 'list'] + (args or [])
+ (datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
+ return json.loads(datalog_json)
+
+def datalog_status(zone):
+ cmd = ['datalog', 'status']
+ (datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
+ return json.loads(datalog_json)
+
+def datalog_autotrim(zone):
+ zone.cluster.admin(['datalog', 'autotrim'])
+
+def bilog_list(zone, bucket, args = None):
+ cmd = ['bilog', 'list', '--bucket', bucket] + (args or [])
+ cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
+ bilog, _ = zone.cluster.admin(cmd, read_only=True)
+ return json.loads(bilog)
+
+def bilog_autotrim(zone, args = None):
+ zone.cluster.admin(['bilog', 'autotrim'] + (args or []))
+
+def parse_meta_sync_status(meta_sync_status_json):
+ log.debug('current meta sync status=%s', meta_sync_status_json)
+ sync_status = json.loads(meta_sync_status_json)
+
+ sync_info = sync_status['sync_status']['info']
+ global_sync_status = sync_info['status']
+ num_shards = sync_info['num_shards']
+ period = sync_info['period']
+ realm_epoch = sync_info['realm_epoch']
+
+ sync_markers=sync_status['sync_status']['markers']
+ log.debug('sync_markers=%s', sync_markers)
+ assert(num_shards == len(sync_markers))
+
+ markers={}
+ for i in range(num_shards):
+ # get marker, only if it's an incremental marker for the same realm epoch
+ if realm_epoch > sync_markers[i]['val']['realm_epoch'] or sync_markers[i]['val']['state'] == 0:
+ markers[i] = ''
+ else:
+ markers[i] = sync_markers[i]['val']['marker']
+
+ return period, realm_epoch, num_shards, markers
+
+def meta_sync_status(zone):
+ for _ in range(config.checkpoint_retries):
+ cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
+ meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
+ if retcode == 0:
+ return parse_meta_sync_status(meta_sync_status_json)
+ assert(retcode == 2) # ENOENT
+ time.sleep(config.checkpoint_delay)
+
+ assert False, 'failed to read metadata sync status for zone=%s' % zone.name
+
+def meta_master_log_status(master_zone):
+ cmd = ['mdlog', 'status'] + master_zone.zone_args()
+ mdlog_status_json, retcode = master_zone.cluster.admin(cmd, read_only=True)
+ mdlog_status = json.loads(mdlog_status_json)
+
+ markers = {i: s['marker'] for i, s in enumerate(mdlog_status)}
+ log.debug('master meta markers=%s', markers)
+ return markers
+
+def compare_meta_status(zone, log_status, sync_status):
+ if len(log_status) != len(sync_status):
+ log.error('len(log_status)=%d, len(sync_status)=%d', len(log_status), len(sync_status))
+ return False
+
+ msg = ''
+ for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
+ if l > s:
+ if len(msg):
+ msg += ', '
+ msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s
+
+ if len(msg) > 0:
+ log.warning('zone %s behind master: %s', zone.name, msg)
+ return False
+
+ return True
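+
+# Worked example of the comparison above, with illustrative marker values:
+# log_status = {0: '1_150', 1: '1_200'} and sync_status = {0: '1_150', 1: '1_180'}
+# means shard 0 is caught up while shard 1 is behind, so this logs
+# "shard=1 master=1_200 target=1_180" and returns False.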
+
+def zone_meta_checkpoint(zone, meta_master_zone = None, master_status = None):
+ if not meta_master_zone:
+ meta_master_zone = zone.realm().meta_master_zone()
+ if not master_status:
+ master_status = meta_master_log_status(meta_master_zone)
+
+ current_realm_epoch = realm.current_period.data['realm_epoch']
+
+ log.info('starting meta checkpoint for zone=%s', zone.name)
+
+ for _ in range(config.checkpoint_retries):
+ period, realm_epoch, num_shards, sync_status = meta_sync_status(zone)
+ if realm_epoch < current_realm_epoch:
+ log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d',
+ zone.name, realm_epoch, current_realm_epoch)
+ else:
+ log.debug('log_status=%s', master_status)
+ log.debug('sync_status=%s', sync_status)
+ if compare_meta_status(zone, master_status, sync_status):
+ log.info('finish meta checkpoint for zone=%s', zone.name)
+ return
+
+ time.sleep(config.checkpoint_delay)
+ assert False, 'failed meta checkpoint for zone=%s' % zone.name
+
+def zonegroup_meta_checkpoint(zonegroup, meta_master_zone = None, master_status = None):
+ if not meta_master_zone:
+ meta_master_zone = zonegroup.realm().meta_master_zone()
+ if not master_status:
+ master_status = meta_master_log_status(meta_master_zone)
+
+ for zone in zonegroup.zones:
+ if zone == meta_master_zone:
+ continue
+ zone_meta_checkpoint(zone, meta_master_zone, master_status)
+
+def realm_meta_checkpoint(realm):
+ log.info('meta checkpoint')
+
+ meta_master_zone = realm.meta_master_zone()
+ master_status = meta_master_log_status(meta_master_zone)
+
+ for zonegroup in realm.current_period.zonegroups:
+ zonegroup_meta_checkpoint(zonegroup, meta_master_zone, master_status)
+
+def parse_data_sync_status(data_sync_status_json):
+ log.debug('current data sync status=%s', data_sync_status_json)
+ sync_status = json.loads(data_sync_status_json)
+
+ global_sync_status=sync_status['sync_status']['info']['status']
+ num_shards=sync_status['sync_status']['info']['num_shards']
+
+ sync_markers=sync_status['sync_status']['markers']
+ log.debug('sync_markers=%s', sync_markers)
+ assert(num_shards == len(sync_markers))
+
+ markers={}
+ for i in range(num_shards):
+ markers[i] = sync_markers[i]['val']['marker']
+
+ return (num_shards, markers)
+
+def data_sync_status(target_zone, source_zone):
+ if target_zone == source_zone:
+ return None
+
+ for _ in range(config.checkpoint_retries):
+ cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
+ cmd += ['--source-zone', source_zone.name]
+ data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
+ if retcode == 0:
+ return parse_data_sync_status(data_sync_status_json)
+
+ assert(retcode == 2) # ENOENT
+ time.sleep(config.checkpoint_delay)
+
+ assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \
+ (target_zone.name, source_zone.name)
+
+def bucket_sync_status(target_zone, source_zone, bucket_name):
+ if target_zone == source_zone:
+ return None
+
+ cmd = ['bucket', 'sync', 'markers'] + target_zone.zone_args()
+ cmd += ['--source-zone', source_zone.name]
+ cmd += ['--bucket', bucket_name]
+ cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
+ while True:
+ bucket_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
+ if retcode == 0:
+ break
+
+ assert(retcode == 2) # ENOENT
+
+ sync_status = json.loads(bucket_sync_status_json)
+
+ markers={}
+ for entry in sync_status:
+ val = entry['val']
+ pos = val['inc_marker']['position'].split('#')[-1] # get rid of shard id; e.g., 6#00000000002.132.3 -> 00000000002.132.3
+ markers[entry['key']] = pos
+
+ return markers
+
+def data_source_log_status(source_zone):
+ source_cluster = source_zone.cluster
+ cmd = ['datalog', 'status'] + source_zone.zone_args()
+ datalog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
+ datalog_status = json.loads(datalog_status_json)
+
+ markers = {i: s['marker'] for i, s in enumerate(datalog_status)}
+ log.debug('data markers for zone=%s markers=%s', source_zone.name, markers)
+ return markers
+
+def bucket_source_log_status(source_zone, bucket_name):
+ cmd = ['bilog', 'status'] + source_zone.zone_args()
+ cmd += ['--bucket', bucket_name]
+ cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else []
+ source_cluster = source_zone.cluster
+ bilog_status_json, retcode = source_cluster.admin(cmd, read_only=True)
+ bilog_status = json.loads(bilog_status_json)
+
+ m={}
+ markers={}
+ try:
+ m = bilog_status['markers']
+ except:
+ pass
+
+ for s in m:
+ key = s['key']
+ val = s['val']
+ markers[key] = val
+
+ log.debug('bilog markers for zone=%s bucket=%s markers=%s', source_zone.name, bucket_name, markers)
+ return markers
+
+def compare_data_status(target_zone, source_zone, log_status, sync_status):
+ if len(log_status) != len(sync_status):
+ log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
+ return False
+
+ msg = ''
+ for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
+ if l > s:
+ if len(msg):
+ msg += ', '
+ msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s
+
+ if len(msg) > 0:
+ log.warning('data of zone %s behind zone %s: %s', target_zone.name, source_zone.name, msg)
+ return False
+
+ return True
+
+def compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status):
+ if len(log_status) != len(sync_status):
+ log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status))
+ return False
+
+ msg = ''
+ for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
+ if l > s:
+ if len(msg):
+ msg += ', '
+ msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s
+
+ if len(msg) > 0:
+ log.warning('bucket %s zone %s behind zone %s: %s', bucket_name, target_zone.name, source_zone.name, msg)
+ return False
+
+ return True
+
+def zone_data_checkpoint(target_zone, source_zone):
+ if not target_zone.syncs_from(source_zone.name):
+ return
+
+ log_status = data_source_log_status(source_zone)
+ log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name)
+
+ for _ in range(config.checkpoint_retries):
+ num_shards, sync_status = data_sync_status(target_zone, source_zone)
+
+ log.debug('log_status=%s', log_status)
+ log.debug('sync_status=%s', sync_status)
+
+ if compare_data_status(target_zone, source_zone, log_status, sync_status):
+ log.info('finished data checkpoint for target_zone=%s source_zone=%s',
+ target_zone.name, source_zone.name)
+ return
+ time.sleep(config.checkpoint_delay)
+
+ assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \
+ (target_zone.name, source_zone.name)
+
+def zonegroup_data_checkpoint(zonegroup_conns):
+ for source_conn in zonegroup_conns.rw_zones:
+ for target_conn in zonegroup_conns.zones:
+ if source_conn.zone == target_conn.zone:
+ continue
+ log.debug('data checkpoint: source=%s target=%s', source_conn.zone.name, target_conn.zone.name)
+ zone_data_checkpoint(target_conn.zone, source_conn.zone)
+
+def zone_bucket_checkpoint(target_zone, source_zone, bucket_name):
+ if not target_zone.syncs_from(source_zone.name):
+ return
+
+ cmd = ['bucket', 'sync', 'checkpoint']
+ cmd += ['--bucket', bucket_name, '--source-zone', source_zone.name]
+ retry_delay_ms = config.checkpoint_delay * 1000
+ timeout_sec = config.checkpoint_retries * config.checkpoint_delay
+ cmd += ['--retry-delay-ms', str(retry_delay_ms), '--timeout-sec', str(timeout_sec)]
+ cmd += target_zone.zone_args()
+ target_zone.cluster.admin(cmd, debug_rgw=1)
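+
+# With the default Config (checkpoint_delay=5, checkpoint_retries=60) the call
+# above amounts to running, against the target zone's cluster, roughly:
+#
+#   radosgw-admin bucket sync checkpoint --bucket <name> --source-zone <source> \
+#       --retry-delay-ms 5000 --timeout-sec 300 --rgw-zone <target> ...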
+
+def zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name):
+ for source_conn in zonegroup_conns.rw_zones:
+ for target_conn in zonegroup_conns.zones:
+ if source_conn.zone == target_conn.zone:
+ continue
+ log.debug('bucket checkpoint: source=%s target=%s bucket=%s', source_conn.zone.name, target_conn.zone.name, bucket_name)
+ zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket_name)
+ for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
+ if target_conn.zone.has_buckets():
+ target_conn.check_bucket_eq(source_conn, bucket_name)
+
+def set_master_zone(zone):
+ zone.modify(zone.cluster, ['--master'])
+ zonegroup = zone.zonegroup
+ zonegroup.period.update(zone, commit=True)
+ zonegroup.master_zone = zone
+ log.info('Set master zone=%s, waiting %ds for reconfiguration..', zone.name, config.reconfigure_delay)
+ time.sleep(config.reconfigure_delay)
+
+def set_sync_from_all(zone, flag):
+ s = 'true' if flag else 'false'
+ zone.modify(zone.cluster, ['--sync-from-all={}'.format(s)])
+ zonegroup = zone.zonegroup
+ zonegroup.period.update(zone, commit=True)
+ log.info('Set sync_from_all flag on zone %s to %s', zone.name, s)
+ time.sleep(config.reconfigure_delay)
+
+def set_redirect_zone(zone, redirect_zone):
+ id_str = redirect_zone.id if redirect_zone else ''
+ zone.modify(zone.cluster, ['--redirect-zone={}'.format(id_str)])
+ zonegroup = zone.zonegroup
+ zonegroup.period.update(zone, commit=True)
+ log.info('Set redirect_zone zone %s to "%s"', zone.name, id_str)
+ time.sleep(config.reconfigure_delay)
+
+def enable_bucket_sync(zone, bucket_name):
+ cmd = ['bucket', 'sync', 'enable', '--bucket', bucket_name] + zone.zone_args()
+ zone.cluster.admin(cmd)
+
+def disable_bucket_sync(zone, bucket_name):
+ cmd = ['bucket', 'sync', 'disable', '--bucket', bucket_name] + zone.zone_args()
+ zone.cluster.admin(cmd)
+
+def check_buckets_sync_status_obj_not_exist(zone, buckets):
+ for _ in range(config.checkpoint_retries):
+ cmd = ['log', 'list'] + zone.zone_arg()
+ log_list, ret = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
+ for bucket in buckets:
+ if log_list.find(':'+bucket+":") >= 0:
+ break
+ else:
+ return
+ time.sleep(config.checkpoint_delay)
+ assert False
+
+def gen_bucket_name():
+ global num_buckets
+
+ num_buckets += 1
+ return run_prefix + '-' + str(num_buckets)
+
+class ZonegroupConns:
+ def __init__(self, zonegroup):
+ self.zonegroup = zonegroup
+ self.zones = []
+ self.ro_zones = []
+ self.rw_zones = []
+ self.master_zone = None
+ for z in zonegroup.zones:
+ zone_conn = z.get_conn(user.credentials)
+ self.zones.append(zone_conn)
+ if z.is_read_only():
+ self.ro_zones.append(zone_conn)
+ else:
+ self.rw_zones.append(zone_conn)
+
+ if z == zonegroup.master_zone:
+ self.master_zone = zone_conn
+
+def check_all_buckets_exist(zone_conn, buckets):
+ if not zone_conn.zone.has_buckets():
+ return True
+
+ for b in buckets:
+ try:
+ zone_conn.get_bucket(b)
+ except:
+ log.critical('zone %s does not contain bucket %s', zone_conn.name, b)
+ return False
+
+ return True
+
+def check_all_buckets_dont_exist(zone_conn, buckets):
+ if not zone_conn.zone.has_buckets():
+ return True
+
+ for b in buckets:
+ try:
+ zone_conn.get_bucket(b)
+ except:
+ continue
+
+ log.critical('zone %s contains bucket %s', zone_conn.name, b)
+ return False
+
+ return True
+
+def create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 1):
+ buckets = []
+ zone_bucket = []
+ for zone in zonegroup_conns.rw_zones:
+ for i in range(buckets_per_zone):
+ bucket_name = gen_bucket_name()
+ log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
+ bucket = zone.create_bucket(bucket_name)
+ buckets.append(bucket_name)
+ zone_bucket.append((zone, bucket))
+
+ return buckets, zone_bucket
+
+def create_bucket_per_zone_in_realm():
+ buckets = []
+ zone_bucket = []
+ for zonegroup in realm.current_period.zonegroups:
+ zg_conn = ZonegroupConns(zonegroup)
+ b, z = create_bucket_per_zone(zg_conn)
+ buckets.extend(b)
+ zone_bucket.extend(z)
+ return buckets, zone_bucket
+
+def test_bucket_create():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, _ = create_bucket_per_zone(zonegroup_conns)
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for zone in zonegroup_conns.zones:
+ assert check_all_buckets_exist(zone, buckets)
+
+def test_bucket_recreate():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, _ = create_bucket_per_zone(zonegroup_conns)
+ zonegroup_meta_checkpoint(zonegroup)
+
+
+ for zone in zonegroup_conns.zones:
+ assert check_all_buckets_exist(zone, buckets)
+
+ # recreate buckets on all zones, make sure they weren't removed
+ for zone in zonegroup_conns.rw_zones:
+ for bucket_name in buckets:
+ bucket = zone.create_bucket(bucket_name)
+
+ for zone in zonegroup_conns.zones:
+ assert check_all_buckets_exist(zone, buckets)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for zone in zonegroup_conns.zones:
+ assert check_all_buckets_exist(zone, buckets)
+
+def test_bucket_remove():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for zone in zonegroup_conns.zones:
+ assert check_all_buckets_exist(zone, buckets)
+
+ for zone, bucket_name in zone_bucket:
+ zone.conn.delete_bucket(bucket_name)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for zone in zonegroup_conns.zones:
+ assert check_all_buckets_dont_exist(zone, buckets)
+
+def get_bucket(zone, bucket_name):
+ return zone.conn.get_bucket(bucket_name)
+
+def get_key(zone, bucket_name, obj_name):
+ b = get_bucket(zone, bucket_name)
+ return b.get_key(obj_name)
+
+def new_key(zone, bucket_name, obj_name):
+ b = get_bucket(zone, bucket_name)
+ return b.new_key(obj_name)
+
+def check_bucket_eq(zone_conn1, zone_conn2, bucket):
+ if zone_conn2.zone.has_buckets():
+ zone_conn2.check_bucket_eq(zone_conn1, bucket.name)
+
+def test_object_sync():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+ objnames = [ 'myobj', '_myobj', ':', '&' ]
+ content = 'asdasd'
+
+ # don't wait for meta sync just yet
+ for zone, bucket_name in zone_bucket:
+ for objname in objnames:
+ k = new_key(zone, bucket_name, objname)
+ k.set_contents_from_string(content)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for source_conn, bucket in zone_bucket:
+ for target_conn in zonegroup_conns.zones:
+ if source_conn.zone == target_conn.zone:
+ continue
+
+ zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
+ check_bucket_eq(source_conn, target_conn, bucket)
+
+def test_object_delete():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+ objname = 'myobj'
+ content = 'asdasd'
+
+ # don't wait for meta sync just yet
+ for zone, bucket in zone_bucket:
+ k = new_key(zone, bucket, objname)
+ k.set_contents_from_string(content)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ # check object exists
+ for source_conn, bucket in zone_bucket:
+ for target_conn in zonegroup_conns.zones:
+ if source_conn.zone == target_conn.zone:
+ continue
+
+ zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
+ check_bucket_eq(source_conn, target_conn, bucket)
+
+ # check object removal
+ for source_conn, bucket in zone_bucket:
+ k = get_key(source_conn, bucket, objname)
+ k.delete()
+ for target_conn in zonegroup_conns.zones:
+ if source_conn.zone == target_conn.zone:
+ continue
+
+ zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
+ check_bucket_eq(source_conn, target_conn, bucket)
+
+def get_latest_object_version(key):
+ for k in key.bucket.list_versions(key.name):
+ if k.is_latest:
+ return k
+ return None
+
+def test_versioned_object_incremental_sync():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+ # enable versioning
+ for _, bucket in zone_bucket:
+ bucket.configure_versioning(True)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ # upload a dummy object to each bucket and wait for sync. this forces each
+ # bucket to finish a full sync and switch to incremental
+ for source_conn, bucket in zone_bucket:
+ new_key(source_conn, bucket, 'dummy').set_contents_from_string('')
+ for target_conn in zonegroup_conns.zones:
+ if source_conn.zone == target_conn.zone:
+ continue
+ zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
+
+ for _, bucket in zone_bucket:
+ # create and delete multiple versions of an object from each zone
+ for zone_conn in zonegroup_conns.rw_zones:
+ obj = 'obj-' + zone_conn.name
+ k = new_key(zone_conn, bucket, obj)
+
+ k.set_contents_from_string('version1')
+ log.debug('version1 id=%s', k.version_id)
+ # don't delete version1 - this tests that the initial version
+ # doesn't get squashed into later versions
+
+ # create and delete the following object versions to test that
+ # the operations don't race with each other during sync
+ k.set_contents_from_string('version2')
+ log.debug('version2 id=%s', k.version_id)
+ k.bucket.delete_key(obj, version_id=k.version_id)
+
+ k.set_contents_from_string('version3')
+ log.debug('version3 id=%s', k.version_id)
+ k.bucket.delete_key(obj, version_id=k.version_id)
+
+ for _, bucket in zone_bucket:
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+
+ for _, bucket in zone_bucket:
+ # overwrite the acls to test that metadata-only entries are applied
+ for zone_conn in zonegroup_conns.rw_zones:
+ obj = 'obj-' + zone_conn.name
+ k = new_key(zone_conn, bucket.name, obj)
+ v = get_latest_object_version(k)
+ v.make_public()
+
+ for _, bucket in zone_bucket:
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+
+def test_concurrent_versioned_object_incremental_sync():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ zone = zonegroup_conns.rw_zones[0]
+
+ # create a versioned bucket
+ bucket = zone.create_bucket(gen_bucket_name())
+ log.debug('created bucket=%s', bucket.name)
+ bucket.configure_versioning(True)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ # upload a dummy object and wait for sync. this forces each zone to finish
+ # a full sync and switch to incremental
+ new_key(zone, bucket, 'dummy').set_contents_from_string('')
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+
+ # create several concurrent versions on each zone and let them race to sync
+ obj = 'obj'
+ for i in range(10):
+ for zone_conn in zonegroup_conns.rw_zones:
+ k = new_key(zone_conn, bucket, obj)
+ k.set_contents_from_string('version1')
+ log.debug('zone=%s version=%s', zone_conn.zone.name, k.version_id)
+
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+ zonegroup_data_checkpoint(zonegroup_conns)
+
+def test_version_suspended_incremental_sync():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+
+ zone = zonegroup_conns.rw_zones[0]
+
+ # create a non-versioned bucket
+ bucket = zone.create_bucket(gen_bucket_name())
+ log.debug('created bucket=%s', bucket.name)
+ zonegroup_meta_checkpoint(zonegroup)
+
+ # upload an initial object
+ key1 = new_key(zone, bucket, 'obj')
+ key1.set_contents_from_string('')
+ log.debug('created initial version id=%s', key1.version_id)
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+
+ # enable versioning
+ bucket.configure_versioning(True)
+ zonegroup_meta_checkpoint(zonegroup)
+
+ # re-upload the object as a new version
+ key2 = new_key(zone, bucket, 'obj')
+ key2.set_contents_from_string('')
+ log.debug('created new version id=%s', key2.version_id)
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+
+ # suspend versioning
+ bucket.configure_versioning(False)
+ zonegroup_meta_checkpoint(zonegroup)
+
+ # re-upload the object as a 'null' version
+ key3 = new_key(zone, bucket, 'obj')
+ key3.set_contents_from_string('')
+ log.debug('created null version id=%s', key3.version_id)
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+
+def test_delete_marker_full_sync():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+ # enable versioning
+ for _, bucket in zone_bucket:
+ bucket.configure_versioning(True)
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for zone, bucket in zone_bucket:
+ # upload an initial object
+ key1 = new_key(zone, bucket, 'obj')
+ key1.set_contents_from_string('')
+
+ # create a delete marker
+ key2 = new_key(zone, bucket, 'obj')
+ key2.delete()
+
+ # wait for full sync
+ for _, bucket in zone_bucket:
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+
+def test_suspended_delete_marker_full_sync():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+ # enable/suspend versioning
+ for _, bucket in zone_bucket:
+ bucket.configure_versioning(True)
+ bucket.configure_versioning(False)
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for zone, bucket in zone_bucket:
+ # upload an initial object
+ key1 = new_key(zone, bucket, 'obj')
+ key1.set_contents_from_string('')
+
+ # create a delete marker
+ key2 = new_key(zone, bucket, 'obj')
+ key2.delete()
+
+ # wait for full sync
+ for _, bucket in zone_bucket:
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+
+def test_bucket_versioning():
+ buckets, zone_bucket = create_bucket_per_zone_in_realm()
+ for _, bucket in zone_bucket:
+ bucket.configure_versioning(True)
+ res = bucket.get_versioning_status()
+ key = 'Versioning'
+ assert(key in res and res[key] == 'Enabled')
+
+def test_bucket_acl():
+ buckets, zone_bucket = create_bucket_per_zone_in_realm()
+ for _, bucket in zone_bucket:
+ assert(len(bucket.get_acl().acl.grants) == 1) # single grant on owner
+ bucket.set_acl('public-read')
+ assert(len(bucket.get_acl().acl.grants) == 2) # new grant on AllUsers
+
+def test_bucket_cors():
+ buckets, zone_bucket = create_bucket_per_zone_in_realm()
+ for _, bucket in zone_bucket:
+ cors_cfg = CORSConfiguration()
+ cors_cfg.add_rule(['DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000)
+ bucket.set_cors(cors_cfg)
+ assert(bucket.get_cors().to_xml() == cors_cfg.to_xml())
+
+def test_bucket_delete_notempty():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for zone_conn, bucket_name in zone_bucket:
+ # upload an object to each bucket on its own zone
+ conn = zone_conn.get_connection()
+ bucket = conn.get_bucket(bucket_name)
+ k = bucket.new_key('foo')
+ k.set_contents_from_string('bar')
+ # attempt to delete the bucket before this object can sync
+ try:
+ conn.delete_bucket(bucket_name)
+ except boto.exception.S3ResponseError as e:
+ assert(e.error_code == 'BucketNotEmpty')
+ continue
+ assert False # expected 409 BucketNotEmpty
+
+ # assert that each bucket still exists on the master
+ c1 = zonegroup_conns.master_zone.conn
+ for _, bucket_name in zone_bucket:
+ assert c1.get_bucket(bucket_name)
+
+def test_multi_period_incremental_sync():
+ zonegroup = realm.master_zonegroup()
+ if len(zonegroup.zones) < 3:
+ raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.")
+
+ # periods to include in mdlog comparison
+ mdlog_periods = [realm.current_period.id]
+
+ # create a bucket in each zone
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ z1, z2, z3 = zonegroup.zones[0:3]
+ assert(z1 == zonegroup.master_zone)
+
+ # kill zone 3 gateways to freeze sync status to incremental in first period
+ z3.stop()
+
+ # change master to zone 2 -> period 2
+ set_master_zone(z2)
+ mdlog_periods += [realm.current_period.id]
+
+ for zone_conn, _ in zone_bucket:
+ if zone_conn.zone == z3:
+ continue
+ bucket_name = gen_bucket_name()
+ log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
+ bucket = zone_conn.conn.create_bucket(bucket_name)
+ buckets.append(bucket_name)
+
+ # wait for zone 1 to sync
+ zone_meta_checkpoint(z1)
+
+ # change master back to zone 1 -> period 3
+ set_master_zone(z1)
+ mdlog_periods += [realm.current_period.id]
+
+ for zone_conn, bucket_name in zone_bucket:
+ if zone_conn.zone == z3:
+ continue
+ bucket_name = gen_bucket_name()
+ log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name)
+ zone_conn.conn.create_bucket(bucket_name)
+ buckets.append(bucket_name)
+
+ # restart zone 3 gateway and wait for sync
+ z3.start()
+ zonegroup_meta_checkpoint(zonegroup)
+
+ # verify that we end up with the same objects
+ for bucket_name in buckets:
+ for source_conn, _ in zone_bucket:
+ for target_conn in zonegroup_conns.zones:
+ if source_conn.zone == target_conn.zone:
+ continue
+
+ if target_conn.zone.has_buckets():
+ target_conn.check_bucket_eq(source_conn, bucket_name)
+
+ # verify that mdlogs are not empty and match for each period
+ for period in mdlog_periods:
+ master_mdlog = mdlog_list(z1, period)
+ assert len(master_mdlog) > 0
+ for zone in zonegroup.zones:
+ if zone == z1:
+ continue
+ mdlog = mdlog_list(zone, period)
+ assert len(mdlog) == len(master_mdlog)
+
+ # autotrim mdlogs for master zone
+ mdlog_autotrim(z1)
+
+ # autotrim mdlogs for peers
+ for zone in zonegroup.zones:
+ if zone == z1:
+ continue
+ mdlog_autotrim(zone)
+
+ # verify that mdlogs are empty for each period
+ for period in mdlog_periods:
+ for zone in zonegroup.zones:
+ mdlog = mdlog_list(zone, period)
+ assert len(mdlog) == 0
+
+def test_datalog_autotrim():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+ # upload an object to each zone to generate a datalog entry
+ for zone, bucket in zone_bucket:
+ k = new_key(zone, bucket.name, 'key')
+ k.set_contents_from_string('body')
+
+ # wait for metadata and data sync to catch up
+ zonegroup_meta_checkpoint(zonegroup)
+ zonegroup_data_checkpoint(zonegroup_conns)
+
+ # trim each datalog
+ for zone, _ in zone_bucket:
+ # read max markers for each shard
+ status = datalog_status(zone.zone)
+
+ datalog_autotrim(zone.zone)
+
+ for shard_id, shard_status in enumerate(status):
+ try:
+ before_trim = dateutil.parser.isoparse(shard_status['last_update'])
+ except: # empty timestamps look like "0.000000" and will fail here
+ continue
+ entries = datalog_list(zone.zone, ['--shard-id', str(shard_id), '--max-entries', '1'])
+ if not len(entries):
+ continue
+ after_trim = dateutil.parser.isoparse(entries[0]['timestamp'])
+ assert before_trim < after_trim, "any datalog entries must be newer than trim"
+
+def test_multi_zone_redirect():
+ zonegroup = realm.master_zonegroup()
+ if len(zonegroup.rw_zones) < 2:
+ raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.")
+
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ (zc1, zc2) = zonegroup_conns.rw_zones[0:2]
+
+ z1, z2 = (zc1.zone, zc2.zone)
+
+ set_sync_from_all(z2, False)
+
+ # create a bucket on the first zone
+ bucket_name = gen_bucket_name()
+ log.info('create bucket zone=%s name=%s', z1.name, bucket_name)
+ bucket = zc1.conn.create_bucket(bucket_name)
+ obj = 'testredirect'
+
+ key = bucket.new_key(obj)
+ data = 'A'*512
+ key.set_contents_from_string(data)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ # try to read object from second zone (should fail)
+ bucket2 = get_bucket(zc2, bucket_name)
+ assert_raises(boto.exception.S3ResponseError, bucket2.get_key, obj)
+
+ set_redirect_zone(z2, z1)
+
+ key2 = bucket2.get_key(obj)
+
+ eq(data, key2.get_contents_as_string(encoding='ascii'))
+
+ key = bucket.new_key(obj)
+
+ for x in ['a', 'b', 'c', 'd']:
+ data = x*512
+ key.set_contents_from_string(data)
+ eq(data, key2.get_contents_as_string(encoding='ascii'))
+
+ # revert config changes
+ set_sync_from_all(z2, True)
+ set_redirect_zone(z2, None)
+
+def test_zonegroup_remove():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ if len(zonegroup.zones) < 2:
+ raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.")
+
+ zonegroup_meta_checkpoint(zonegroup)
+ z1, z2 = zonegroup.zones[0:2]
+ c1, c2 = (z1.cluster, z2.cluster)
+
+ # get admin credentials out of existing zone
+ system_key = z1.data['system_key']
+ admin_creds = Credentials(system_key['access_key'], system_key['secret_key'])
+
+ # create a new zone in zonegroup on c2 and commit
+ zone = Zone('remove', zonegroup, c2)
+ zone.create(c2, admin_creds.credential_args())
+ zonegroup.zones.append(zone)
+ zonegroup.period.update(zone, commit=True)
+
+ zonegroup.remove(c1, zone)
+
+ # another 'zonegroup remove' should fail with ENOENT
+ _, retcode = zonegroup.remove(c1, zone, check_retcode=False)
+ assert(retcode == 2) # ENOENT
+
+ # delete the new zone
+ zone.delete(c2)
+
+ # validate the resulting period
+ zonegroup.period.update(z1, commit=True)
+
+
+def test_zg_master_zone_delete():
+
+ master_zg = realm.master_zonegroup()
+ master_zone = master_zg.master_zone
+
+ assert(len(master_zg.zones) >= 1)
+ master_cluster = master_zg.zones[0].cluster
+
+ rm_zg = ZoneGroup('remove_zg')
+ rm_zg.create(master_cluster)
+
+ rm_zone = Zone('remove', rm_zg, master_cluster)
+ rm_zone.create(master_cluster)
+ master_zg.period.update(master_zone, commit=True)
+
+
+ rm_zone.delete(master_cluster)
+ # Period update: this should now fail, because the deleted zone was the
+ # master zone of that zonegroup and the period would still reference it
+ _, retcode = master_zg.period.update(master_zone, check_retcode=False)
+ assert(retcode == errno.EINVAL)
+
+ # Delete the zonegroup as well; the period then no longer references a
+ # dangling master zone, so this update must succeed
+ rm_zg.delete(master_cluster)
+ master_zg.period.update(master_zone, commit=True)
+
+def test_set_bucket_website():
+ buckets, zone_bucket = create_bucket_per_zone_in_realm()
+ for _, bucket in zone_bucket:
+ website_cfg = WebsiteConfiguration(suffix='index.html',error_key='error.html')
+ try:
+ bucket.set_website_configuration(website_cfg)
+ except boto.exception.S3ResponseError as e:
+ if e.error_code == 'MethodNotAllowed':
+ raise SkipTest("test_set_bucket_website skipped. Requires rgw_enable_static_website = 1.")
+ assert(bucket.get_website_configuration_with_xml()[1] == website_cfg.to_xml())
+
+def test_set_bucket_policy():
+ policy = '''{
+ "Version": "2012-10-17",
+ "Statement": [{
+ "Effect": "Allow",
+ "Principal": "*"
+ }]
+}'''
+ buckets, zone_bucket = create_bucket_per_zone_in_realm()
+ for _, bucket in zone_bucket:
+ bucket.set_policy(policy)
+ assert(bucket.get_policy().decode('ascii') == policy)
+
+def test_bucket_sync_disable():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for bucket_name in buckets:
+ disable_bucket_sync(realm.meta_master_zone(), bucket_name)
+
+ for zone in zonegroup.zones:
+ check_buckets_sync_status_obj_not_exist(zone, buckets)
+
+ zonegroup_data_checkpoint(zonegroup_conns)
+
+def test_bucket_sync_enable_right_after_disable():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+ objnames = ['obj1', 'obj2', 'obj3', 'obj4']
+ content = 'asdasd'
+
+ for zone, bucket in zone_bucket:
+ for objname in objnames:
+ k = new_key(zone, bucket.name, objname)
+ k.set_contents_from_string(content)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for bucket_name in buckets:
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)
+
+ for bucket_name in buckets:
+ disable_bucket_sync(realm.meta_master_zone(), bucket_name)
+ enable_bucket_sync(realm.meta_master_zone(), bucket_name)
+
+ objnames_2 = ['obj5', 'obj6', 'obj7', 'obj8']
+
+ for zone, bucket in zone_bucket:
+ for objname in objnames_2:
+ k = new_key(zone, bucket.name, objname)
+ k.set_contents_from_string(content)
+
+ for bucket_name in buckets:
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)
+
+ zonegroup_data_checkpoint(zonegroup_conns)
+
+def test_bucket_sync_disable_enable():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+ objnames = [ 'obj1', 'obj2', 'obj3', 'obj4' ]
+ content = 'asdasd'
+
+ for zone, bucket in zone_bucket:
+ for objname in objnames:
+ k = new_key(zone, bucket.name, objname)
+ k.set_contents_from_string(content)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ for bucket_name in buckets:
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)
+
+ for bucket_name in buckets:
+ disable_bucket_sync(realm.meta_master_zone(), bucket_name)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ objnames_2 = [ 'obj5', 'obj6', 'obj7', 'obj8' ]
+
+ for zone, bucket in zone_bucket:
+ for objname in objnames_2:
+ k = new_key(zone, bucket.name, objname)
+ k.set_contents_from_string(content)
+
+ for bucket_name in buckets:
+ enable_bucket_sync(realm.meta_master_zone(), bucket_name)
+
+ for bucket_name in buckets:
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)
+
+ zonegroup_data_checkpoint(zonegroup_conns)
+
+def test_multipart_object_sync():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+ _, bucket = zone_bucket[0]
+
+ # initiate a multipart upload
+ upload = bucket.initiate_multipart_upload('MULTIPART')
+ mp = boto.s3.multipart.MultiPartUpload(bucket)
+ mp.key_name = upload.key_name
+ mp.id = upload.id
+ part_size = 5 * 1024 * 1024 # 5M min part size
+ mp.upload_part_from_file(StringIO('a' * part_size), 1)
+ mp.upload_part_from_file(StringIO('b' * part_size), 2)
+ mp.upload_part_from_file(StringIO('c' * part_size), 3)
+ mp.upload_part_from_file(StringIO('d' * part_size), 4)
+ mp.complete_upload()
+
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+
+def test_encrypted_object_sync():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+
+ if len(zonegroup.rw_zones) < 2:
+ raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.")
+
+ (zone1, zone2) = zonegroup_conns.rw_zones[0:2]
+
+ # create a bucket on the first zone
+ bucket_name = gen_bucket_name()
+ log.info('create bucket zone=%s name=%s', zone1.name, bucket_name)
+ bucket = zone1.conn.create_bucket(bucket_name)
+
+ # upload an object with sse-c encryption
+ sse_c_headers = {
+ 'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+ 'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+ 'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+ }
+ key = bucket.new_key('testobj-sse-c')
+ data = 'A'*512
+ key.set_contents_from_string(data, headers=sse_c_headers)
+
+ # upload an object with sse-kms encryption
+ sse_kms_headers = {
+ 'x-amz-server-side-encryption': 'aws:kms',
+ # testkey-1 must be present in 'rgw crypt s3 kms encryption keys' (vstart.sh adds this)
+ 'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1',
+ }
+ key = bucket.new_key('testobj-sse-kms')
+ key.set_contents_from_string(data, headers=sse_kms_headers)
+
+ # wait for the bucket metadata and data to sync
+ zonegroup_meta_checkpoint(zonegroup)
+ zone_bucket_checkpoint(zone2.zone, zone1.zone, bucket_name)
+
+ # read the encrypted objects from the second zone
+ bucket2 = get_bucket(zone2, bucket_name)
+ key = bucket2.get_key('testobj-sse-c', headers=sse_c_headers)
+ eq(data, key.get_contents_as_string(headers=sse_c_headers, encoding='ascii'))
+
+ key = bucket2.get_key('testobj-sse-kms')
+ eq(data, key.get_contents_as_string(encoding='ascii'))
+
+def test_bucket_index_log_trim():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+
+ zone = zonegroup_conns.rw_zones[0]
+
+ # create a test bucket, upload some objects, and wait for sync
+ def make_test_bucket():
+ name = gen_bucket_name()
+ log.info('create bucket zone=%s name=%s', zone.name, name)
+ bucket = zone.conn.create_bucket(name)
+ for objname in ('a', 'b', 'c', 'd'):
+ k = new_key(zone, name, objname)
+ k.set_contents_from_string('foo')
+ zonegroup_meta_checkpoint(zonegroup)
+ zonegroup_bucket_checkpoint(zonegroup_conns, name)
+ return bucket
+
+ # create a 'cold' bucket
+ cold_bucket = make_test_bucket()
+
+ # trim with max-buckets=0 to clear counters for cold bucket. this should
+ # prevent it from being considered 'active' by the next autotrim
+ bilog_autotrim(zone.zone, [
+ '--rgw-sync-log-trim-max-buckets', '0',
+ ])
+
+ # create an 'active' bucket
+ active_bucket = make_test_bucket()
+
+ # trim with max-buckets=1 min-cold-buckets=0 to trim active bucket only
+ bilog_autotrim(zone.zone, [
+ '--rgw-sync-log-trim-max-buckets', '1',
+ '--rgw-sync-log-trim-min-cold-buckets', '0',
+ ])
+
+ # verify active bucket has empty bilog
+ active_bilog = bilog_list(zone.zone, active_bucket.name)
+ assert(len(active_bilog) == 0)
+
+ # verify cold bucket has nonempty bilog
+ cold_bilog = bilog_list(zone.zone, cold_bucket.name)
+ assert(len(cold_bilog) > 0)
+
+    # trim with max-buckets=999 and min-cold-buckets=999 to trim all buckets
+ bilog_autotrim(zone.zone, [
+ '--rgw-sync-log-trim-max-buckets', '999',
+ '--rgw-sync-log-trim-min-cold-buckets', '999',
+ ])
+
+ # verify cold bucket has empty bilog
+ cold_bilog = bilog_list(zone.zone, cold_bucket.name)
+ assert(len(cold_bilog) == 0)
+
+def test_bucket_creation_time():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+
+ zone_buckets = [zone.get_connection().get_all_buckets() for zone in zonegroup_conns.rw_zones]
+ for z1, z2 in combinations(zone_buckets, 2):
+ for a, b in zip(z1, z2):
+ eq(a.name, b.name)
+ eq(a.creation_date, b.creation_date)
diff --git a/src/test/rgw/rgw_multi/tests_az.py b/src/test/rgw/rgw_multi/tests_az.py
new file mode 100644
index 000000000..13ec832a2
--- /dev/null
+++ b/src/test/rgw/rgw_multi/tests_az.py
@@ -0,0 +1,597 @@
+import logging
+
+from nose import SkipTest
+from nose.tools import assert_not_equal, assert_equal
+
+from boto.s3.deletemarker import DeleteMarker
+
+from .tests import get_realm, \
+ ZonegroupConns, \
+ zonegroup_meta_checkpoint, \
+ zone_meta_checkpoint, \
+ zone_bucket_checkpoint, \
+ zone_data_checkpoint, \
+ zonegroup_bucket_checkpoint, \
+ check_bucket_eq, \
+ gen_bucket_name, \
+ get_user, \
+ get_tenant
+
+from .zone_az import print_connection_info
+
+
+# configure logging for the tests module
+log = logging.getLogger(__name__)
+
+
+##########################################
+# utility functions for archive zone tests
+##########################################
+
+def check_az_configured():
+ """check if at least one archive zone exist"""
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ az_zones = zonegroup.zones_by_type.get("archive")
+ if az_zones is None or len(az_zones) != 1:
+ raise SkipTest("Requires one archive zone")
+
+
+def is_az_zone(zone_conn):
+ """check if a specific zone is archive zone"""
+ if not zone_conn:
+ return False
+ return zone_conn.zone.tier_type() == "archive"
+
+
+def init_env():
+ """initialize the environment"""
+ check_az_configured()
+
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ az_zones = []
+ zones = []
+ for conn in zonegroup_conns.zones:
+ if is_az_zone(conn):
+ zone_meta_checkpoint(conn.zone)
+ az_zones.append(conn)
+ elif not conn.zone.is_read_only():
+ zones.append(conn)
+
+ assert_not_equal(len(zones), 0)
+ assert_not_equal(len(az_zones), 0)
+ return zones, az_zones
+
+
+def zone_full_checkpoint(target_zone, source_zone):
+ zone_meta_checkpoint(target_zone)
+ zone_data_checkpoint(target_zone, source_zone)
+
+
+def check_bucket_exists_on_zone(zone, bucket_name):
+ try:
+ zone.conn.get_bucket(bucket_name)
+ except:
+ return False
+ return True
+
+
+def check_key_exists(key):
+ try:
+ key.get_contents_as_string()
+ except:
+ return False
+ return True
+
+
+def get_versioning_status(bucket):
+ res = bucket.get_versioning_status()
+ key = 'Versioning'
+ if not key in res:
+ return None
+ else:
+ return res[key]
+
+
+def get_versioned_objs(bucket):
+ b = []
+ for b_entry in bucket.list_versions():
+ if isinstance(b_entry, DeleteMarker):
+ continue
+ d = {}
+ d['version_id'] = b_entry.version_id
+ d['size'] = b_entry.size
+ d['etag'] = b_entry.etag
+ d['is_latest'] = b_entry.is_latest
+ b.append({b_entry.key:d})
+ return b
+
+
+def get_versioned_entries(bucket):
+ dm = []
+ ver = []
+ for b_entry in bucket.list_versions():
+ if isinstance(b_entry, DeleteMarker):
+ d = {}
+ d['version_id'] = b_entry.version_id
+ d['is_latest'] = b_entry.is_latest
+ dm.append({b_entry.name:d})
+ else:
+ d = {}
+ d['version_id'] = b_entry.version_id
+ d['size'] = b_entry.size
+ d['etag'] = b_entry.etag
+ d['is_latest'] = b_entry.is_latest
+ ver.append({b_entry.key:d})
+ return (dm, ver)
+
+
+def get_number_buckets_by_zone(zone):
+ return len(zone.conn.get_all_buckets())
+
+
+def get_bucket_names_by_zone(zone):
+ return [b.name for b in zone.conn.get_all_buckets()]
+
+
+def get_full_bucket_name(partial_bucket_name, bucket_names_az):
+ full_bucket_name = None
+ for bucket_name in bucket_names_az:
+ if bucket_name.startswith(partial_bucket_name):
+ full_bucket_name = bucket_name
+ break
+ return full_bucket_name
+
+
+####################
+# archive zone tests
+####################
+
+
+def test_az_info():
+ """ log information for manual testing """
+ return SkipTest("only used in manual testing")
+ zones, az_zones = init_env()
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ bucket_name = gen_bucket_name()
+ # create bucket on the first of the rados zones
+ bucket = zones[0].create_bucket(bucket_name)
+ # create objects in the bucket
+ number_of_objects = 3
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ print('Zonegroup: ' + zonegroup.name)
+ print('user: ' + get_user())
+ print('tenant: ' + get_tenant())
+ print('Master Zone')
+ print_connection_info(zones[0].conn)
+ print('Archive Zone')
+ print_connection_info(az_zones[0].conn)
+ print('Bucket: ' + bucket_name)
+
+
+def test_az_create_empty_bucket():
+ """ test empty bucket replication """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # create bucket on the non archive zone
+ zones[0].create_bucket(bucket_name)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+    # check that the bucket exists on the archive zone
+ p = check_bucket_exists_on_zone(az_zones[0], bucket_name)
+ assert_equal(p, True)
+
+
+def test_az_check_empty_bucket_versioning():
+ """ test bucket vesioning with empty bucket """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # create bucket on the non archive zone
+ bucket = zones[0].create_bucket(bucket_name)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # get bucket on archive zone
+ bucket_az = az_zones[0].conn.get_bucket(bucket_name)
+    # check that versioning is not enabled on either bucket
+ p1 = get_versioning_status(bucket) is None
+ assert_equal(p1, True)
+ p2 = get_versioning_status(bucket_az) is None
+ assert_equal(p2, True)
+
+
+def test_az_object_replication():
+ """ test object replication """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # create bucket on the non archive zone
+ bucket = zones[0].create_bucket(bucket_name)
+ key = bucket.new_key("foo")
+ key.set_contents_from_string("bar")
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check object on archive zone
+ bucket_az = az_zones[0].conn.get_bucket(bucket_name)
+ key_az = bucket_az.get_key("foo")
+ p1 = key_az.get_contents_as_string(encoding='ascii') == "bar"
+ assert_equal(p1, True)
+
+
+def test_az_object_replication_versioning():
+ """ test object replication versioning """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # create object on the non archive zone
+ bucket = zones[0].create_bucket(bucket_name)
+ key = bucket.new_key("foo")
+ key.set_contents_from_string("bar")
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check object content on archive zone
+ bucket_az = az_zones[0].conn.get_bucket(bucket_name)
+ key_az = bucket_az.get_key("foo")
+ p1 = key_az.get_contents_as_string(encoding='ascii') == "bar"
+ assert_equal(p1, True)
+ # grab object versioning and etag
+ for b_version in bucket.list_versions():
+ b_version_id = b_version.version_id
+ b_version_etag = b_version.etag
+ for b_az_version in bucket_az.list_versions():
+ b_az_version_id = b_az_version.version_id
+ b_az_version_etag = b_az_version.etag
+ # check
+ p2 = b_version_id == 'null'
+ assert_equal(p2, True)
+ p3 = b_az_version_id != 'null'
+ assert_equal(p3, True)
+ p4 = b_version_etag == b_az_version_etag
+ assert_equal(p4, True)
+
+
+def test_az_lazy_activation_of_versioned_bucket():
+ """ test lazy activation of versioned bucket """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # create object on the non archive zone
+ bucket = zones[0].create_bucket(bucket_name)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # get bucket on archive zone
+ bucket_az = az_zones[0].conn.get_bucket(bucket_name)
+    # check that versioning is not enabled on either bucket
+ p1 = get_versioning_status(bucket) is None
+ assert_equal(p1, True)
+ p2 = get_versioning_status(bucket_az) is None
+ assert_equal(p2, True)
+ # create object on non archive zone
+ key = bucket.new_key("foo")
+ key.set_contents_from_string("bar")
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check lazy versioned buckets
+ p3 = get_versioning_status(bucket) is None
+ assert_equal(p3, True)
+ p4 = get_versioning_status(bucket_az) == 'Enabled'
+ assert_equal(p4, True)
+
+
+def test_az_archive_zone_double_object_replication_versioning():
+ """ test archive zone double object replication versioning """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # create object on the non archive zone
+ bucket = zones[0].create_bucket(bucket_name)
+ key = bucket.new_key("foo")
+ key.set_contents_from_string("bar")
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # get bucket on archive zone
+ bucket_az = az_zones[0].conn.get_bucket(bucket_name)
+    # check versioning status: not enabled on the source, enabled on the archive zone
+ p1 = get_versioning_status(bucket) is None
+ assert_equal(p1, True)
+ p2 = get_versioning_status(bucket_az) == 'Enabled'
+ assert_equal(p2, True)
+ # overwrite object on non archive zone
+ key = bucket.new_key("foo")
+ key.set_contents_from_string("ouch")
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check lazy versioned buckets
+ p3 = get_versioning_status(bucket) is None
+ assert_equal(p3, True)
+ p4 = get_versioning_status(bucket_az) == 'Enabled'
+ assert_equal(p4, True)
+ # get versioned objects
+ objs = get_versioned_objs(bucket)
+ objs_az = get_versioned_objs(bucket_az)
+ # check version_id, size, and is_latest on non archive zone
+ p5 = objs[0]['foo']['version_id'] == 'null'
+ assert_equal(p5, True)
+ p6 = objs[0]['foo']['size'] == 4
+ assert_equal(p6, True)
+ p7 = objs[0]['foo']['is_latest'] == True
+ assert_equal(p7, True)
+ # check version_id, size, is_latest on archive zone
+ latest_obj_az_etag = None
+ for obj_az in objs_az:
+ current_obj_az = obj_az['foo']
+ if current_obj_az['is_latest'] == True:
+ p8 = current_obj_az['size'] == 4
+ assert_equal(p8, True)
+ latest_obj_az_etag = current_obj_az['etag']
+ else:
+ p9 = current_obj_az['size'] == 3
+ assert_equal(p9, True)
+ assert_not_equal(current_obj_az['version_id'], 'null')
+ # check last versions' etags
+ p10 = objs[0]['foo']['etag'] == latest_obj_az_etag
+ assert_equal(p10, True)
+
+
+def test_az_deleted_object_replication():
+ """ test zone deleted object replication """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # create object on the non archive zone
+ bucket = zones[0].create_bucket(bucket_name)
+ key = bucket.new_key("foo")
+ key.set_contents_from_string("bar")
+ p1 = key.get_contents_as_string(encoding='ascii') == "bar"
+ assert_equal(p1, True)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # update object on non archive zone
+ key.set_contents_from_string("soup")
+ p2 = key.get_contents_as_string(encoding='ascii') == "soup"
+ assert_equal(p2, True)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # delete object on non archive zone
+ key.delete()
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check object on non archive zone
+ p3 = check_key_exists(key) == False
+ assert_equal(p3, True)
+ # check objects on archive zone
+ bucket_az = az_zones[0].conn.get_bucket(bucket_name)
+ key_az = bucket_az.get_key("foo")
+ p4 = check_key_exists(key_az) == True
+ assert_equal(p4, True)
+ p5 = key_az.get_contents_as_string(encoding='ascii') == "soup"
+ assert_equal(p5, True)
+ b_ver_az = get_versioned_objs(bucket_az)
+ p6 = len(b_ver_az) == 2
+ assert_equal(p6, True)
+
+
+def test_az_bucket_renaming_on_empty_bucket_deletion():
+ """ test bucket renaming on empty bucket deletion """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # grab number of buckets on non archive zone
+ num_buckets = get_number_buckets_by_zone(zones[0])
+ # grab number of buckets on archive zone
+ num_buckets_az = get_number_buckets_by_zone(az_zones[0])
+ # create bucket on non archive zone
+ bucket = zones[0].create_bucket(bucket_name)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # delete bucket in non archive zone
+ zones[0].delete_bucket(bucket_name)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check no new buckets on non archive zone
+ p1 = get_number_buckets_by_zone(zones[0]) == num_buckets
+ assert_equal(p1, True)
+    # check that the bucket was not deleted on the archive zone
+ p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
+ assert_equal(p2, True)
+ # check bucket renaming
+ bucket_names_az = get_bucket_names_by_zone(az_zones[0])
+ new_bucket_name = bucket_name + '-deleted-'
+ p3 = any(bucket_name.startswith(new_bucket_name) for bucket_name in bucket_names_az)
+ assert_equal(p3, True)
+
+
+def test_az_old_object_version_in_archive_zone():
+ """ test old object version in archive zone """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # grab number of buckets on non archive zone
+ num_buckets = get_number_buckets_by_zone(zones[0])
+ # grab number of buckets on archive zone
+ num_buckets_az = get_number_buckets_by_zone(az_zones[0])
+ # create bucket on non archive zone
+ bucket = zones[0].create_bucket(bucket_name)
+ # create object on non archive zone
+ key = bucket.new_key("foo")
+ key.set_contents_from_string("zero")
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # save object version on archive zone
+ bucket_az = az_zones[0].conn.get_bucket(bucket_name)
+ b_ver_az = get_versioned_objs(bucket_az)
+ obj_az_version_id = b_ver_az[0]['foo']['version_id']
+ # update object on non archive zone
+ key.set_contents_from_string("one")
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # delete object on non archive zone
+ key.delete()
+ # delete bucket on non archive zone
+ zones[0].delete_bucket(bucket_name)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check same buckets on non archive zone
+ p1 = get_number_buckets_by_zone(zones[0]) == num_buckets
+ assert_equal(p1, True)
+ # check for new bucket on archive zone
+ p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
+ assert_equal(p2, True)
+ # get new bucket name on archive zone
+ bucket_names_az = get_bucket_names_by_zone(az_zones[0])
+ new_bucket_name_az = get_full_bucket_name(bucket_name + '-deleted-', bucket_names_az)
+ p3 = new_bucket_name_az is not None
+ assert_equal(p3, True)
+ # check number of objects on archive zone
+ new_bucket_az = az_zones[0].conn.get_bucket(new_bucket_name_az)
+ new_b_ver_az = get_versioned_objs(new_bucket_az)
+ p4 = len(new_b_ver_az) == 2
+ assert_equal(p4, True)
+ # check versioned objects on archive zone
+ new_key_az = new_bucket_az.get_key("foo", version_id=obj_az_version_id)
+ p5 = new_key_az.get_contents_as_string(encoding='ascii') == "zero"
+ assert_equal(p5, True)
+ new_key_latest_az = new_bucket_az.get_key("foo")
+ p6 = new_key_latest_az.get_contents_as_string(encoding='ascii') == "one"
+ assert_equal(p6, True)
+
+
+def test_az_force_bucket_renaming_if_same_bucket_name():
+ """ test force bucket renaming if same bucket name """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # grab number of buckets on non archive zone
+ num_buckets = get_number_buckets_by_zone(zones[0])
+ # grab number of buckets on archive zone
+ num_buckets_az = get_number_buckets_by_zone(az_zones[0])
+ # create bucket on non archive zone
+ bucket = zones[0].create_bucket(bucket_name)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check same buckets on non archive zone
+ p1 = get_number_buckets_by_zone(zones[0]) == (num_buckets + 1)
+ assert_equal(p1, True)
+ # check for new bucket on archive zone
+ p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
+ assert_equal(p2, True)
+ # delete bucket on non archive zone
+ zones[0].delete_bucket(bucket_name)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check number of buckets on non archive zone
+ p3 = get_number_buckets_by_zone(zones[0]) == num_buckets
+ assert_equal(p3, True)
+ # check number of buckets on archive zone
+ p4 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1)
+ assert_equal(p4, True)
+ # get new bucket name on archive zone
+ bucket_names_az = get_bucket_names_by_zone(az_zones[0])
+ new_bucket_name_az = get_full_bucket_name(bucket_name + '-deleted-', bucket_names_az)
+ p5 = new_bucket_name_az is not None
+ assert_equal(p5, True)
+ # create bucket on non archive zone
+ _ = zones[0].create_bucket(new_bucket_name_az)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check number of buckets on non archive zone
+ p6 = get_number_buckets_by_zone(zones[0]) == (num_buckets + 1)
+ assert_equal(p6, True)
+ # check number of buckets on archive zone
+ p7 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 2)
+ assert_equal(p7, True)
+
+
+def test_az_versioning_support_in_zones():
+ """ test versioning support on zones """
+ zones, az_zones = init_env()
+ bucket_name = gen_bucket_name()
+ # create bucket on non archive zone
+ bucket = zones[0].create_bucket(bucket_name)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # get bucket on archive zone
+ bucket_az = az_zones[0].conn.get_bucket(bucket_name)
+    # check that neither bucket has versioning enabled
+ p1 = get_versioning_status(bucket) is None
+ assert_equal(p1, True)
+ p2 = get_versioning_status(bucket_az) is None
+ assert_equal(p2, True)
+ # create object on non archive zone
+ key = bucket.new_key("foo")
+ key.set_contents_from_string("zero")
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check bucket versioning
+ p3 = get_versioning_status(bucket) is None
+ assert_equal(p3, True)
+ p4 = get_versioning_status(bucket_az) == 'Enabled'
+ assert_equal(p4, True)
+ # enable bucket versioning on non archive zone
+ bucket.configure_versioning(True)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check bucket versioning
+ p5 = get_versioning_status(bucket) == 'Enabled'
+ assert_equal(p5, True)
+ p6 = get_versioning_status(bucket_az) == 'Enabled'
+ assert_equal(p6, True)
+ # delete object on non archive zone
+ key.delete()
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check delete-markers and versions on non archive zone
+ (b_dm, b_ver) = get_versioned_entries(bucket)
+ p7 = len(b_dm) == 1
+ assert_equal(p7, True)
+ p8 = len(b_ver) == 1
+ assert_equal(p8, True)
+ # check delete-markers and versions on archive zone
+ (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az)
+ p9 = len(b_dm_az) == 1
+ assert_equal(p9, True)
+ p10 = len(b_ver_az) == 1
+ assert_equal(p10, True)
+ # delete delete-marker on non archive zone
+ dm_version_id = b_dm[0]['foo']['version_id']
+ bucket.delete_key("foo", version_id=dm_version_id)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check delete-markers and versions on non archive zone
+ (b_dm, b_ver) = get_versioned_entries(bucket)
+ p11 = len(b_dm) == 0
+ assert_equal(p11, True)
+ p12 = len(b_ver) == 1
+ assert_equal(p12, True)
+ # check delete-markers and versions on archive zone
+ (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az)
+ p13 = len(b_dm_az) == 1
+ assert_equal(p13, True)
+ p14 = len(b_ver_az) == 1
+ assert_equal(p14, True)
+ # delete delete-marker on archive zone
+ dm_az_version_id = b_dm_az[0]['foo']['version_id']
+ bucket_az.delete_key("foo", version_id=dm_az_version_id)
+ # sync
+ zone_full_checkpoint(az_zones[0].zone, zones[0].zone)
+ # check delete-markers and versions on non archive zone
+ (b_dm, b_ver) = get_versioned_entries(bucket)
+ p15 = len(b_dm) == 0
+ assert_equal(p15, True)
+ p16 = len(b_ver) == 1
+ assert_equal(p16, True)
+ # check delete-markers and versions on archive zone
+ (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az)
+ p17 = len(b_dm_az) == 0
+ assert_equal(p17, True)
+    p18 = len(b_ver_az) == 1
+    assert_equal(p18, True)
+    # check object content in both zones
+    obj_version_id = b_ver[0]['foo']['version_id']
+    key = bucket.get_key("foo", version_id=obj_version_id)
+    p19 = key.get_contents_as_string(encoding='ascii') == "zero"
+    assert_equal(p19, True)
+    obj_az_version_id = b_ver_az[0]['foo']['version_id']
+    key_az = bucket_az.get_key("foo", version_id=obj_az_version_id)
+    p20 = key_az.get_contents_as_string(encoding='ascii') == "zero"
+    assert_equal(p20, True)
diff --git a/src/test/rgw/rgw_multi/tests_es.py b/src/test/rgw/rgw_multi/tests_es.py
new file mode 100644
index 000000000..08c11718b
--- /dev/null
+++ b/src/test/rgw/rgw_multi/tests_es.py
@@ -0,0 +1,276 @@
+import json
+import logging
+
+import boto
+import boto.s3.connection
+
+import datetime
+import dateutil
+
+from itertools import zip_longest # type: ignore
+
+from nose.tools import eq_ as eq
+
+from .multisite import *
+from .tests import *
+from .zone_es import *
+
+log = logging.getLogger(__name__)
+
+
+def check_es_configured():
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ es_zones = zonegroup.zones_by_type.get("elasticsearch")
+ if not es_zones:
+ raise SkipTest("Requires at least one ES zone")
+
+def is_es_zone(zone_conn):
+ if not zone_conn:
+ return False
+
+ return zone_conn.zone.tier_type() == "elasticsearch"
+
+def verify_search(bucket_name, src_keys, result_keys, f):
+ check_keys = []
+ for k in src_keys:
+ if bucket_name:
+ if bucket_name != k.bucket.name:
+ continue
+ if f(k):
+ check_keys.append(k)
+ check_keys.sort(key = lambda l: (l.bucket.name, l.name, l.version_id))
+
+ log.debug('check keys:' + dump_json(check_keys))
+ log.debug('result keys:' + dump_json(result_keys))
+
+ for k1, k2 in zip_longest(check_keys, result_keys):
+ assert k1
+ assert k2
+ check_object_eq(k1, k2)
+
+def do_check_mdsearch(conn, bucket, src_keys, req_str, src_filter):
+ if bucket:
+ bucket_name = bucket.name
+ else:
+ bucket_name = ''
+ req = MDSearch(conn, bucket_name, req_str)
+ result_keys = req.search(sort_key = lambda k: (k.bucket.name, k.name, k.version_id))
+ verify_search(bucket_name, src_keys, result_keys, src_filter)
+
+def init_env(create_obj, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = None):
+ check_es_configured()
+
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns, buckets_per_zone = buckets_per_zone)
+
+ if bucket_init_cb:
+ for zone_conn, bucket in zone_bucket:
+ bucket_init_cb(zone_conn, bucket)
+
+ src_keys = []
+
+ owner = None
+
+ obj_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6))
+
+ # don't wait for meta sync just yet
+ for zone, bucket in zone_bucket:
+ for count in range(num_keys):
+ objname = obj_prefix + str(count)
+ k = new_key(zone, bucket.name, objname)
+ # k.set_contents_from_string(content + 'x' * count)
+ if not create_obj:
+ continue
+
+ create_obj(k, count)
+
+ if not owner:
+ for list_key in bucket.list_versions():
+ owner = list_key.owner
+ break
+
+ k = bucket.get_key(k.name, version_id = k.version_id)
+ k.owner = owner # owner is not set when doing get_key()
+
+ src_keys.append(k)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ sources = []
+ targets = []
+ for target_conn in zonegroup_conns.zones:
+ if not is_es_zone(target_conn):
+ sources.append(target_conn)
+ continue
+
+ targets.append(target_conn)
+
+ buckets = []
+ # make sure all targets are synced
+ for source_conn, bucket in zone_bucket:
+ buckets.append(bucket)
+ for target_conn in targets:
+ zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
+
+ return targets, sources, buckets, src_keys
+
+def test_es_object_search():
+ min_size = 10
+ content = 'a' * min_size
+
+ def create_obj(k, i):
+ k.set_contents_from_string(content + 'x' * i)
+
+ targets, _, buckets, src_keys = init_env(create_obj, num_keys = 5, buckets_per_zone = 2)
+
+ for target_conn in targets:
+
+ # bucket checks
+ for bucket in buckets:
+ # check name
+ do_check_mdsearch(target_conn.conn, None, src_keys , 'bucket == ' + bucket.name, lambda k: k.bucket.name == bucket.name)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'bucket == ' + bucket.name, lambda k: k.bucket.name == bucket.name)
+
+ # check on all buckets
+ for key in src_keys:
+ # limiting to checking specific key name, otherwise could get results from
+ # other runs / tests
+ do_check_mdsearch(target_conn.conn, None, src_keys , 'name == ' + key.name, lambda k: k.name == key.name)
+
+ # check on specific bucket
+ for bucket in buckets:
+ for key in src_keys:
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name < ' + key.name, lambda k: k.name < key.name)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name <= ' + key.name, lambda k: k.name <= key.name)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name == ' + key.name, lambda k: k.name == key.name)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name >= ' + key.name, lambda k: k.name >= key.name)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name > ' + key.name, lambda k: k.name > key.name)
+
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name == ' + src_keys[0].name + ' or name >= ' + src_keys[2].name,
+ lambda k: k.name == src_keys[0].name or k.name >= src_keys[2].name)
+
+ # check etag
+ for key in src_keys:
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag < ' + key.etag[1:-1], lambda k: k.etag < key.etag)
+ for key in src_keys:
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag == ' + key.etag[1:-1], lambda k: k.etag == key.etag)
+ for key in src_keys:
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag > ' + key.etag[1:-1], lambda k: k.etag > key.etag)
+
+ # check size
+ for key in src_keys:
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size < ' + str(key.size), lambda k: k.size < key.size)
+ for key in src_keys:
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size <= ' + str(key.size), lambda k: k.size <= key.size)
+ for key in src_keys:
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size == ' + str(key.size), lambda k: k.size == key.size)
+ for key in src_keys:
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size >= ' + str(key.size), lambda k: k.size >= key.size)
+ for key in src_keys:
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size > ' + str(key.size), lambda k: k.size > key.size)
+
+def date_from_str(s):
+ return dateutil.parser.parse(s)
+
+def test_es_object_search_custom():
+ min_size = 10
+ content = 'a' * min_size
+
+ def bucket_init(zone_conn, bucket):
+ req = MDSearchConfig(zone_conn.conn, bucket.name)
+ req.set_config('x-amz-meta-foo-str; string, x-amz-meta-foo-int; int, x-amz-meta-foo-date; date')
+
+ def create_obj(k, i):
+ date = datetime.datetime.now() + datetime.timedelta(seconds=1) * i
+ date_str = date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
+ k.set_contents_from_string(content + 'x' * i, headers = { 'X-Amz-Meta-Foo-Str': str(i * 5),
+ 'X-Amz-Meta-Foo-Int': str(i * 5),
+ 'X-Amz-Meta-Foo-Date': date_str})
+
+ targets, _, buckets, src_keys = init_env(create_obj, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = bucket_init)
+
+
+ for target_conn in targets:
+
+ # bucket checks
+ for bucket in buckets:
+ str_vals = []
+ for key in src_keys:
+ # check string values
+ val = key.get_metadata('foo-str')
+ str_vals.append(val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str < ' + val, lambda k: k.get_metadata('foo-str') < val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str <= ' + val, lambda k: k.get_metadata('foo-str') <= val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str == ' + val, lambda k: k.get_metadata('foo-str') == val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str >= ' + val, lambda k: k.get_metadata('foo-str') >= val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str > ' + val, lambda k: k.get_metadata('foo-str') > val)
+
+ # check int values
+ sval = key.get_metadata('foo-int')
+ val = int(sval)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int < ' + sval, lambda k: int(k.get_metadata('foo-int')) < val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int <= ' + sval, lambda k: int(k.get_metadata('foo-int')) <= val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int == ' + sval, lambda k: int(k.get_metadata('foo-int')) == val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int >= ' + sval, lambda k: int(k.get_metadata('foo-int')) >= val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int > ' + sval, lambda k: int(k.get_metadata('foo-int')) > val)
+
+            # check date values
+ sval = key.get_metadata('foo-date')
+ val = date_from_str(sval)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date < ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) < val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date <= ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) <= val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date == ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) == val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date >= ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) >= val)
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date > ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) > val)
+
+ # 'or' query
+ for i in range(len(src_keys) // 2):
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str <= ' + str_vals[i] + ' or x-amz-meta-foo-str >= ' + str_vals[-i],
+ lambda k: k.get_metadata('foo-str') <= str_vals[i] or k.get_metadata('foo-str') >= str_vals[-i] )
+
+ # 'and' query
+ for i in range(len(src_keys) // 2):
+ do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str >= ' + str_vals[i] + ' and x-amz-meta-foo-str <= ' + str_vals[i + 1],
+ lambda k: k.get_metadata('foo-str') >= str_vals[i] and k.get_metadata('foo-str') <= str_vals[i + 1] )
+ # more complicated query
+ for i in range(len(src_keys) // 2):
+ do_check_mdsearch(target_conn.conn, None, src_keys , 'bucket == ' + bucket.name + ' and x-amz-meta-foo-str >= ' + str_vals[i] +
+ ' and (x-amz-meta-foo-str <= ' + str_vals[i + 1] + ')',
+ lambda k: k.bucket.name == bucket.name and (k.get_metadata('foo-str') >= str_vals[i] and
+ k.get_metadata('foo-str') <= str_vals[i + 1]) )
+
+def test_es_bucket_conf():
+ min_size = 0
+
+ def bucket_init(zone_conn, bucket):
+ req = MDSearchConfig(zone_conn.conn, bucket.name)
+ req.set_config('x-amz-meta-foo-str; string, x-amz-meta-foo-int; int, x-amz-meta-foo-date; date')
+
+ targets, sources, buckets, _ = init_env(None, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = bucket_init)
+
+ for source_conn in sources:
+ for bucket in buckets:
+ req = MDSearchConfig(source_conn.conn, bucket.name)
+ conf = req.get_config()
+
+ d = {}
+
+ for entry in conf:
+ d[entry['Key']] = entry['Type']
+
+ eq(len(d), 3)
+ eq(d['x-amz-meta-foo-str'], 'str')
+ eq(d['x-amz-meta-foo-int'], 'int')
+ eq(d['x-amz-meta-foo-date'], 'date')
+
+ req.del_config()
+
+ conf = req.get_config()
+
+ eq(len(conf), 0)
+
+ break # no need to iterate over all zones
diff --git a/src/test/rgw/rgw_multi/tests_ps.py b/src/test/rgw/rgw_multi/tests_ps.py
new file mode 100644
index 000000000..8db1b63b5
--- /dev/null
+++ b/src/test/rgw/rgw_multi/tests_ps.py
@@ -0,0 +1,4958 @@
+import logging
+import json
+import tempfile
+import random
+import threading
+import subprocess
+import socket
+import time
+import os
+from http import server as http_server
+from random import randint
+from .tests import get_realm, \
+ ZonegroupConns, \
+ zonegroup_meta_checkpoint, \
+ zone_meta_checkpoint, \
+ zone_bucket_checkpoint, \
+ zone_data_checkpoint, \
+ zonegroup_bucket_checkpoint, \
+ check_bucket_eq, \
+ gen_bucket_name, \
+ get_user, \
+ get_tenant
+from .zone_ps import PSTopic, \
+ PSTopicS3, \
+ PSNotification, \
+ PSSubscription, \
+ PSNotificationS3, \
+ print_connection_info, \
+ delete_all_s3_topics, \
+ put_object_tagging, \
+ get_object_tagging, \
+ delete_all_objects
+from .multisite import User
+from nose import SkipTest
+from nose.tools import assert_not_equal, assert_equal
+import boto.s3.tagging
+
+# configure logging for the tests module
+log = logging.getLogger(__name__)
+
+skip_push_tests = True
+
+####################################
+# utility functions for pubsub tests
+####################################
+
+def set_contents_from_string(key, content):
+ try:
+ key.set_contents_from_string(content)
+ except Exception as e:
+ print('Error: ' + str(e))
+
+
+# HTTP endpoint functions
+# multithreaded streaming server, based on: https://stackoverflow.com/questions/46210672/
+
+class HTTPPostHandler(http_server.BaseHTTPRequestHandler):
+ """HTTP POST hanler class storing the received events in its http server"""
+ def do_POST(self):
+ """implementation of POST handler"""
+ try:
+ content_length = int(self.headers['Content-Length'])
+ body = self.rfile.read(content_length)
+ log.info('HTTP Server (%d) received event: %s', self.server.worker_id, str(body))
+ self.server.append(json.loads(body))
+ except:
+ log.error('HTTP Server received empty event')
+ self.send_response(400)
+ else:
+ if self.headers.get('Expect') == '100-continue':
+ self.send_response(100)
+ else:
+ self.send_response(200)
+ finally:
+ if self.server.delay > 0:
+ time.sleep(self.server.delay)
+ self.end_headers()
+
+
+class HTTPServerWithEvents(http_server.HTTPServer):
+ """HTTP server used by the handler to store events"""
+ def __init__(self, addr, handler, worker_id, delay=0):
+ http_server.HTTPServer.__init__(self, addr, handler, False)
+ self.worker_id = worker_id
+ self.events = []
+ self.delay = delay
+
+ def append(self, event):
+ self.events.append(event)
+
+
+class HTTPServerThread(threading.Thread):
+ """thread for running the HTTP server. reusing the same socket for all threads"""
+ def __init__(self, i, sock, addr, delay=0):
+ threading.Thread.__init__(self)
+ self.i = i
+ self.daemon = True
+ self.httpd = HTTPServerWithEvents(addr, HTTPPostHandler, i, delay)
+ self.httpd.socket = sock
+ # prevent the HTTP server from re-binding every handler
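+        # note: the shared socket is already bound and listening (see StreamingHTTPServer),
+        # so re-binding is disabled here and each worker thread only serves requests on it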
+ self.httpd.server_bind = self.server_close = lambda self: None
+ self.start()
+
+ def run(self):
+ try:
+ log.info('HTTP Server (%d) started on: %s', self.i, self.httpd.server_address)
+ self.httpd.serve_forever()
+ log.info('HTTP Server (%d) ended', self.i)
+ except Exception as error:
+ # could happen if the server r/w to a closing socket during shutdown
+ log.info('HTTP Server (%d) ended unexpectedly: %s', self.i, str(error))
+
+ def close(self):
+ self.httpd.shutdown()
+
+ def get_events(self):
+ return self.httpd.events
+
+ def reset_events(self):
+ self.httpd.events = []
+
+
+class StreamingHTTPServer:
+ """multi-threaded http server class also holding list of events received into the handler
+ each thread has its own server, and all servers share the same socket"""
+ def __init__(self, host, port, num_workers=100, delay=0):
+ addr = (host, port)
+ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.sock.bind(addr)
+ self.sock.listen(num_workers)
+ self.workers = [HTTPServerThread(i, self.sock, addr, delay) for i in range(num_workers)]
+
+ def verify_s3_events(self, keys, exact_match=False, deletions=False, expected_sizes={}):
+ """verify stored s3 records agains a list of keys"""
+ events = []
+ for worker in self.workers:
+ events += worker.get_events()
+ worker.reset_events()
+ verify_s3_records_by_elements(events, keys, exact_match=exact_match, deletions=deletions, expected_sizes=expected_sizes)
+
+ def verify_events(self, keys, exact_match=False, deletions=False):
+ """verify stored events agains a list of keys"""
+ events = []
+ for worker in self.workers:
+ events += worker.get_events()
+ worker.reset_events()
+ verify_events_by_elements(events, keys, exact_match=exact_match, deletions=deletions)
+
+ def get_and_reset_events(self):
+ events = []
+ for worker in self.workers:
+ events += worker.get_events()
+ worker.reset_events()
+ return events
+
+ def close(self):
+ """close all workers in the http server and wait for it to finish"""
+ # make sure that the shared socket is closed
+ # this is needed in case that one of the threads is blocked on the socket
+ self.sock.shutdown(socket.SHUT_RDWR)
+ self.sock.close()
+ # wait for server threads to finish
+ for worker in self.workers:
+ worker.close()
+ worker.join()
+
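+# typical lifecycle of the streaming http server in a test (illustrative sketch;
+# the host/port values are placeholders chosen by the test):
+#   http_server = StreamingHTTPServer('localhost', 10000, num_workers=10)
+#   ... configure an s3 notification pointing at 'http://localhost:10000' ...
+#   ... upload/delete the test objects and wait for sync ...
+#   http_server.verify_s3_events(keys, exact_match=True)
+#   http_server.close()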
+
+# AMQP endpoint functions
+
+
+class AMQPReceiver(object):
+ """class for receiving and storing messages on a topic from the AMQP broker"""
+ def __init__(self, exchange, topic, external_endpoint_address=None, ca_location=None):
+ import pika
+ import ssl
+
+ if ca_location:
+ ssl_context = ssl.create_default_context()
+ ssl_context.load_verify_locations(cafile=ca_location)
+ ssl_options = pika.SSLOptions(ssl_context)
+ rabbitmq_port = 5671
+ else:
+ rabbitmq_port = 5672
+ ssl_options = None
+
+ if external_endpoint_address:
+ params = pika.URLParameters(external_endpoint_address, ssl_options=ssl_options)
+ else:
+ hostname = get_ip()
+ params = pika.ConnectionParameters(host=hostname, port=rabbitmq_port, ssl_options=ssl_options)
+ remaining_retries = 10
+ while remaining_retries > 0:
+ try:
+ connection = pika.BlockingConnection(params)
+ break
+ except Exception as error:
+ remaining_retries -= 1
+ print('failed to connect to rabbitmq (remaining retries '
+ + str(remaining_retries) + '): ' + str(error))
+ time.sleep(1)
+
+ if remaining_retries == 0:
+ raise Exception('failed to connect to rabbitmq - no retries left')
+
+ self.channel = connection.channel()
+ self.channel.exchange_declare(exchange=exchange, exchange_type='topic', durable=True)
+ result = self.channel.queue_declare('', exclusive=True)
+ queue_name = result.method.queue
+ self.channel.queue_bind(exchange=exchange, queue=queue_name, routing_key=topic)
+ self.channel.basic_consume(queue=queue_name,
+ on_message_callback=self.on_message,
+ auto_ack=True)
+ self.events = []
+ self.topic = topic
+
+ def on_message(self, ch, method, properties, body):
+ """callback invoked when a new message arrive on the topic"""
+ log.info('AMQP received event for topic %s:\n %s', self.topic, body)
+ self.events.append(json.loads(body))
+
+ # TODO create a base class for the AMQP and HTTP cases
+ def verify_s3_events(self, keys, exact_match=False, deletions=False, expected_sizes={}):
+ """verify stored s3 records agains a list of keys"""
+ verify_s3_records_by_elements(self.events, keys, exact_match=exact_match, deletions=deletions, expected_sizes=expected_sizes)
+ self.events = []
+
+ def verify_events(self, keys, exact_match=False, deletions=False):
+ """verify stored events agains a list of keys"""
+ verify_events_by_elements(self.events, keys, exact_match=exact_match, deletions=deletions)
+ self.events = []
+
+ def get_and_reset_events(self):
+ tmp = self.events
+ self.events = []
+ return tmp
+
+
+def amqp_receiver_thread_runner(receiver):
+ """main thread function for the amqp receiver"""
+ try:
+ log.info('AMQP receiver started')
+ receiver.channel.start_consuming()
+ log.info('AMQP receiver ended')
+ except Exception as error:
+ log.info('AMQP receiver ended unexpectedly: %s', str(error))
+
+
+def create_amqp_receiver_thread(exchange, topic, external_endpoint_address=None, ca_location=None):
+ """create amqp receiver and thread"""
+ receiver = AMQPReceiver(exchange, topic, external_endpoint_address, ca_location)
+ task = threading.Thread(target=amqp_receiver_thread_runner, args=(receiver,))
+ task.daemon = True
+ return task, receiver
+
+
+def stop_amqp_receiver(receiver, task):
+ """stop the receiver thread and wait for it to finis"""
+ try:
+ receiver.channel.stop_consuming()
+ log.info('stopping AMQP receiver')
+ except Exception as error:
+        log.info('failed to gracefully stop AMQP receiver: %s', str(error))
+ task.join(5)
+
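+# how the amqp helpers above are typically wired together in a test (illustrative
+# sketch; the exchange and topic names are placeholders):
+#   task, receiver = create_amqp_receiver_thread('ex1', topic_name)
+#   task.start()
+#   ... trigger bucket notifications ...
+#   receiver.verify_s3_events(keys, exact_match=True)
+#   stop_amqp_receiver(receiver, task)
+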
+def check_ps_configured():
+ """check if at least one pubsub zone exist"""
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ ps_zones = zonegroup.zones_by_type.get("pubsub")
+ if not ps_zones:
+ raise SkipTest("Requires at least one PS zone")
+
+
+def is_ps_zone(zone_conn):
+ """check if a specific zone is pubsub zone"""
+ if not zone_conn:
+ return False
+ return zone_conn.zone.tier_type() == "pubsub"
+
+
+def verify_events_by_elements(events, keys, exact_match=False, deletions=False):
+ """ verify there is at least one event per element """
+ err = ''
+ for key in keys:
+ key_found = False
+ if type(events) is list:
+ for event_list in events:
+ if key_found:
+ break
+ for event in event_list['events']:
+ if event['info']['bucket']['name'] == key.bucket.name and \
+ event['info']['key']['name'] == key.name:
+ if deletions and event['event'] == 'OBJECT_DELETE':
+ key_found = True
+ break
+ elif not deletions and event['event'] == 'OBJECT_CREATE':
+ key_found = True
+ break
+ else:
+ for event in events['events']:
+ if event['info']['bucket']['name'] == key.bucket.name and \
+ event['info']['key']['name'] == key.name:
+ if deletions and event['event'] == 'OBJECT_DELETE':
+ key_found = True
+ break
+ elif not deletions and event['event'] == 'OBJECT_CREATE':
+ key_found = True
+ break
+
+ if not key_found:
+ err = 'no ' + ('deletion' if deletions else 'creation') + ' event found for key: ' + str(key)
+ log.error(events)
+ assert False, err
+
+ if not len(events) == len(keys):
+ err = 'superfluous events are found'
+ log.debug(err)
+ if exact_match:
+ log.error(events)
+ assert False, err
+
+
+def verify_s3_records_by_elements(records, keys, exact_match=False, deletions=False, expected_sizes={}):
+ """ verify there is at least one record per element """
+ err = ''
+ for key in keys:
+ key_found = False
+ object_size = 0
+ if type(records) is list:
+ for record_list in records:
+ if key_found:
+ break
+ for record in record_list['Records']:
+ if record['s3']['bucket']['name'] == key.bucket.name and \
+ record['s3']['object']['key'] == key.name:
+ if deletions and record['eventName'].startswith('ObjectRemoved'):
+ key_found = True
+ object_size = record['s3']['object']['size']
+ break
+ elif not deletions and record['eventName'].startswith('ObjectCreated'):
+ key_found = True
+ object_size = record['s3']['object']['size']
+ break
+ else:
+ for record in records['Records']:
+ if record['s3']['bucket']['name'] == key.bucket.name and \
+ record['s3']['object']['key'] == key.name:
+ if deletions and record['eventName'].startswith('ObjectRemoved'):
+ key_found = True
+ object_size = record['s3']['object']['size']
+ break
+ elif not deletions and record['eventName'].startswith('ObjectCreated'):
+ key_found = True
+ object_size = record['s3']['object']['size']
+ break
+
+ if not key_found:
+ err = 'no ' + ('deletion' if deletions else 'creation') + ' event found for key: ' + str(key)
+ assert False, err
+ elif expected_sizes:
+ assert_equal(object_size, expected_sizes.get(key.name))
+
+ if not len(records) == len(keys):
+ err = 'superfluous records are found'
+ log.warning(err)
+ if exact_match:
+ for record_list in records:
+ for record in record_list['Records']:
+ log.error(str(record['s3']['bucket']['name']) + ',' + str(record['s3']['object']['key']))
+ assert False, err
+
+
+def init_rabbitmq():
+ """ start a rabbitmq broker """
+ hostname = get_ip()
+ #port = str(random.randint(20000, 30000))
+ #data_dir = './' + port + '_data'
+ #log_dir = './' + port + '_log'
+ #print('')
+ #try:
+ # os.mkdir(data_dir)
+ # os.mkdir(log_dir)
+ #except:
+ # print('rabbitmq directories already exists')
+ #env = {'RABBITMQ_NODE_PORT': port,
+ # 'RABBITMQ_NODENAME': 'rabbit'+ port + '@' + hostname,
+ # 'RABBITMQ_USE_LONGNAME': 'true',
+ # 'RABBITMQ_MNESIA_BASE': data_dir,
+ # 'RABBITMQ_LOG_BASE': log_dir}
+ # TODO: support multiple brokers per host using env
+ # make sure we don't collide with the default
+ try:
+ proc = subprocess.Popen(['sudo', '--preserve-env=RABBITMQ_CONFIG_FILE', 'rabbitmq-server'])
+ except Exception as error:
+ log.info('failed to execute rabbitmq-server: %s', str(error))
+ print('failed to execute rabbitmq-server: %s' % str(error))
+ return None
+ # TODO add rabbitmq checkpoint instead of sleep
+ time.sleep(5)
+ return proc #, port, data_dir, log_dir
+
+
+def clean_rabbitmq(proc): #, data_dir, log_dir)
+ """ stop the rabbitmq broker """
+ try:
+ subprocess.call(['sudo', 'rabbitmqctl', 'stop'])
+ time.sleep(5)
+ proc.terminate()
+ except:
+ log.info('rabbitmq server already terminated')
+ # TODO: add directory cleanup once multiple brokers are supported
+ #try:
+ # os.rmdir(data_dir)
+ # os.rmdir(log_dir)
+ #except:
+ # log.info('rabbitmq directories already removed')
+
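+# broker lifecycle around an amqp test (illustrative sketch):
+#   proc = init_rabbitmq()
+#   if proc is None: skip the test (rabbitmq-server not available)
+#   ... run the amqp test ...
+#   clean_rabbitmq(proc)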
+
+# Kafka endpoint functions
+
+kafka_server = 'localhost'
+
+class KafkaReceiver(object):
+ """class for receiving and storing messages on a topic from the kafka broker"""
+ def __init__(self, topic, security_type):
+ from kafka import KafkaConsumer
+ remaining_retries = 10
+ port = 9092
+ if security_type != 'PLAINTEXT':
+ security_type = 'SSL'
+ port = 9093
+ while remaining_retries > 0:
+ try:
+ self.consumer = KafkaConsumer(topic, bootstrap_servers = kafka_server+':'+str(port), security_protocol=security_type)
+ print('Kafka consumer created on topic: '+topic)
+ break
+ except Exception as error:
+ remaining_retries -= 1
+ print('failed to connect to kafka (remaining retries '
+ + str(remaining_retries) + '): ' + str(error))
+ time.sleep(1)
+
+ if remaining_retries == 0:
+ raise Exception('failed to connect to kafka - no retries left')
+
+ self.events = []
+ self.topic = topic
+ self.stop = False
+
+ def verify_s3_events(self, keys, exact_match=False, deletions=False):
+ """verify stored s3 records agains a list of keys"""
+ verify_s3_records_by_elements(self.events, keys, exact_match=exact_match, deletions=deletions)
+ self.events = []
+
+
+def kafka_receiver_thread_runner(receiver):
+ """main thread function for the kafka receiver"""
+ try:
+ log.info('Kafka receiver started')
+ print('Kafka receiver started')
+ while not receiver.stop:
+ for msg in receiver.consumer:
+ receiver.events.append(json.loads(msg.value))
+            time.sleep(0.1)
+ log.info('Kafka receiver ended')
+ print('Kafka receiver ended')
+ except Exception as error:
+ log.info('Kafka receiver ended unexpectedly: %s', str(error))
+ print('Kafka receiver ended unexpectedly: ' + str(error))
+
+
+def create_kafka_receiver_thread(topic, security_type='PLAINTEXT'):
+ """create kafka receiver and thread"""
+ receiver = KafkaReceiver(topic, security_type)
+ task = threading.Thread(target=kafka_receiver_thread_runner, args=(receiver,))
+ task.daemon = True
+ return task, receiver
+
+def stop_kafka_receiver(receiver, task):
+ """stop the receiver thread and wait for it to finis"""
+ receiver.stop = True
+ task.join(1)
+ try:
+ receiver.consumer.close()
+ except Exception as error:
+        log.info('failed to gracefully stop Kafka receiver: %s', str(error))
+
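+# how the kafka helpers above are typically wired together in a test (illustrative
+# sketch; the topic name is a placeholder):
+#   task, receiver = create_kafka_receiver_thread(topic_name)
+#   task.start()
+#   ... trigger bucket notifications ...
+#   receiver.verify_s3_events(keys, exact_match=True)
+#   stop_kafka_receiver(receiver, task)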
+
+# follow the instructions here to create and sign a broker certificate:
+# https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka
+
+# the generated broker certificate should be stored in the java keystore for the use of the server
+# assuming the jks files were copied to $KAFKA_DIR and broker name is "localhost"
+# following lines must be added to $KAFKA_DIR/config/server.properties
+# listeners=PLAINTEXT://localhost:9092,SSL://localhost:9093,SASL_SSL://localhost:9094
+# sasl.enabled.mechanisms=PLAIN
+# ssl.keystore.location = $KAFKA_DIR/server.keystore.jks
+# ssl.keystore.password = abcdefgh
+# ssl.key.password = abcdefgh
+# ssl.truststore.location = $KAFKA_DIR/server.truststore.jks
+# ssl.truststore.password = abcdefgh
+
+# notes:
+# (1) we don't test client authentication, hence there is no need to generate client keys
+# (2) our client is not using the keystore, and the "rootCA.crt" file generated in the process above
+# should be copied to: $KAFKA_DIR
+
+def init_kafka():
+ """ start kafka/zookeeper """
+ try:
+ KAFKA_DIR = os.environ['KAFKA_DIR']
+ except:
+ KAFKA_DIR = ''
+
+ if KAFKA_DIR == '':
+ log.info('KAFKA_DIR must be set to where kafka is installed')
+ print('KAFKA_DIR must be set to where kafka is installed')
+ return None, None, None
+
+ DEVNULL = open(os.devnull, 'wb')
+
+ print('\nStarting zookeeper...')
+ try:
+ zk_proc = subprocess.Popen([KAFKA_DIR+'bin/zookeeper-server-start.sh', KAFKA_DIR+'config/zookeeper.properties'], stdout=DEVNULL)
+ except Exception as error:
+ log.info('failed to execute zookeeper: %s', str(error))
+ print('failed to execute zookeeper: %s' % str(error))
+ return None, None, None
+
+ time.sleep(5)
+ if zk_proc.poll() is not None:
+ print('zookeeper failed to start')
+ return None, None, None
+ print('Zookeeper started')
+ print('Starting kafka...')
+ kafka_log = open('./kafka.log', 'w')
+ try:
+ kafka_env = os.environ.copy()
+ kafka_env['KAFKA_OPTS']='-Djava.security.auth.login.config='+KAFKA_DIR+'config/kafka_server_jaas.conf'
+ kafka_proc = subprocess.Popen([
+ KAFKA_DIR+'bin/kafka-server-start.sh',
+ KAFKA_DIR+'config/server.properties'],
+ stdout=kafka_log,
+ env=kafka_env)
+ except Exception as error:
+ log.info('failed to execute kafka: %s', str(error))
+ print('failed to execute kafka: %s' % str(error))
+ zk_proc.terminate()
+ kafka_log.close()
+ return None, None, None
+
+ # TODO add kafka checkpoint instead of sleep
+ time.sleep(15)
+ if kafka_proc.poll() is not None:
+ zk_proc.terminate()
+ print('kafka failed to start. details in: ./kafka.log')
+ kafka_log.close()
+ return None, None, None
+
+ print('Kafka started')
+ return kafka_proc, zk_proc, kafka_log
+
+
+def clean_kafka(kafka_proc, zk_proc, kafka_log):
+ """ stop kafka/zookeeper """
+ try:
+ kafka_log.close()
+ print('Shutdown Kafka...')
+ kafka_proc.terminate()
+ time.sleep(5)
+ if kafka_proc.poll() is None:
+ print('Failed to shutdown Kafka... killing')
+ kafka_proc.kill()
+ print('Shutdown zookeeper...')
+ zk_proc.terminate()
+ time.sleep(5)
+ if zk_proc.poll() is None:
+ print('Failed to shutdown zookeeper... killing')
+ zk_proc.kill()
+ except:
+ log.info('kafka/zookeeper already terminated')
+
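+# broker lifecycle around a kafka test (illustrative sketch):
+#   kafka_proc, zk_proc, kafka_log = init_kafka()
+#   if kafka_proc is None or zk_proc is None: skip the test (kafka not available)
+#   ... run the kafka test ...
+#   clean_kafka(kafka_proc, zk_proc, kafka_log)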
+
+def init_env(require_ps=True):
+ """initialize the environment"""
+ if require_ps:
+ check_ps_configured()
+
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+
+ zonegroup_meta_checkpoint(zonegroup)
+
+ ps_zone = None
+ master_zone = None
+ for conn in zonegroup_conns.zones:
+ if conn.zone == zonegroup.master_zone:
+ master_zone = conn
+ if is_ps_zone(conn):
+ zone_meta_checkpoint(conn.zone)
+ ps_zone = conn
+
+ assert_not_equal(master_zone, None)
+ if require_ps:
+ assert_not_equal(ps_zone, None)
+ return master_zone, ps_zone
+
+
+def get_ip():
+ """ This method returns the "primary" IP on the local box (the one with a default route)
+ source: https://stackoverflow.com/a/28950776/711085
+ this is needed because on the teuthology machines: socket.getfqdn()/socket.gethostname() return 127.0.0.1 """
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ try:
+ # address should not be reachable
+ s.connect(('10.255.255.255', 1))
+ ip = s.getsockname()[0]
+ finally:
+ s.close()
+ return ip
+
+
+TOPIC_SUFFIX = "_topic"
+SUB_SUFFIX = "_sub"
+NOTIFICATION_SUFFIX = "_notif"
+
+##############
+# pubsub tests
+##############
+
+def test_ps_info():
+ """ log information for manual testing """
+ return SkipTest("only used in manual testing")
+ master_zone, ps_zone = init_env()
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ bucket_name = gen_bucket_name()
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ print('Zonegroup: ' + zonegroup.name)
+ print('user: ' + get_user())
+ print('tenant: ' + get_tenant())
+ print('Master Zone')
+ print_connection_info(master_zone.conn)
+ print('PubSub Zone')
+ print_connection_info(ps_zone.conn)
+ print('Bucket: ' + bucket_name)
+
+
+def test_ps_s3_notification_low_level():
+ """ test low level implementation of s3 notifications """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ # create bucket on the first of the rados zones
+ master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create topic
+ topic_name = bucket_name + TOPIC_SUFFIX
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ result, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ parsed_result = json.loads(result)
+ topic_arn = parsed_result['arn']
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ generated_topic_name = notification_name+'_'+topic_name
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+ zone_meta_checkpoint(ps_zone.zone)
+ # get auto-generated topic
+ generated_topic_conf = PSTopic(ps_zone.conn, generated_topic_name)
+ result, status = generated_topic_conf.get_config()
+ parsed_result = json.loads(result)
+ assert_equal(status/100, 2)
+ assert_equal(parsed_result['topic']['name'], generated_topic_name)
+ # get auto-generated notification
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ generated_topic_name)
+ result, status = notification_conf.get_config()
+ parsed_result = json.loads(result)
+ assert_equal(status/100, 2)
+ assert_equal(len(parsed_result['topics']), 1)
+ # get auto-generated subscription
+ sub_conf = PSSubscription(ps_zone.conn, notification_name,
+ generated_topic_name)
+ result, status = sub_conf.get_config()
+ parsed_result = json.loads(result)
+ assert_equal(status/100, 2)
+ assert_equal(parsed_result['topic'], generated_topic_name)
+ # delete s3 notification
+ _, status = s3_notification_conf.del_config(notification=notification_name)
+ assert_equal(status/100, 2)
+ # delete topic
+ _, status = topic_conf.del_config()
+ assert_equal(status/100, 2)
+
+ # verify low-level cleanup
+ _, status = generated_topic_conf.get_config()
+ assert_equal(status, 404)
+ result, status = notification_conf.get_config()
+ parsed_result = json.loads(result)
+ assert_equal(len(parsed_result['topics']), 0)
+ # TODO should return 404
+ # assert_equal(status, 404)
+ result, status = sub_conf.get_config()
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['topic'], '')
+ # TODO should return 404
+ # assert_equal(status, 404)
+
+ # cleanup
+ topic_conf.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_s3_notification_records():
+ """ test s3 records fetching """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create topic
+ topic_name = bucket_name + TOPIC_SUFFIX
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ result, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ parsed_result = json.loads(result)
+ topic_arn = parsed_result['arn']
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+ zone_meta_checkpoint(ps_zone.zone)
+ # get auto-generated subscription
+ sub_conf = PSSubscription(ps_zone.conn, notification_name,
+ topic_name)
+ _, status = sub_conf.get_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # get the events from the subscription
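+ # events are returned as a JSON document with a 'Records' array, one record per object operation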
+ result, _ = sub_conf.get_events()
+ records = json.loads(result)
+ for record in records['Records']:
+ log.debug(record)
+ keys = list(bucket.list())
+ # TODO: use exact match
+ verify_s3_records_by_elements(records, keys, exact_match=False)
+
+ # cleanup
+ _, status = s3_notification_conf.del_config()
+ topic_conf.del_config()
+ # delete the keys
+ for key in bucket.list():
+ key.delete()
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_s3_notification():
+ """ test s3 notification set/get/delete """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ # create bucket on the first of the rados zones
+ master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create topic
+ topic_name = bucket_name + TOPIC_SUFFIX
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ response, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ parsed_result = json.loads(response)
+ topic_arn = parsed_result['arn']
+ # create one s3 notification
+ notification_name1 = bucket_name + NOTIFICATION_SUFFIX + '_1'
+ topic_conf_list = [{'Id': notification_name1,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+ s3_notification_conf1 = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf1.set_config()
+ assert_equal(status/100, 2)
+ # create another s3 notification with the same topic
+ notification_name2 = bucket_name + NOTIFICATION_SUFFIX + '_2'
+ topic_conf_list = [{'Id': notification_name2,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*']
+ }]
+ s3_notification_conf2 = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf2.set_config()
+ assert_equal(status/100, 2)
+ zone_meta_checkpoint(ps_zone.zone)
+
+ # get all notification on a bucket
+ response, status = s3_notification_conf1.get_config()
+ assert_equal(status/100, 2)
+ assert_equal(len(response['TopicConfigurations']), 2)
+ assert_equal(response['TopicConfigurations'][0]['TopicArn'], topic_arn)
+ assert_equal(response['TopicConfigurations'][1]['TopicArn'], topic_arn)
+
+ # get specific notification on a bucket
+ response, status = s3_notification_conf1.get_config(notification=notification_name1)
+ assert_equal(status/100, 2)
+ assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Topic'], topic_arn)
+ assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Id'], notification_name1)
+ response, status = s3_notification_conf2.get_config(notification=notification_name2)
+ assert_equal(status/100, 2)
+ assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Topic'], topic_arn)
+ assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Id'], notification_name2)
+
+ # delete specific notifications
+ _, status = s3_notification_conf1.del_config(notification=notification_name1)
+ assert_equal(status/100, 2)
+ _, status = s3_notification_conf2.del_config(notification=notification_name2)
+ assert_equal(status/100, 2)
+
+ # cleanup
+ topic_conf.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_s3_topic_on_master():
+ """ test s3 topics set/get/delete on master """
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # clean all topics
+ delete_all_s3_topics(master_zone, zonegroup.name)
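+ # leftover topics from previous runs would break the exact-count assertions below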
+
+ # create s3 topics
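+ # topic ARNs returned by the gateway are expected to be of the form 'arn:aws:sns:<zonegroup>:<tenant>:<topic name>', as asserted below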
+ endpoint_address = 'amqp://127.0.0.1:7001/vhost_1'
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
+ topic_conf1 = PSTopicS3(master_zone.conn, topic_name+'_1', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf1.set_config()
+ assert_equal(topic_arn,
+ 'arn:aws:sns:' + zonegroup.name + ':' + get_tenant() + ':' + topic_name + '_1')
+
+ endpoint_address = 'http://127.0.0.1:9001'
+ endpoint_args = 'push-endpoint='+endpoint_address
+ topic_conf2 = PSTopicS3(master_zone.conn, topic_name+'_2', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf2.set_config()
+ assert_equal(topic_arn,
+ 'arn:aws:sns:' + zonegroup.name + ':' + get_tenant() + ':' + topic_name + '_2')
+ endpoint_address = 'http://127.0.0.1:9002'
+ endpoint_args = 'push-endpoint='+endpoint_address
+ topic_conf3 = PSTopicS3(master_zone.conn, topic_name+'_3', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf3.set_config()
+ assert_equal(topic_arn,
+ 'arn:aws:sns:' + zonegroup.name + ':' + get_tenant() + ':' + topic_name + '_3')
+
+ # get topic 3
+ result, status = topic_conf3.get_config()
+ assert_equal(status, 200)
+ assert_equal(topic_arn, result['GetTopicResponse']['GetTopicResult']['Topic']['TopicArn'])
+ assert_equal(endpoint_address, result['GetTopicResponse']['GetTopicResult']['Topic']['EndPoint']['EndpointAddress'])
+ # Note that endpoint args may be ordered differently in the result
+ result = topic_conf3.get_attributes()
+ assert_equal(topic_arn, result['Attributes']['TopicArn'])
+ json_endpoint = json.loads(result['Attributes']['EndPoint'])
+ assert_equal(endpoint_address, json_endpoint['EndpointAddress'])
+
+ # delete topic 1
+ status = topic_conf1.del_config()
+ assert_equal(status, 200)
+
+ # try to get a deleted topic
+ _, status = topic_conf1.get_config()
+ assert_equal(status, 404)
+ try:
+ topic_conf1.get_attributes()
+ except:
+ print('topic already deleted - this is expected')
+ else:
+ assert False, 'topic 1 should be deleted at this point'
+
+ # get the remaining 2 topics
+ result, status = topic_conf1.get_list()
+ assert_equal(status, 200)
+ assert_equal(len(result['ListTopicsResponse']['ListTopicsResult']['Topics']['member']), 2)
+
+ # delete topics
+ result = topic_conf2.del_config()
+ # TODO: should be 200OK
+ # assert_equal(status, 200)
+ result = topic_conf3.del_config()
+ # TODO: should be 200OK
+ # assert_equal(status, 200)
+
+ # get topic list, make sure it is empty
+ result, status = topic_conf1.get_list()
+ assert_equal(result['ListTopicsResponse']['ListTopicsResult']['Topics'], None)
+
+
+def test_ps_s3_topic_with_secret_on_master():
+ """ test s3 topics with secret set/get/delete on master """
+ master_zone, _ = init_env(require_ps=False)
+ if master_zone.secure_conn is None:
+ return SkipTest('secure connection is needed to test topic with secrets')
+
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # clean all topics
+ delete_all_s3_topics(master_zone, zonegroup.name)
+
+ # create s3 topics
+ endpoint_address = 'amqp://user:password@127.0.0.1:7001'
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
+ bad_topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ try:
+ result = bad_topic_conf.set_config()
+ except Exception as err:
+ print('Error is expected: ' + str(err))
+ else:
+ assert False, 'setting a user/password endpoint configuration should only be allowed over HTTPS'
+
+ topic_conf = PSTopicS3(master_zone.secure_conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+
+ assert_equal(topic_arn,
+ 'arn:aws:sns:' + zonegroup.name + ':' + get_tenant() + ':' + topic_name)
+
+ _, status = bad_topic_conf.get_config()
+ assert_equal(status/100, 4)
+
+ # get topic
+ result, status = topic_conf.get_config()
+ assert_equal(status, 200)
+ assert_equal(topic_arn, result['GetTopicResponse']['GetTopicResult']['Topic']['TopicArn'])
+ assert_equal(endpoint_address, result['GetTopicResponse']['GetTopicResult']['Topic']['EndPoint']['EndpointAddress'])
+
+ _, status = bad_topic_conf.get_config()
+ assert_equal(status/100, 4)
+
+ _, status = topic_conf.get_list()
+ assert_equal(status/100, 2)
+
+ # delete topics
+ result = topic_conf.del_config()
+
+
+def test_ps_s3_notification_on_master():
+ """ test s3 notification set/get/delete on master """
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ bucket_name = gen_bucket_name()
+ # create bucket
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+ # create s3 topic
+ endpoint_address = 'amqp://127.0.0.1:7001'
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
+ topic_conf1 = PSTopicS3(master_zone.conn, topic_name+'_1', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn1 = topic_conf1.set_config()
+ topic_conf2 = PSTopicS3(master_zone.conn, topic_name+'_2', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn2 = topic_conf2.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name+'_1',
+ 'TopicArn': topic_arn1,
+ 'Events': ['s3:ObjectCreated:*']
+ },
+ {'Id': notification_name+'_2',
+ 'TopicArn': topic_arn1,
+ 'Events': ['s3:ObjectRemoved:*']
+ },
+ {'Id': notification_name+'_3',
+ 'TopicArn': topic_arn1,
+ 'Events': []
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # get notifications on a bucket
+ response, status = s3_notification_conf.get_config(notification=notification_name+'_1')
+ assert_equal(status/100, 2)
+ assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Topic'], topic_arn1)
+
+ # delete specific notifications
+ _, status = s3_notification_conf.del_config(notification=notification_name+'_1')
+ assert_equal(status/100, 2)
+
+ # get the remaining 2 notifications on a bucket
+ response, status = s3_notification_conf.get_config()
+ assert_equal(status/100, 2)
+ assert_equal(len(response['TopicConfigurations']), 2)
+ assert_equal(response['TopicConfigurations'][0]['TopicArn'], topic_arn1)
+ assert_equal(response['TopicConfigurations'][1]['TopicArn'], topic_arn1)
+
+ # delete remaining notifications
+ _, status = s3_notification_conf.del_config()
+ assert_equal(status/100, 2)
+
+ # make sure that the notifications are now deleted
+ response, status = s3_notification_conf.get_config()
+ try:
+ dummy = response['TopicConfigurations']
+ except:
+ print('"TopicConfigurations" is not in response')
+ else:
+ assert False, '"TopicConfigurations" should not be in response'
+
+ # re-create the s3 notifications so they are deleted together with the bucket
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # make sure the notification and auto-generated topic are deleted
+ response, status = topic_conf1.get_list()
+ topics = response['ListTopicsResponse']['ListTopicsResult']['Topics']['member']
+ before_delete = len(topics)
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ response, status = topic_conf2.get_list()
+ topics = response['ListTopicsResponse']['ListTopicsResult']['Topics']['member']
+ after_delete = len(topics)
+ assert_equal(before_delete - after_delete, 3)
+
+ # cleanup
+ topic_conf1.del_config()
+ topic_conf2.del_config()
+
+
+def ps_s3_notification_filter(on_master):
+ """ test s3 notification filter on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ if on_master:
+ master_zone, _ = init_env(require_ps=False)
+ ps_zone = master_zone
+ else:
+ master_zone, ps_zone = init_env(require_ps=True)
+
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # start amqp receivers
+ exchange = 'ex1'
+ task, receiver = create_amqp_receiver_thread(exchange, topic_name)
+ task.start()
+
+ # create s3 topic
+ endpoint_address = 'amqp://' + hostname
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
+ if on_master:
+ topic_conf = PSTopicS3(ps_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ else:
+ topic_conf = PSTopic(ps_zone.conn, topic_name, endpoint=endpoint_address, endpoint_args=endpoint_args)
+ result, _ = topic_conf.set_config()
+ parsed_result = json.loads(result)
+ topic_arn = parsed_result['arn']
+ zone_meta_checkpoint(ps_zone.zone)
+
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
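+ # notification_1 matches keys with prefix 'hello', notification_2 keys with prefix 'world' and suffix 'log', notification_3 keys matching the regex '([a-z]+)\.txt'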
+ topic_conf_list = [{'Id': notification_name+'_1',
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*'],
+ 'Filter': {
+ 'Key': {
+ 'FilterRules': [{'Name': 'prefix', 'Value': 'hello'}]
+ }
+ }
+ },
+ {'Id': notification_name+'_2',
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*'],
+ 'Filter': {
+ 'Key': {
+ 'FilterRules': [{'Name': 'prefix', 'Value': 'world'},
+ {'Name': 'suffix', 'Value': 'log'}]
+ }
+ }
+ },
+ {'Id': notification_name+'_3',
+ 'TopicArn': topic_arn,
+ 'Events': [],
+ 'Filter': {
+ 'Key': {
+ 'FilterRules': [{'Name': 'regex', 'Value': '([a-z]+)\\.txt'}]
+ }
+ }
+ }]
+
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ result, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ if on_master:
+ topic_conf_list = [{'Id': notification_name+'_4',
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
+ 'Filter': {
+ 'Metadata': {
+ 'FilterRules': [{'Name': 'x-amz-meta-foo', 'Value': 'bar'},
+ {'Name': 'x-amz-meta-hello', 'Value': 'world'}]
+ },
+ 'Key': {
+ 'FilterRules': [{'Name': 'regex', 'Value': '([a-z]+)'}]
+ }
+ }
+ }]
+
+ try:
+ s3_notification_conf4 = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf4.set_config()
+ assert_equal(status/100, 2)
+ skip_notif4 = False
+ except Exception as error:
+ print('note: metadata filter is not supported by boto3 - skipping test')
+ skip_notif4 = True
+ else:
+ print('filtering by attributes only supported on master zone')
+ skip_notif4 = True
+
+ # get all notifications
+ result, status = s3_notification_conf.get_config()
+ assert_equal(status/100, 2)
+ for conf in result['TopicConfigurations']:
+ filter_name = conf['Filter']['Key']['FilterRules'][0]['Name']
+ assert filter_name == 'prefix' or filter_name == 'suffix' or filter_name == 'regex', filter_name
+
+ if not skip_notif4:
+ result, status = s3_notification_conf4.get_config(notification=notification_name+'_4')
+ assert_equal(status/100, 2)
+ filter_name = result['NotificationConfiguration']['TopicConfiguration']['Filter']['S3Metadata']['FilterRule'][0]['Name']
+ assert filter_name == 'x-amz-meta-foo' or filter_name == 'x-amz-meta-hello'
+
+ expected_in1 = ['hello.kaboom', 'hello.txt', 'hello123.txt', 'hello']
+ expected_in2 = ['world1.log', 'world2log', 'world3.log']
+ expected_in3 = ['hello.txt', 'hell.txt', 'worldlog.txt']
+ expected_in4 = ['foo', 'bar', 'hello', 'world']
+ filtered = ['hell.kaboom', 'world.og', 'world.logg', 'he123ll.txt', 'wo', 'log', 'h', 'txt', 'world.log.txt']
+ filtered_with_attr = ['nofoo', 'nobar', 'nohello', 'noworld']
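+ # each expected_inN list holds the keys that should match notification N; 'filtered' keys match none of the key filters and 'filtered_with_attr' keys carry non-matching metadata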
+ # create objects in bucket
+ for key_name in expected_in1:
+ key = bucket.new_key(key_name)
+ key.set_contents_from_string('bar')
+ for key_name in expected_in2:
+ key = bucket.new_key(key_name)
+ key.set_contents_from_string('bar')
+ for key_name in expected_in3:
+ key = bucket.new_key(key_name)
+ key.set_contents_from_string('bar')
+ if not skip_notif4:
+ for key_name in expected_in4:
+ key = bucket.new_key(key_name)
+ key.set_metadata('foo', 'bar')
+ key.set_metadata('hello', 'world')
+ key.set_metadata('goodbye', 'cruel world')
+ key.set_contents_from_string('bar')
+ for key_name in filtered:
+ key = bucket.new_key(key_name)
+ key.set_contents_from_string('bar')
+ for key_name in filtered_with_attr:
+ key = bucket.new_key(key_name)
+ key.set_metadata('foo', 'nobar')
+ key.set_metadata('hello', 'noworld')
+ key.set_metadata('goodbye', 'cruel world')
+ key.set_contents_from_string('bar')
+
+ if on_master:
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+ else:
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ found_in1 = []
+ found_in2 = []
+ found_in3 = []
+ found_in4 = []
+
+ for event in receiver.get_and_reset_events():
+ notif_id = event['Records'][0]['s3']['configurationId']
+ key_name = event['Records'][0]['s3']['object']['key']
+ if notif_id == notification_name+'_1':
+ found_in1.append(key_name)
+ elif notif_id == notification_name+'_2':
+ found_in2.append(key_name)
+ elif notif_id == notification_name+'_3':
+ found_in3.append(key_name)
+ elif not skip_notif4 and notif_id == notification_name+'_4':
+ found_in4.append(key_name)
+ else:
+ assert False, 'invalid notification: ' + notif_id
+
+ assert_equal(set(found_in1), set(expected_in1))
+ assert_equal(set(found_in2), set(expected_in2))
+ assert_equal(set(found_in3), set(expected_in3))
+ if not skip_notif4:
+ assert_equal(set(found_in4), set(expected_in4))
+
+ # cleanup
+ s3_notification_conf.del_config()
+ if not skip_notif4:
+ s3_notification_conf4.del_config()
+ topic_conf.del_config()
+ # delete the bucket
+ for key in bucket.list():
+ key.delete()
+ master_zone.delete_bucket(bucket_name)
+ stop_amqp_receiver(receiver, task)
+ clean_rabbitmq(proc)
+
+
+def test_ps_s3_notification_filter_on_master():
+ ps_s3_notification_filter(on_master=True)
+
+
+def test_ps_s3_notification_filter():
+ ps_s3_notification_filter(on_master=False)
+
+
+def test_ps_s3_notification_errors_on_master():
+ """ test s3 notification set/get/delete on master """
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ bucket_name = gen_bucket_name()
+ # create bucket
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+ # create s3 topic
+ endpoint_address = 'amqp://127.0.0.1:7001'
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+
+ # create s3 notification with invalid event name
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:Kaboom']
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ try:
+ result, status = s3_notification_conf.set_config()
+ except Exception as error:
+ print(str(error) + ' - is expected')
+ else:
+ assert False, 'invalid event name is expected to fail'
+
+ # create s3 notification with missing name
+ topic_conf_list = [{'Id': '',
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:Put']
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ try:
+ _, _ = s3_notification_conf.set_config()
+ except Exception as error:
+ print(str(error) + ' - is expected')
+ else:
+ assert False, 'missing notification name is expected to fail'
+
+ # create s3 notification with invalid topic ARN
+ invalid_topic_arn = 'kaboom'
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': invalid_topic_arn,
+ 'Events': ['s3:ObjectCreated:Put']
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ try:
+ _, _ = s3_notification_conf.set_config()
+ except Exception as error:
+ print(str(error) + ' - is expected')
+ else:
+ assert False, 'invalid ARN is expected to fail'
+
+ # create s3 notification with unknown topic ARN
+ invalid_topic_arn = 'arn:aws:sns:a::kaboom'
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': invalid_topic_arn ,
+ 'Events': ['s3:ObjectCreated:Put']
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ try:
+ _, _ = s3_notification_conf.set_config()
+ except Exception as error:
+ print(str(error) + ' - is expected')
+ else:
+ assert False, 'unknown topic is expected to fail'
+
+ # create s3 notification with wrong bucket
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:Put']
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, 'kaboom', topic_conf_list)
+ try:
+ _, _ = s3_notification_conf.set_config()
+ except Exception as error:
+ print(str(error) + ' - is expected')
+ else:
+ assert False, 'unknown bucket is expected to fail'
+
+ topic_conf.del_config()
+
+ status = topic_conf.del_config()
+ # deleting an already-deleted topic is not considered an error
+ assert_equal(status, 200)
+
+ _, status = topic_conf.get_config()
+ assert_equal(status, 404)
+
+ # cleanup
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_object_timing():
+ return SkipTest("only used in manual testing")
+ master_zone, _ = init_env(require_ps=False)
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ # create objects in the bucket (async)
+ print('creating objects...')
+ number_of_objects = 1000
+ client_threads = []
+ start_time = time.time()
+ content = str(bytearray(os.urandom(1024*1024)))
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for object creation: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('total number of objects: ' + str(len(list(bucket.list()))))
+
+ print('deleting objects...')
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for object deletion: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ # cleanup
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_s3_notification_push_amqp_on_master():
+ """ test pushing amqp s3 notification on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name1 = bucket_name + TOPIC_SUFFIX + '_1'
+ topic_name2 = bucket_name + TOPIC_SUFFIX + '_2'
+
+ # start amqp receivers
+ exchange = 'ex1'
+ task1, receiver1 = create_amqp_receiver_thread(exchange, topic_name1)
+ task2, receiver2 = create_amqp_receiver_thread(exchange, topic_name2)
+ task1.start()
+ task2.start()
+
+ # create two s3 topic
+ endpoint_address = 'amqp://' + hostname
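+ # amqp-ack-level controls when an event counts as delivered: 'broker' waits for the broker ack, 'routable' only requires that the broker can route the message, 'none' does not wait at all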
+ # with acks from broker
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
+ topic_conf1 = PSTopicS3(master_zone.conn, topic_name1, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn1 = topic_conf1.set_config()
+ # ack only requires that the broker can route the message
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable'
+ topic_conf2 = PSTopicS3(master_zone.conn, topic_name2, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn2 = topic_conf2.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn1,
+ 'Events': []
+ },
+ {'Id': notification_name+'_2', 'TopicArn': topic_arn2,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket (async)
+ number_of_objects = 100
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for creation + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+
+ # check amqp receiver
+ keys = list(bucket.list())
+ print('total number of objects: ' + str(len(keys)))
+ receiver1.verify_s3_events(keys, exact_match=True)
+ receiver2.verify_s3_events(keys, exact_match=True)
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+
+ # check amqp receiver 1 for deletions
+ receiver1.verify_s3_events(keys, exact_match=True, deletions=True)
+ # check amqp receiver 2 has no deletions
+ try:
+ receiver2.verify_s3_events(keys, exact_match=False, deletions=True)
+ except:
+ pass
+ else:
+ err = 'amqp receiver 2 should have no deletions'
+ assert False, err
+
+ # cleanup
+ stop_amqp_receiver(receiver1, task1)
+ stop_amqp_receiver(receiver2, task2)
+ s3_notification_conf.del_config()
+ topic_conf1.del_config()
+ topic_conf2.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ clean_rabbitmq(proc)
+
+
+def test_ps_s3_persistent_cleanup():
+ """ test reservation cleanup after gateway crash """
+ return SkipTest("only used in manual testing")
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ number_of_objects = 200
+ http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
+
+ gw = master_zone
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = gw.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # create s3 topic
+ endpoint_address = 'http://'+host+':'+str(port)
+ endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
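+ # persistent=true queues the notifications and pushes them asynchronously with retries, instead of sending them synchronously as part of the operation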
+ topic_conf = PSTopicS3(gw.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:Put']
+ }]
+ s3_notification_conf = PSNotificationS3(gw.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ # stop gateway while clients are sending
+ os.system("killall -9 radosgw");
+ zonegroup.master_zone.gateways[0].stop()
+ print('wait for 10 sec for before restarting the gateway')
+ time.sleep(10)
+ zonegroup.master_zone.gateways[0].start()
+ [thr.join() for thr in client_threads]
+
+ keys = list(bucket.list())
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ # check http receiver
+ events = http_server.get_and_reset_events()
+
+ print(str(len(events)) + " events found out of " + str(number_of_objects))
+
+ # make sure that things are working now
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ keys = list(bucket.list())
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ print('wait for 180 sec for reservations to be stale before queue deletion')
+ time.sleep(180)
+
+ # check http receiver
+ events = http_server.get_and_reset_events()
+
+ print(str(len(events)) + " events found out of " + str(number_of_objects))
+
+ # cleanup
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ gw.delete_bucket(bucket_name)
+ http_server.close()
+
+
+def test_ps_s3_persistent_gateways_recovery():
+ """ test gateway recovery of persistent notifications """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ if len(zonegroup.master_zone.gateways) < 2:
+ return SkipTest("this test requires two gateways")
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ number_of_objects = 10
+ http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
+
+ gw1 = master_zone
+ gw2 = zonegroup.master_zone.gateways[1]
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = gw1.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # create two s3 topics
+ endpoint_address = 'http://'+host+':'+str(port)
+ endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
+ topic_conf1 = PSTopicS3(gw1.conn, topic_name+'_1', zonegroup.name, endpoint_args=endpoint_args+'&OpaqueData=fromgw1')
+ topic_arn1 = topic_conf1.set_config()
+ topic_conf2 = PSTopicS3(gw2.connection, topic_name+'_2', zonegroup.name, endpoint_args=endpoint_args+'&OpaqueData=fromgw2')
+ topic_arn2 = topic_conf2.set_config()
+
+ # create two s3 notifications
+ notification_name = bucket_name + NOTIFICATION_SUFFIX+'_1'
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn1,
+ 'Events': ['s3:ObjectCreated:Put']
+ }]
+ s3_notification_conf1 = PSNotificationS3(gw1.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf1.set_config()
+ assert_equal(status/100, 2)
+ notification_name = bucket_name + NOTIFICATION_SUFFIX+'_2'
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn2,
+ 'Events': ['s3:ObjectRemoved:Delete']
+ }]
+ s3_notification_conf2 = PSNotificationS3(gw2.connection, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf2.set_config()
+ assert_equal(status/100, 2)
+
+ # stop gateway 2
+ print('stopping gateway2...')
+ gw2.stop()
+
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ keys = list(bucket.list())
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ print('wait for 60 sec before restarting the gateway')
+ time.sleep(60)
+ gw2.start()
+
+ # check http receiver
+ events = http_server.get_and_reset_events()
+ for key in keys:
+ creations = 0
+ deletions = 0
+ for event in events:
+ if event['Records'][0]['eventName'] == 's3:ObjectCreated:Put' and \
+ key.name == event['Records'][0]['s3']['object']['key']:
+ creations += 1
+ elif event['Records'][0]['eventName'] == 's3:ObjectRemoved:Delete' and \
+ key.name == event['Records'][0]['s3']['object']['key']:
+ deletions += 1
+ assert_equal(creations, 1)
+ assert_equal(deletions, 1)
+
+ # cleanup
+ s3_notification_conf1.del_config()
+ topic_conf1.del_config()
+ gw1.delete_bucket(bucket_name)
+ time.sleep(10)
+ s3_notification_conf2.del_config()
+ topic_conf2.del_config()
+ http_server.close()
+
+
+def test_ps_s3_persistent_multiple_gateways():
+ """ test pushing persistent notification via two gateways """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ if len(zonegroup.master_zone.gateways) < 2:
+ return SkipTest("this test requires two gateways")
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ number_of_objects = 10
+ http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
+
+ gw1 = master_zone
+ gw2 = zonegroup.master_zone.gateways[1]
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket1 = gw1.create_bucket(bucket_name)
+ bucket2 = gw2.connection.get_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # create two s3 topics
+ endpoint_address = 'http://'+host+':'+str(port)
+ endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
+ topic1_opaque = 'fromgw1'
+ topic_conf1 = PSTopicS3(gw1.conn, topic_name+'_1', zonegroup.name, endpoint_args=endpoint_args+'&OpaqueData='+topic1_opaque)
+ topic_arn1 = topic_conf1.set_config()
+ topic2_opaque = 'fromgw2'
+ topic_conf2 = PSTopicS3(gw2.connection, topic_name+'_2', zonegroup.name, endpoint_args=endpoint_args+'&OpaqueData='+topic2_opaque)
+ topic_arn2 = topic_conf2.set_config()
+
+ # create two s3 notifications
+ notification_name = bucket_name + NOTIFICATION_SUFFIX+'_1'
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn1,
+ 'Events': []
+ }]
+ s3_notification_conf1 = PSNotificationS3(gw1.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf1.set_config()
+ assert_equal(status/100, 2)
+ notification_name = bucket_name + NOTIFICATION_SUFFIX+'_2'
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn2,
+ 'Events': []
+ }]
+ s3_notification_conf2 = PSNotificationS3(gw2.connection, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf2.set_config()
+ assert_equal(status/100, 2)
+
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key = bucket1.new_key('gw1_'+str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ key = bucket2.new_key('gw2_'+str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ keys = list(bucket1.list())
+
+ delay = 30
+ print('wait for '+str(delay)+'sec for the messages...')
+ time.sleep(delay)
+
+ events = http_server.get_and_reset_events()
+ for key in keys:
+ topic1_count = 0
+ topic2_count = 0
+ for event in events:
+ if event['Records'][0]['eventName'] == 's3:ObjectCreated:Put' and \
+ key.name == event['Records'][0]['s3']['object']['key'] and \
+ topic1_opaque == event['Records'][0]['opaqueData']:
+ topic1_count += 1
+ elif event['Records'][0]['eventName'] == 's3:ObjectCreated:Put' and \
+ key.name == event['Records'][0]['s3']['object']['key'] and \
+ topic2_opaque == event['Records'][0]['opaqueData']:
+ topic2_count += 1
+ assert_equal(topic1_count, 1)
+ assert_equal(topic2_count, 1)
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket1.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ print('wait for '+str(delay)+'sec for the messages...')
+ time.sleep(delay)
+
+ events = http_server.get_and_reset_events()
+ for key in keys:
+ topic1_count = 0
+ topic2_count = 0
+ for event in events:
+ if event['Records'][0]['eventName'] == 's3:ObjectRemoved:Delete' and \
+ key.name == event['Records'][0]['s3']['object']['key'] and \
+ topic1_opaque == event['Records'][0]['opaqueData']:
+ topic1_count += 1
+ elif event['Records'][0]['eventName'] == 's3:ObjectRemoved:Delete' and \
+ key.name == event['Records'][0]['s3']['object']['key'] and \
+ topic2_opaque == event['Records'][0]['opaqueData']:
+ topic2_count += 1
+ assert_equal(topic1_count, 1)
+ assert_equal(topic2_count, 1)
+
+ # cleanup
+ s3_notification_conf1.del_config()
+ topic_conf1.del_config()
+ s3_notification_conf2.del_config()
+ topic_conf2.del_config()
+ gw1.delete_bucket(bucket_name)
+ http_server.close()
+
+
+def test_ps_s3_persistent_multiple_endpoints():
+ """ test pushing persistent notification when one of the endpoints has error """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ number_of_objects = 10
+ http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # create two s3 topics
+ endpoint_address = 'http://'+host+':'+str(port)
+ endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
+ topic_conf1 = PSTopicS3(master_zone.conn, topic_name+'_1', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn1 = topic_conf1.set_config()
+ endpoint_address = 'http://kaboom:9999'
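+ # the second topic points at an unresolvable endpoint, so its deliveries keep failing while the first topic should still receive all events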
+ endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
+ topic_conf2 = PSTopicS3(master_zone.conn, topic_name+'_2', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn2 = topic_conf2.set_config()
+
+ # create two s3 notifications
+ notification_name = bucket_name + NOTIFICATION_SUFFIX+'_1'
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn1,
+ 'Events': []
+ }]
+ s3_notification_conf1 = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf1.set_config()
+ assert_equal(status/100, 2)
+ notification_name = bucket_name + NOTIFICATION_SUFFIX+'_2'
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn2,
+ 'Events': []
+ }]
+ s3_notification_conf2 = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf2.set_config()
+ assert_equal(status/100, 2)
+
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ keys = list(bucket.list())
+
+ delay = 30
+ print('wait for '+str(delay)+'sec for the messages...')
+ time.sleep(delay)
+
+ http_server.verify_s3_events(keys, exact_match=False, deletions=False)
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ print('wait for '+str(delay)+'sec for the messages...')
+ time.sleep(delay)
+
+ http_server.verify_s3_events(keys, exact_match=False, deletions=True)
+
+ # cleanup
+ s3_notification_conf1.del_config()
+ topic_conf1.del_config()
+ s3_notification_conf2.del_config()
+ topic_conf2.del_config()
+ master_zone.delete_bucket(bucket_name)
+ http_server.close()
+
+
+def persistent_notification(endpoint_type):
+ """ test pushing persistent notification """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ receiver = {}
+ host = get_ip()
+ if endpoint_type == 'http':
+ # create random port for the http server
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ receiver = StreamingHTTPServer(host, port, num_workers=10)
+ endpoint_address = 'http://'+host+':'+str(port)
+ endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
+ # the http server does not guarantee order, so duplicates are expected
+ exact_match = False
+ elif endpoint_type == 'amqp':
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ # start amqp receiver
+ exchange = 'ex1'
+ task, receiver = create_amqp_receiver_thread(exchange, topic_name)
+ task.start()
+ endpoint_address = 'amqp://' + host
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange='+exchange+'&amqp-ack-level=broker'+'&persistent=true'
+ # the amqp broker guarantees ordering
+ exact_match = True
+ else:
+ return SkipTest('Unknown endpoint type: ' + endpoint_type)
+
+ # create s3 topic
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
+ 'Events': []
+ }]
+
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket (async)
+ number_of_objects = 100
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for creation + async notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ keys = list(bucket.list())
+
+ delay = 40
+ print('wait for '+str(delay)+'sec for the messages...')
+ time.sleep(delay)
+
+ receiver.verify_s3_events(keys, exact_match=exact_match, deletions=False)
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for deletion + async notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for '+str(delay)+'sec for the messages...')
+ time.sleep(delay)
+
+ receiver.verify_s3_events(keys, exact_match=exact_match, deletions=True)
+
+ # cleanup
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ if endpoint_type == 'http':
+ receiver.close()
+ else:
+ stop_amqp_receiver(receiver, task)
+ clean_rabbitmq(proc)
+
+
+def test_ps_s3_persistent_notification_http():
+ """ test pushing persistent notification http """
+ persistent_notification('http')
+
+
+def test_ps_s3_persistent_notification_amqp():
+ """ test pushing persistent notification amqp """
+ persistent_notification('amqp')
+
+
+def random_string(length):
+ import string
+ letters = string.ascii_letters
+ return ''.join(random.choice(letters) for i in range(length))
+
+
+def test_ps_s3_persistent_notification_large():
+ """ test pushing persistent notification of large notifications """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ receiver = {}
+ host = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ # start amqp receiver
+ exchange = 'ex1'
+ task, receiver = create_amqp_receiver_thread(exchange, topic_name)
+ task.start()
+ endpoint_address = 'amqp://' + host
+ opaque_data = random_string(1024*2)
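+ # OpaqueData is echoed back in every record, so a 2KB value makes each pushed notification large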
+ endpoint_args = 'push-endpoint='+endpoint_address+'&OpaqueData='+opaque_data+'&amqp-exchange='+exchange+'&amqp-ack-level=broker'+'&persistent=true'
+ # the amqp broker guarantees ordering
+ exact_match = True
+
+ # create s3 topic
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
+ 'Events': []
+ }]
+
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket (async)
+ number_of_objects = 100
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key_value = random_string(63)
+ key = bucket.new_key(key_value)
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for creation + async amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ keys = list(bucket.list())
+
+ delay = 40
+ print('wait for '+str(delay)+'sec for the messages...')
+ time.sleep(delay)
+
+ receiver.verify_s3_events(keys, exact_match=exact_match, deletions=False)
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for deletion + async amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for '+str(delay)+'sec for the messages...')
+ time.sleep(delay)
+
+ receiver.verify_s3_events(keys, exact_match=exact_match, deletions=True)
+
+ # cleanup
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ stop_amqp_receiver(receiver, task)
+ clean_rabbitmq(proc)
+
+
+def test_ps_s3_persistent_notification_pushback():
+ """ test pushing persistent notification pushback """
+ return SkipTest("only used in manual testing")
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ http_server = StreamingHTTPServer(host, port, num_workers=10, delay=0.5)
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # create s3 topic
+ endpoint_address = 'http://'+host+':'+str(port)
+ endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
+ 'Events': []
+ }]
+
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket (async)
+ for j in range(100):
+ number_of_objects = random.randint(500, 1000)
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(j)+'-'+str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+ time_diff = time.time() - start_time
+ print('average time for creation + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ keys = list(bucket.list())
+
+ delay = 30
+ print('wait for '+str(delay)+'sec for the messages...')
+ time.sleep(delay)
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ count = 0
+ for key in bucket.list():
+ count += 1
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ if count%100 == 0:
+ [thr.join() for thr in client_threads]
+ time_diff = time.time() - start_time
+ print('average time for deletion + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+ client_threads = []
+ start_time = time.time()
+
+ print('wait for '+str(delay)+'sec for the messages...')
+ time.sleep(delay)
+
+ # cleanup
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ time.sleep(delay)
+ http_server.close()
+
+
+def test_ps_s3_notification_push_kafka():
+ """ test pushing kafka s3 notification on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ kafka_proc, zk_proc, kafka_log = init_kafka()
+ if kafka_proc is None or zk_proc is None:
+ return SkipTest('end2end kafka tests require kafka/zookeeper installed')
+
+ master_zone, ps_zone = init_env()
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # name is constant for manual testing
+ topic_name = bucket_name+'_topic'
+ # create consumer on the topic
+ task, receiver = create_kafka_receiver_thread(topic_name)
+ task.start()
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name,
+ endpoint='kafka://' + kafka_server,
+ endpoint_args='kafka-ack-level=broker')
+ result, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ parsed_result = json.loads(result)
+ topic_arn = parsed_result['arn']
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
+ 'Events': []
+ }]
+
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket (async)
+ number_of_objects = 10
+ client_threads = []
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ keys = list(bucket.list())
+ receiver.verify_s3_events(keys, exact_match=True)
+
+ # delete objects from the bucket
+ client_threads = []
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ receiver.verify_s3_events(keys, exact_match=True, deletions=True)
+
+ # cleanup
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ stop_kafka_receiver(receiver, task)
+ clean_kafka(kafka_proc, zk_proc, kafka_log)
+
+
+def test_ps_s3_notification_push_kafka_on_master():
+ """ test pushing kafka s3 notification on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ kafka_proc, zk_proc, kafka_log = init_kafka()
+ if kafka_proc is None or zk_proc is None:
+ return SkipTest('end2end kafka tests require kafka/zookeeper installed')
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ # name is constant for manual testing
+ topic_name = bucket_name+'_topic'
+ # create consumer on the topic
+ task, receiver = create_kafka_receiver_thread(topic_name+'_1')
+ task.start()
+
+ # create s3 topic
+ endpoint_address = 'kafka://' + kafka_server
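+ # kafka-ack-level=broker waits for the broker to acknowledge each message, while 'none' does not wait for an ack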
+ # with acks from broker
+ endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker'
+ topic_conf1 = PSTopicS3(master_zone.conn, topic_name+'_1', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn1 = topic_conf1.set_config()
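+ # without acks from broker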
+ endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=none'
+ topic_conf2 = PSTopicS3(master_zone.conn, topic_name+'_2', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn2 = topic_conf2.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name + '_1', 'TopicArn': topic_arn1,
+ 'Events': []
+ },
+ {'Id': notification_name + '_2', 'TopicArn': topic_arn2,
+ 'Events': []
+ }]
+
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket (async)
+ number_of_objects = 10
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+ keys = list(bucket.list())
+ receiver.verify_s3_events(keys, exact_match=True)
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+ receiver.verify_s3_events(keys, exact_match=True, deletions=True)
+
+ # cleanup
+ s3_notification_conf.del_config()
+ topic_conf1.del_config()
+ topic_conf2.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ stop_kafka_receiver(receiver, task)
+ clean_kafka(kafka_proc, zk_proc, kafka_log)
+
+
+def kafka_security(security_type):
+    """ test pushing kafka s3 notification securely to master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ master_zone, _ = init_env(require_ps=False)
+ if security_type == 'SSL_SASL' and master_zone.secure_conn is None:
+ return SkipTest("secure connection is needed to test SASL_SSL security")
+ kafka_proc, zk_proc, kafka_log = init_kafka()
+ if kafka_proc is None or zk_proc is None:
+ return SkipTest('end2end kafka tests require kafka/zookeeper installed')
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ # name is constant for manual testing
+ topic_name = bucket_name+'_topic'
+ # create consumer on the topic
+ task, receiver = create_kafka_receiver_thread(topic_name)
+ task.start()
+
+ # create s3 topic
+ if security_type == 'SSL_SASL':
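+        # the SASL_SSL listener is assumed to be configured on port 9094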
+ endpoint_address = 'kafka://alice:alice-secret@' + kafka_server + ':9094'
+ else:
+ # ssl only
+ endpoint_address = 'kafka://' + kafka_server + ':9093'
+
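+    # KAFKA_DIR is expected to end with a path separator, since 'rootCA.crt' is appended to it directly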
+ KAFKA_DIR = os.environ['KAFKA_DIR']
+
+ # without acks from broker, with root CA
+ endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=none&use-ssl=true&ca-location='+KAFKA_DIR+'rootCA.crt'
+
+ if security_type == 'SSL_SASL':
+ topic_conf = PSTopicS3(master_zone.secure_conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ else:
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
+ 'Events': []
+ }]
+
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ s3_notification_conf.set_config()
+
+ # create objects in the bucket (async)
+ number_of_objects = 10
+ client_threads = []
+ start_time = time.time()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ content = str(os.urandom(1024*1024))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ try:
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+ keys = list(bucket.list())
+ receiver.verify_s3_events(keys, exact_match=True)
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+ receiver.verify_s3_events(keys, exact_match=True, deletions=True)
+ except Exception as err:
+ assert False, str(err)
+ finally:
+ # cleanup
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+        # delete the objects and then the bucket
+ for key in bucket.list():
+ key.delete()
+ master_zone.delete_bucket(bucket_name)
+ stop_kafka_receiver(receiver, task)
+ clean_kafka(kafka_proc, zk_proc, kafka_log)
+
+
+def test_ps_s3_notification_push_kafka_security_ssl():
+ kafka_security('SSL')
+
+def test_ps_s3_notification_push_kafka_security_ssl_sasl():
+ kafka_security('SSL_SASL')
+
+
+def test_ps_s3_notification_multi_delete_on_master():
+ """ test deletion of multiple keys on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ number_of_objects = 10
+ http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # create s3 topic
+ endpoint_address = 'http://'+host+':'+str(port)
+ endpoint_args = 'push-endpoint='+endpoint_address
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectRemoved:*']
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket
+ client_threads = []
+ objects_size = {}
+ for i in range(number_of_objects):
+ content = str(os.urandom(randint(1, 1024)))
+ object_size = len(content)
+ key = bucket.new_key(str(i))
+ objects_size[key.name] = object_size
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ keys = list(bucket.list())
+
+ start_time = time.time()
+ delete_all_objects(master_zone.conn, bucket_name)
+ time_diff = time.time() - start_time
+ print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+
+ # check http receiver
+ http_server.verify_s3_events(keys, exact_match=True, deletions=True, expected_sizes=objects_size)
+
+ # cleanup
+ topic_conf.del_config()
+ s3_notification_conf.del_config(notification=notification_name)
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ http_server.close()
+
+
+def test_ps_s3_notification_push_http_on_master():
+ """ test pushing http s3 notification on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ number_of_objects = 10
+ http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # create s3 topic
+ endpoint_address = 'http://'+host+':'+str(port)
+ endpoint_args = 'push-endpoint='+endpoint_address
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': []
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket
+ client_threads = []
+ objects_size = {}
+ start_time = time.time()
+ for i in range(number_of_objects):
+ content = str(os.urandom(randint(1, 1024)))
+ object_size = len(content)
+ key = bucket.new_key(str(i))
+ objects_size[key.name] = object_size
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+
+ # check http receiver
+ keys = list(bucket.list())
+ http_server.verify_s3_events(keys, exact_match=True, deletions=False, expected_sizes=objects_size)
+
+ # delete objects from the bucket
+ client_threads = []
+ start_time = time.time()
+ for key in bucket.list():
+ thr = threading.Thread(target = key.delete, args=())
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+
+ # check http receiver
+ http_server.verify_s3_events(keys, exact_match=True, deletions=True, expected_sizes=objects_size)
+
+ # cleanup
+ topic_conf.del_config()
+ s3_notification_conf.del_config(notification=notification_name)
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ http_server.close()
+
+
+def test_ps_s3_opaque_data():
+    """ test that opaque data set in the topic is sent in the notifications """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ master_zone, ps_zone = init_env()
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ number_of_objects = 10
+ http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+
+ # create s3 topic
+ endpoint_address = 'http://'+host+':'+str(port)
+ opaque_data = 'http://1.2.3.4:8888'
+ endpoint_args = 'push-endpoint='+endpoint_address+'&OpaqueData='+opaque_data
+ topic_conf = PSTopic(ps_zone.conn, topic_name, endpoint=endpoint_address, endpoint_args=endpoint_args)
+ result, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ parsed_result = json.loads(result)
+ topic_arn = parsed_result['arn']
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': []
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket
+ client_threads = []
+ content = 'bar'
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # check http receiver
+ keys = list(bucket.list())
+ print('total number of objects: ' + str(len(keys)))
+ events = http_server.get_and_reset_events()
+ for event in events:
+ assert_equal(event['Records'][0]['opaqueData'], opaque_data)
+
+ # cleanup
+ for key in keys:
+ key.delete()
+ [thr.join() for thr in client_threads]
+ topic_conf.del_config()
+ s3_notification_conf.del_config(notification=notification_name)
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ http_server.close()
+
+
+def test_ps_s3_opaque_data_on_master():
+    """ test that opaque data set in the topic is sent in the notifications on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ number_of_objects = 10
+ http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects)
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # create s3 topic
+ endpoint_address = 'http://'+host+':'+str(port)
+ endpoint_args = 'push-endpoint='+endpoint_address
+ opaque_data = 'http://1.2.3.4:8888'
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args, opaque_data=opaque_data)
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': []
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket
+ client_threads = []
+ start_time = time.time()
+ content = 'bar'
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ thr = threading.Thread(target = set_contents_from_string, args=(key, content,))
+ thr.start()
+ client_threads.append(thr)
+ [thr.join() for thr in client_threads]
+
+ time_diff = time.time() - start_time
+ print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+
+ # check http receiver
+ keys = list(bucket.list())
+ print('total number of objects: ' + str(len(keys)))
+ events = http_server.get_and_reset_events()
+ for event in events:
+ assert_equal(event['Records'][0]['opaqueData'], opaque_data)
+
+ # cleanup
+ for key in keys:
+ key.delete()
+ [thr.join() for thr in client_threads]
+ topic_conf.del_config()
+ s3_notification_conf.del_config(notification=notification_name)
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ http_server.close()
+
+def test_ps_topic():
+ """ test set/get/delete of topic """
+ _, ps_zone = init_env()
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ _, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ # get topic
+ result, _ = topic_conf.get_config()
+ # verify topic content
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['topic']['name'], topic_name)
+ assert_equal(len(parsed_result['subs']), 0)
+ assert_equal(parsed_result['topic']['arn'],
+ 'arn:aws:sns:' + zonegroup.name + ':' + get_tenant() + ':' + topic_name)
+ # delete topic
+ _, status = topic_conf.del_config()
+ assert_equal(status/100, 2)
+    # verify that the topic is deleted
+ result, status = topic_conf.get_config()
+ assert_equal(status, 404)
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['Code'], 'NoSuchKey')
+
+
+def test_ps_topic_with_endpoint():
+ """ test set topic with endpoint"""
+ _, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ dest_endpoint = 'amqp://localhost:7001'
+ dest_args = 'amqp-exchange=amqp.direct&amqp-ack-level=none'
+ topic_conf = PSTopic(ps_zone.conn, topic_name,
+ endpoint=dest_endpoint,
+ endpoint_args=dest_args)
+ _, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ # get topic
+ result, _ = topic_conf.get_config()
+ # verify topic content
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['topic']['name'], topic_name)
+ assert_equal(parsed_result['topic']['dest']['push_endpoint'], dest_endpoint)
+ # cleanup
+ topic_conf.del_config()
+
+
+def test_ps_notification():
+ """ test set/get/delete of notification """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ topic_conf.set_config()
+ # create bucket on the first of the rados zones
+ master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create notifications
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # get notification
+ result, _ = notification_conf.get_config()
+ parsed_result = json.loads(result)
+ assert_equal(len(parsed_result['topics']), 1)
+ assert_equal(parsed_result['topics'][0]['topic']['name'],
+ topic_name)
+ # delete notification
+ _, status = notification_conf.del_config()
+ assert_equal(status/100, 2)
+ result, status = notification_conf.get_config()
+ parsed_result = json.loads(result)
+ assert_equal(len(parsed_result['topics']), 0)
+ # TODO should return 404
+ # assert_equal(status, 404)
+
+ # cleanup
+ topic_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_notification_events():
+ """ test set/get/delete of notification on specific events"""
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ topic_conf.set_config()
+ # create bucket on the first of the rados zones
+ master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create notifications
+ events = "OBJECT_CREATE,OBJECT_DELETE"
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name,
+ events)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # get notification
+ result, _ = notification_conf.get_config()
+ parsed_result = json.loads(result)
+ assert_equal(len(parsed_result['topics']), 1)
+ assert_equal(parsed_result['topics'][0]['topic']['name'],
+ topic_name)
+ assert_not_equal(len(parsed_result['topics'][0]['events']), 0)
+ # TODO add test for invalid event name
+
+ # cleanup
+ notification_conf.del_config()
+ topic_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_subscription():
+ """ test set/get/delete of subscription """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ topic_conf.set_config()
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create notifications
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription
+ sub_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX,
+ topic_name)
+ _, status = sub_conf.set_config()
+ assert_equal(status/100, 2)
+ # get the subscription
+ result, _ = sub_conf.get_config()
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['topic'], topic_name)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # get the create events from the subscription
+ result, _ = sub_conf.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event: objname: "' + str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
+ keys = list(bucket.list())
+ # TODO: use exact match
+ verify_events_by_elements(events, keys, exact_match=False)
+ # delete objects from the bucket
+ for key in bucket.list():
+ key.delete()
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # get the delete events from the subscriptions
+ #result, _ = sub_conf.get_events()
+ #for event in events['events']:
+ # log.debug('Event: objname: "' + str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
+ # TODO: check deletions
+ # TODO: use exact match
+ # verify_events_by_elements(events, keys, exact_match=False, deletions=True)
+ # we should see the creations as well as the deletions
+ # delete subscription
+ _, status = sub_conf.del_config()
+ assert_equal(status/100, 2)
+ result, status = sub_conf.get_config()
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['topic'], '')
+ # TODO should return 404
+ # assert_equal(status, 404)
+
+ # cleanup
+ notification_conf.del_config()
+ topic_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_admin():
+ """ test radosgw-admin commands """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ topic_conf.set_config()
+ result, status = topic_conf.get_config()
+ assert_equal(status, 200)
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['topic']['name'], topic_name)
+ result, status = ps_zone.zone.cluster.admin(['topic', 'list', '--uid', get_user()] + ps_zone.zone.zone_arg())
+ assert_equal(status, 0)
+ parsed_result = json.loads(result)
+ assert len(parsed_result['topics']) > 0
+ result, status = ps_zone.zone.cluster.admin(['topic', 'get', '--uid', get_user(), '--topic', topic_name] + ps_zone.zone.zone_arg())
+ assert_equal(status, 0)
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['topic']['name'], topic_name)
+
+ # create s3 topics
+ endpoint_address = 'amqp://127.0.0.1:7001/vhost_1'
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none'
+ topic_conf_s3 = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_conf_s3.set_config()
+ result, status = topic_conf_s3.get_config()
+ assert_equal(status, 200)
+ assert_equal(result['GetTopicResponse']['GetTopicResult']['Topic']['Name'], topic_name)
+ result, status = master_zone.zone.cluster.admin(['topic', 'list', '--uid', get_user()] + master_zone.zone.zone_arg())
+ assert_equal(status, 0)
+ parsed_result = json.loads(result)
+ assert len(parsed_result['topics']) > 0
+ result, status = master_zone.zone.cluster.admin(['topic', 'get', '--uid', get_user(), '--topic', topic_name] + master_zone.zone.zone_arg())
+ assert_equal(status, 0)
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['topic']['name'], topic_name)
+
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create notifications
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription
+ sub_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX,
+ topic_name)
+ _, status = sub_conf.set_config()
+ assert_equal(status/100, 2)
+ result, status = ps_zone.zone.cluster.admin(['subscription', 'get', '--uid', get_user(), '--subscription', bucket_name+SUB_SUFFIX]
+ + ps_zone.zone.zone_arg())
+ assert_equal(status, 0)
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['name'], bucket_name+SUB_SUFFIX)
+ # create objects in the bucket
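+    # enough objects so that pulling the events below exercises the marker-based pagination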
+ number_of_objects = 110
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ # get events from subscription
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ result, status = ps_zone.zone.cluster.admin(['subscription', 'pull', '--uid', get_user(), '--subscription', bucket_name+SUB_SUFFIX]
+ + ps_zone.zone.zone_arg())
+ assert_equal(status, 0)
+ parsed_result = json.loads(result)
+ marker = parsed_result['next_marker']
+ events1 = parsed_result['events']
+ result, status = ps_zone.zone.cluster.admin(['subscription', 'pull', '--uid', get_user(), '--subscription', bucket_name+SUB_SUFFIX, '--marker', marker]
+ + ps_zone.zone.zone_arg())
+ assert_equal(status, 0)
+ parsed_result = json.loads(result)
+ events2 = parsed_result['events']
+
+ keys = list(bucket.list())
+ verify_events_by_elements({"events": events1+events2}, keys, exact_match=False)
+
+ # ack an event in the subscription
+ result, status = ps_zone.zone.cluster.admin(['subscription', 'ack', '--uid', get_user(), '--subscription', bucket_name+SUB_SUFFIX, '--event-id', events2[0]['id']]
+ + ps_zone.zone.zone_arg())
+ assert_equal(status, 0)
+
+ # remove the subscription
+ result, status = ps_zone.zone.cluster.admin(['subscription', 'rm', '--uid', get_user(), '--subscription', bucket_name+SUB_SUFFIX]
+ + ps_zone.zone.zone_arg())
+ assert_equal(status, 0)
+
+ # remove the topics
+ result, status = ps_zone.zone.cluster.admin(['topic', 'rm', '--uid', get_user(), '--topic', topic_name]
+ + ps_zone.zone.zone_arg())
+ assert_equal(status, 0)
+ result, status = master_zone.zone.cluster.admin(['topic', 'rm', '--uid', get_user(), '--topic', topic_name]
+ + master_zone.zone.zone_arg())
+ assert_equal(status, 0)
+
+ # cleanup
+ for key in bucket.list():
+ key.delete()
+ notification_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_incremental_sync():
+ """ test that events are only sent on incremental sync """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ topic_conf.set_config()
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(0, number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('foo')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # create notifications
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription
+ sub_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX,
+ topic_name)
+ _, status = sub_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create more objects in the bucket
+ for i in range(number_of_objects, 2*number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # get the create events from the subscription
+ result, _ = sub_conf.get_events()
+ events = json.loads(result)
+ count = 0
+ for event in events['events']:
+ log.debug('Event: objname: "' + str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
+ count += 1
+
+ # make sure we have 10 and not 20 events
+ assert_equal(count, number_of_objects)
+
+ # cleanup
+ for key in bucket.list():
+ key.delete()
+ sub_conf.del_config()
+ notification_conf.del_config()
+ topic_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+
+def test_ps_event_type_subscription():
+ """ test subscriptions for different events """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+
+ # create topic for objects creation
+ topic_create_name = bucket_name+TOPIC_SUFFIX+'_create'
+ topic_create_conf = PSTopic(ps_zone.conn, topic_create_name)
+ topic_create_conf.set_config()
+ # create topic for objects deletion
+ topic_delete_name = bucket_name+TOPIC_SUFFIX+'_delete'
+ topic_delete_conf = PSTopic(ps_zone.conn, topic_delete_name)
+ topic_delete_conf.set_config()
+ # create topic for all events
+ topic_name = bucket_name+TOPIC_SUFFIX+'_all'
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ topic_conf.set_config()
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # create notifications for objects creation
+ notification_create_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_create_name, "OBJECT_CREATE")
+ _, status = notification_create_conf.set_config()
+ assert_equal(status/100, 2)
+ # create notifications for objects deletion
+ notification_delete_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_delete_name, "OBJECT_DELETE")
+ _, status = notification_delete_conf.set_config()
+ assert_equal(status/100, 2)
+ # create notifications for all events
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name, "OBJECT_DELETE,OBJECT_CREATE")
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription for objects creation
+ sub_create_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX+'_create',
+ topic_create_name)
+ _, status = sub_create_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription for objects deletion
+ sub_delete_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX+'_delete',
+ topic_delete_name)
+ _, status = sub_delete_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription for all events
+ sub_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX+'_all',
+ topic_name)
+ _, status = sub_conf.set_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # get the events from the creation subscription
+ result, _ = sub_create_conf.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event (OBJECT_CREATE): objname: "' + str(event['info']['key']['name']) +
+ '" type: "' + str(event['event']) + '"')
+ keys = list(bucket.list())
+ # TODO: use exact match
+ verify_events_by_elements(events, keys, exact_match=False)
+ # get the events from the deletions subscription
+ result, _ = sub_delete_conf.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event (OBJECT_DELETE): objname: "' + str(event['info']['key']['name']) +
+ '" type: "' + str(event['event']) + '"')
+ assert_equal(len(events['events']), 0)
+ # get the events from the all events subscription
+ result, _ = sub_conf.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event (OBJECT_CREATE,OBJECT_DELETE): objname: "' +
+ str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
+ # TODO: use exact match
+ verify_events_by_elements(events, keys, exact_match=False)
+ # delete objects from the bucket
+ for key in bucket.list():
+ key.delete()
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ log.debug("Event (OBJECT_DELETE) synced")
+
+ # get the events from the creations subscription
+ result, _ = sub_create_conf.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event (OBJECT_CREATE): objname: "' + str(event['info']['key']['name']) +
+ '" type: "' + str(event['event']) + '"')
+ # deletions should not change the creation events
+ # TODO: use exact match
+ verify_events_by_elements(events, keys, exact_match=False)
+ # get the events from the deletions subscription
+ result, _ = sub_delete_conf.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event (OBJECT_DELETE): objname: "' + str(event['info']['key']['name']) +
+ '" type: "' + str(event['event']) + '"')
+ # only deletions should be listed here
+ # TODO: use exact match
+ verify_events_by_elements(events, keys, exact_match=False, deletions=True)
+ # get the events from the all events subscription
+    result, _ = sub_conf.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event (OBJECT_CREATE,OBJECT_DELETE): objname: "' + str(event['info']['key']['name']) +
+ '" type: "' + str(event['event']) + '"')
+ # both deletions and creations should be here
+ # TODO: use exact match
+ verify_events_by_elements(events, keys, exact_match=False, deletions=False)
+ # verify_events_by_elements(events, keys, exact_match=False, deletions=True)
+ # TODO: (1) test deletions (2) test overall number of events
+
+ # test subscription deletion when topic is specified
+ _, status = sub_create_conf.del_config(topic=True)
+ assert_equal(status/100, 2)
+ _, status = sub_delete_conf.del_config(topic=True)
+ assert_equal(status/100, 2)
+ _, status = sub_conf.del_config(topic=True)
+ assert_equal(status/100, 2)
+
+ # cleanup
+ notification_create_conf.del_config()
+ notification_delete_conf.del_config()
+ notification_conf.del_config()
+ topic_create_conf.del_config()
+ topic_delete_conf.del_config()
+ topic_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_event_fetching():
+ """ test incremental fetching of events from a subscription """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ topic_conf.set_config()
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create notifications
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription
+ sub_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX,
+ topic_name)
+ _, status = sub_conf.set_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket
+ number_of_objects = 100
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ max_events = 15
+ total_events_count = 0
+ next_marker = None
+ all_events = []
+ while True:
+ # get the events from the subscription
+ result, _ = sub_conf.get_events(max_events, next_marker)
+ events = json.loads(result)
+ total_events_count += len(events['events'])
+ all_events.extend(events['events'])
+ next_marker = events['next_marker']
+ for event in events['events']:
+ log.debug('Event: objname: "' + str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
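+        # an empty next_marker means there are no more events to fetch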
+ if next_marker == '':
+ break
+ keys = list(bucket.list())
+ # TODO: use exact match
+ verify_events_by_elements({'events': all_events}, keys, exact_match=False)
+
+ # cleanup
+ sub_conf.del_config()
+ notification_conf.del_config()
+ topic_conf.del_config()
+ for key in bucket.list():
+ key.delete()
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_event_acking():
+ """ test acking of some events in a subscription """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ topic_conf.set_config()
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create notifications
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription
+ sub_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX,
+ topic_name)
+ _, status = sub_conf.set_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # get the create events from the subscription
+ result, _ = sub_conf.get_events()
+ events = json.loads(result)
+    original_number_of_events = len(events['events'])
+ for event in events['events']:
+ log.debug('Event (before ack) id: "' + str(event['id']) + '"')
+ keys = list(bucket.list())
+ # TODO: use exact match
+ verify_events_by_elements(events, keys, exact_match=False)
+ # ack half of the events
+    events_to_ack = number_of_objects // 2
+ for event in events['events']:
+ if events_to_ack == 0:
+ break
+ _, status = sub_conf.ack_events(event['id'])
+ assert_equal(status/100, 2)
+ events_to_ack -= 1
+
+ # verify that acked events are gone
+ result, _ = sub_conf.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event (after ack) id: "' + str(event['id']) + '"')
+    assert len(events['events']) >= (original_number_of_events - number_of_objects // 2)
+
+ # cleanup
+ sub_conf.del_config()
+ notification_conf.del_config()
+ topic_conf.del_config()
+ for key in bucket.list():
+ key.delete()
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_creation_triggers():
+    """ test object creation notifications using put/copy/multipart upload """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ topic_conf.set_config()
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create notifications
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription
+ sub_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX,
+ topic_name)
+ _, status = sub_conf.set_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket using PUT
+ key = bucket.new_key('put')
+ key.set_contents_from_string('bar')
+ # create objects in the bucket using COPY
+ bucket.copy_key('copy', bucket.name, key.name)
+
+ # create objects in the bucket using multi-part upload
+ fp = tempfile.NamedTemporaryFile(mode='w+b')
+ object_size = 1024
+ content = bytearray(os.urandom(object_size))
+ fp.write(content)
+ fp.flush()
+ fp.seek(0)
+ uploader = bucket.initiate_multipart_upload('multipart')
+ uploader.upload_part_from_file(fp, 1)
+ uploader.complete_upload()
+ fp.close()
+
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # get the create events from the subscription
+ result, _ = sub_conf.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event key: "' + str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
+
+ # TODO: verify the specific 3 keys: 'put', 'copy' and 'multipart'
+ assert len(events['events']) >= 3
+ # cleanup
+ sub_conf.del_config()
+ notification_conf.del_config()
+ topic_conf.del_config()
+ for key in bucket.list():
+ key.delete()
+ master_zone.delete_bucket(bucket_name)
+
+def ps_s3_creation_triggers_on_master(external_endpoint_address=None, ca_location=None, verify_ssl='true'):
+    """ test object creation s3 notifications using put/copy/multipart upload on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ if not external_endpoint_address:
+ hostname = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ else:
+ proc = None
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # start amqp receiver
+ exchange = 'ex1'
+ task, receiver = create_amqp_receiver_thread(exchange, topic_name, external_endpoint_address, ca_location)
+ task.start()
+
+ # create s3 topic
+ if external_endpoint_address:
+ endpoint_address = external_endpoint_address
+ elif ca_location:
+ endpoint_address = 'amqps://' + hostname
+ else:
+ endpoint_address = 'amqp://' + hostname
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker&verify-ssl='+verify_ssl
+ if ca_location:
+ endpoint_args += '&ca-location={}'.format(ca_location)
+ if external_endpoint_address:
+ topic_conf = PSTopicS3(master_zone.secure_conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ else:
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:Put', 's3:ObjectCreated:Copy']
+ }]
+
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ objects_size = {}
+ # create objects in the bucket using PUT
+ content = str(os.urandom(randint(1, 1024)))
+ key_name = 'put'
+ key = bucket.new_key(key_name)
+ objects_size[key_name] = len(content)
+ key.set_contents_from_string(content)
+ # create objects in the bucket using COPY
+ key_name = 'copy'
+ bucket.copy_key(key_name, bucket.name, key.name)
+ objects_size[key_name] = len(content)
+
+ # create objects in the bucket using multi-part upload
+ fp = tempfile.NamedTemporaryFile(mode='w+b')
+ content = bytearray(os.urandom(10*1024*1024))
+ key_name = 'multipart'
+ objects_size[key_name] = len(content)
+ fp.write(content)
+ fp.flush()
+ fp.seek(0)
+ uploader = bucket.initiate_multipart_upload(key_name)
+ uploader.upload_part_from_file(fp, 1)
+ uploader.complete_upload()
+ fp.close()
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+
+ # check amqp receiver
+ keys = list(bucket.list())
+ receiver.verify_s3_events(keys, exact_match=True, expected_sizes=objects_size)
+
+ # cleanup
+ stop_amqp_receiver(receiver, task)
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ for key in bucket.list():
+ key.delete()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ if proc:
+ clean_rabbitmq(proc)
+
+
+def test_ps_s3_creation_triggers_on_master():
+ ps_s3_creation_triggers_on_master()
+
+
+def test_ps_s3_creation_triggers_on_master_external():
+ from distutils.util import strtobool
+
+ if 'AMQP_EXTERNAL_ENDPOINT' in os.environ:
+ try:
+ if strtobool(os.environ['AMQP_VERIFY_SSL']):
+ verify_ssl = 'true'
+ else:
+ verify_ssl = 'false'
+ except Exception as e:
+ verify_ssl = 'true'
+
+ ps_s3_creation_triggers_on_master(
+ external_endpoint_address=os.environ['AMQP_EXTERNAL_ENDPOINT'],
+ verify_ssl=verify_ssl)
+ else:
+ return SkipTest("Set AMQP_EXTERNAL_ENDPOINT to a valid external AMQP endpoint url for this test to run")
+
+def test_ps_s3_creation_triggers_on_master_ssl():
+ import datetime
+ import textwrap
+ import stat
+ from cryptography import x509
+ from cryptography.x509.oid import NameOID
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import serialization
+ from cryptography.hazmat.primitives.asymmetric import rsa
+ from tempfile import TemporaryDirectory
+
+ with TemporaryDirectory() as tempdir:
+ # modify permissions to ensure that the rabbitmq user can access them
+ os.chmod(tempdir, mode=stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+ CACERTFILE = os.path.join(tempdir, 'ca_certificate.pem')
+ CERTFILE = os.path.join(tempdir, 'server_certificate.pem')
+ KEYFILE = os.path.join(tempdir, 'server_key.pem')
+ RABBITMQ_CONF_FILE = os.path.join(tempdir, 'rabbitmq.config')
+
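+        # generate a self-signed root CA key and certificate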
+ root_key = rsa.generate_private_key(
+ public_exponent=65537,
+ key_size=2048,
+ backend=default_backend()
+ )
+ subject = issuer = x509.Name([
+ x509.NameAttribute(NameOID.COUNTRY_NAME, u"UK"),
+ x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"Oxfordshire"),
+ x509.NameAttribute(NameOID.LOCALITY_NAME, u"Harwell"),
+ x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Rosalind Franklin Institute"),
+ x509.NameAttribute(NameOID.COMMON_NAME, u"RFI CA"),
+ ])
+ root_cert = x509.CertificateBuilder().subject_name(
+ subject
+ ).issuer_name(
+ issuer
+ ).public_key(
+ root_key.public_key()
+ ).serial_number(
+ x509.random_serial_number()
+ ).not_valid_before(
+ datetime.datetime.utcnow()
+ ).not_valid_after(
+ datetime.datetime.utcnow() + datetime.timedelta(days=3650)
+ ).add_extension(
+ x509.BasicConstraints(ca=True, path_length=None), critical=True
+ ).sign(root_key, hashes.SHA256(), default_backend())
+ with open(CACERTFILE, "wb") as f:
+ f.write(root_cert.public_bytes(serialization.Encoding.PEM))
+
+ # Now we want to generate a cert from that root
+ cert_key = rsa.generate_private_key(
+ public_exponent=65537,
+ key_size=2048,
+ backend=default_backend()
+ )
+ with open(KEYFILE, "wb") as f:
+ f.write(cert_key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.NoEncryption(),
+ ))
+ new_subject = x509.Name([
+ x509.NameAttribute(NameOID.COUNTRY_NAME, u"UK"),
+ x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"Oxfordshire"),
+ x509.NameAttribute(NameOID.LOCALITY_NAME, u"Harwell"),
+ x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Rosalind Franklin Institute"),
+ ])
+ cert = x509.CertificateBuilder().subject_name(
+ new_subject
+ ).issuer_name(
+ root_cert.issuer
+ ).public_key(
+ cert_key.public_key()
+ ).serial_number(
+ x509.random_serial_number()
+ ).not_valid_before(
+ datetime.datetime.utcnow()
+ ).not_valid_after(
+ datetime.datetime.utcnow() + datetime.timedelta(days=30)
+ ).add_extension(
+ x509.SubjectAlternativeName([x509.DNSName(u"localhost")]),
+ critical=False,
+ ).sign(root_key, hashes.SHA256(), default_backend())
+ # Write our certificate out to disk.
+ with open(CERTFILE, "wb") as f:
+ f.write(cert.public_bytes(serialization.Encoding.PEM))
+
+ with open(RABBITMQ_CONF_FILE, "w") as f:
+ # use the old style config format to ensure it also runs on older RabbitMQ versions.
+ f.write(textwrap.dedent(f'''
+ [
+ {{rabbit, [
+ {{ssl_listeners, [5671]}},
+ {{ssl_options, [{{cacertfile, "{CACERTFILE}"}},
+ {{certfile, "{CERTFILE}"}},
+ {{keyfile, "{KEYFILE}"}},
+ {{verify, verify_peer}},
+ {{fail_if_no_peer_cert, false}}]}}]}}
+ ].
+ '''))
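+        # older RabbitMQ versions expect RABBITMQ_CONFIG_FILE without the '.config' extension, so strip it here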
+ os.environ['RABBITMQ_CONFIG_FILE'] = os.path.splitext(RABBITMQ_CONF_FILE)[0]
+
+ ps_s3_creation_triggers_on_master(ca_location=CACERTFILE)
+
+ del os.environ['RABBITMQ_CONFIG_FILE']
+
+
+def test_ps_s3_multipart_on_master():
+ """ test multipart object upload on master"""
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # start amqp receivers
+ exchange = 'ex1'
+ task1, receiver1 = create_amqp_receiver_thread(exchange, topic_name+'_1')
+ task1.start()
+ task2, receiver2 = create_amqp_receiver_thread(exchange, topic_name+'_2')
+ task2.start()
+ task3, receiver3 = create_amqp_receiver_thread(exchange, topic_name+'_3')
+ task3.start()
+
+ # create s3 topics
+ endpoint_address = 'amqp://' + hostname
+ endpoint_args = 'push-endpoint=' + endpoint_address + '&amqp-exchange=' + exchange + '&amqp-ack-level=broker'
+ topic_conf1 = PSTopicS3(master_zone.conn, topic_name+'_1', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn1 = topic_conf1.set_config()
+ topic_conf2 = PSTopicS3(master_zone.conn, topic_name+'_2', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn2 = topic_conf2.set_config()
+ topic_conf3 = PSTopicS3(master_zone.conn, topic_name+'_3', zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn3 = topic_conf3.set_config()
+
+ # create s3 notifications
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn1,
+ 'Events': ['s3:ObjectCreated:*']
+ },
+ {'Id': notification_name+'_2', 'TopicArn': topic_arn2,
+ 'Events': ['s3:ObjectCreated:Post']
+ },
+ {'Id': notification_name+'_3', 'TopicArn': topic_arn3,
+ 'Events': ['s3:ObjectCreated:CompleteMultipartUpload']
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket using multi-part upload
+ fp = tempfile.NamedTemporaryFile(mode='w+b')
+ object_size = 1024
+ content = bytearray(os.urandom(object_size))
+ fp.write(content)
+ fp.flush()
+ fp.seek(0)
+ uploader = bucket.initiate_multipart_upload('multipart')
+ uploader.upload_part_from_file(fp, 1)
+ uploader.complete_upload()
+ fp.close()
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+
+ # check amqp receiver
+ events = receiver1.get_and_reset_events()
+ assert_equal(len(events), 3)
+
+ events = receiver2.get_and_reset_events()
+ assert_equal(len(events), 1)
+ assert_equal(events[0]['Records'][0]['eventName'], 's3:ObjectCreated:Post')
+ assert_equal(events[0]['Records'][0]['s3']['configurationId'], notification_name+'_2')
+
+ events = receiver3.get_and_reset_events()
+ assert_equal(len(events), 1)
+ assert_equal(events[0]['Records'][0]['eventName'], 's3:ObjectCreated:CompleteMultipartUpload')
+ assert_equal(events[0]['Records'][0]['s3']['configurationId'], notification_name+'_3')
+ print(events[0]['Records'][0]['s3']['object']['size'])
+
+ # cleanup
+ stop_amqp_receiver(receiver1, task1)
+ stop_amqp_receiver(receiver2, task2)
+ stop_amqp_receiver(receiver3, task3)
+ s3_notification_conf.del_config()
+ topic_conf1.del_config()
+ topic_conf2.del_config()
+ topic_conf3.del_config()
+ for key in bucket.list():
+ key.delete()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ clean_rabbitmq(proc)
+
+
+def test_ps_versioned_deletion():
+ """ test notification of deletion markers """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topics
+ topic_conf1 = PSTopic(ps_zone.conn, topic_name+'_1')
+ _, status = topic_conf1.set_config()
+ assert_equal(status/100, 2)
+ topic_conf2 = PSTopic(ps_zone.conn, topic_name+'_2')
+ _, status = topic_conf2.set_config()
+ assert_equal(status/100, 2)
+
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ bucket.configure_versioning(True)
+
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+
+ # create notifications
+ event_type1 = 'OBJECT_DELETE'
+ notification_conf1 = PSNotification(ps_zone.conn, bucket_name,
+ topic_name+'_1',
+ event_type1)
+ _, status = notification_conf1.set_config()
+ assert_equal(status/100, 2)
+ event_type2 = 'DELETE_MARKER_CREATE'
+ notification_conf2 = PSNotification(ps_zone.conn, bucket_name,
+ topic_name+'_2',
+ event_type2)
+ _, status = notification_conf2.set_config()
+ assert_equal(status/100, 2)
+
+ # create subscriptions
+ sub_conf1 = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX+'_1',
+ topic_name+'_1')
+ _, status = sub_conf1.set_config()
+ assert_equal(status/100, 2)
+ sub_conf2 = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX+'_2',
+ topic_name+'_2')
+ _, status = sub_conf2.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket
+ key = bucket.new_key('foo')
+ key.set_contents_from_string('bar')
+ v1 = key.version_id
+ key.set_contents_from_string('kaboom')
+ v2 = key.version_id
+ # create deletion marker
+ delete_marker_key = bucket.delete_key(key.name)
+
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # delete the deletion marker
+ delete_marker_key.delete()
+ # delete versions
+ bucket.delete_key(key.name, version_id=v2)
+ bucket.delete_key(key.name, version_id=v1)
+
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # get the delete events from the subscription
+ result, _ = sub_conf1.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event key: "' + str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
+ assert_equal(str(event['event']), event_type1)
+
+ result, _ = sub_conf2.get_events()
+ events = json.loads(result)
+ for event in events['events']:
+ log.debug('Event key: "' + str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
+ assert_equal(str(event['event']), event_type2)
+
+ # cleanup
+    # the following is needed for the cleanup in the case of 3 zones
+ # see: http://tracker.ceph.com/issues/39142
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ try:
+ zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name)
+ master_zone.delete_bucket(bucket_name)
+ except:
+ log.debug('zonegroup_bucket_checkpoint failed, cannot delete bucket')
+ sub_conf1.del_config()
+ sub_conf2.del_config()
+ notification_conf1.del_config()
+ notification_conf2.del_config()
+ topic_conf1.del_config()
+ topic_conf2.del_config()
+
+
+def test_ps_s3_metadata_on_master():
+ """ test s3 notification of metadata on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # start amqp receiver
+ exchange = 'ex1'
+ task, receiver = create_amqp_receiver_thread(exchange, topic_name)
+ task.start()
+
+ # create s3 topic
+ endpoint_address = 'amqp://' + hostname
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable'
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ meta_key = 'meta1'
+ meta_value = 'This is my metadata value'
+ meta_prefix = 'x-amz-meta-'
+ topic_conf_list = [{'Id': notification_name,'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
+ 'Filter': {
+ 'Metadata': {
+ 'FilterRules': [{'Name': meta_prefix+meta_key, 'Value': meta_value}]
+ }
+ }
+ }]
+
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ expected_keys = []
+ # create objects in the bucket
+ key_name = 'foo'
+ key = bucket.new_key(key_name)
+ key.set_metadata(meta_key, meta_value)
+ key.set_contents_from_string('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
+ expected_keys.append(key_name)
+
+ # create objects in the bucket using COPY
+ key_name = 'copy_of_foo'
+ bucket.copy_key(key_name, bucket.name, key.name)
+ expected_keys.append(key_name)
+
+ # create another objects in the bucket using COPY
+ # but override the metadata value
+ key_name = 'another_copy_of_foo'
+ bucket.copy_key(key_name, bucket.name, key.name, metadata={meta_key: 'kaboom'})
+
+ # create objects in the bucket using multi-part upload
+ fp = tempfile.NamedTemporaryFile(mode='w+b')
+ chunk_size = 1024*1024*5 # 5MB
+ object_size = 10*chunk_size
+ content = bytearray(os.urandom(object_size))
+ fp.write(content)
+ fp.flush()
+ fp.seek(0)
+ key_name = 'multipart_foo'
+ uploader = bucket.initiate_multipart_upload(key_name,
+ metadata={meta_key: meta_value})
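+    # only some of the parts are uploaded; completing the upload is what triggers the notification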
+ for i in range(1,5):
+ uploader.upload_part_from_file(fp, i, size=chunk_size)
+ fp.seek(i*chunk_size)
+ uploader.complete_upload()
+ fp.close()
+ expected_keys.append(key_name)
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+ # check amqp receiver
+ events = receiver.get_and_reset_events()
+ assert_equal(len(events), 4) # PUT, COPY, Multipart start, Multipart End
+ for event in events:
+ assert(event['Records'][0]['s3']['object']['key'] in expected_keys)
+
+ # delete objects
+ for key in bucket.list():
+ key.delete()
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+    # check amqp receiver: count deletion events for keys whose metadata matched the filter
+    #assert_equal(len(receiver.get_and_reset_events()), len(expected_keys))
+    event_count = 0
+    for event in receiver.get_and_reset_events():
+        if event['Records'][0]['s3']['object']['key'] in expected_keys:
+            event_count += 1
+
+    # all 3 objects have matching metadata when deleted
+    assert_equal(event_count, 3)
+
+ # cleanup
+ stop_amqp_receiver(receiver, task)
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ clean_rabbitmq(proc)
+
+
+def test_ps_s3_tags_on_master():
+ """ test s3 notification of tags on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # start amqp receiver
+ exchange = 'ex1'
+ task, receiver = create_amqp_receiver_thread(exchange, topic_name)
+ task.start()
+
+ # create s3 topic
+ endpoint_address = 'amqp://' + hostname
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable'
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
+ 'Filter': {
+ 'Tags': {
+ 'FilterRules': [{'Name': 'hello', 'Value': 'world'}]
+ }
+ }
+ }]
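+    # only objects tagged with 'hello=world' are expected to pass this filter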
+
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket with tags
+ tags = 'hello=world&ka=boom'
+ key_name1 = 'key1'
+ put_object_tagging(master_zone.conn, bucket_name, key_name1, tags)
+ tags = 'foo=bar&ka=boom'
+ key_name2 = 'key2'
+ put_object_tagging(master_zone.conn, bucket_name, key_name2, tags)
+ key_name3 = 'key3'
+ key = bucket.new_key(key_name3)
+ key.set_contents_from_string('bar')
+ # create objects in the bucket using COPY
+ bucket.copy_key('copy_of_'+key_name1, bucket.name, key_name1)
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+ expected_tags = [{'val': 'world', 'key': 'hello'}, {'val': 'boom', 'key': 'ka'}]
+ # check amqp receiver
+ filtered_count = 0
+ for event in receiver.get_and_reset_events():
+ obj_tags = event['Records'][0]['s3']['object']['tags']
+ assert_equal(obj_tags[0], expected_tags[0])
+ filtered_count += 1
+ assert_equal(filtered_count, 2)
+
+ # delete the objects
+ for key in bucket.list():
+ key.delete()
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+ # check amqp receiver
+ filtered_count = 0
+ for event in receiver.get_and_reset_events():
+ obj_tags = event['Records'][0]['s3']['object']['tags']
+ assert_equal(obj_tags[0], expected_tags[0])
+ filtered_count += 1
+ assert_equal(filtered_count, 2)
+
+ # cleanup
+ stop_amqp_receiver(receiver, task)
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ clean_rabbitmq(proc)
+
+
+def test_ps_s3_versioning_on_master():
+ """ test s3 notification of object versions """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ bucket.configure_versioning(True)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # start amqp receiver
+ exchange = 'ex1'
+ task, receiver = create_amqp_receiver_thread(exchange, topic_name)
+ task.start()
+
+ # create s3 topic
+ endpoint_address = 'amqp://' + hostname
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ # create notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
+ 'Events': []
+ }]
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket
+ key_value = 'foo'
+ key = bucket.new_key(key_value)
+ key.set_contents_from_string('hello')
+ ver1 = key.version_id
+ key.set_contents_from_string('world')
+ ver2 = key.version_id
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+
+ # check amqp receiver
+ events = receiver.get_and_reset_events()
+ num_of_versions = 0
+ for event_list in events:
+ for event in event_list['Records']:
+ assert_equal(event['s3']['object']['key'], key_value)
+ version = event['s3']['object']['versionId']
+ num_of_versions += 1
+ if version not in (ver1, ver2):
+ print('version mismatch: '+version+' not in: ('+ver1+', '+ver2+')')
+ assert_equal(1, 0)
+ else:
+ print('version ok: '+version+' in: ('+ver1+', '+ver2+')')
+
+ assert_equal(num_of_versions, 2)
+
+ # cleanup
+ stop_amqp_receiver(receiver, task)
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ # delete the bucket
+ bucket.delete_key(key.name, version_id=ver2)
+ bucket.delete_key(key.name, version_id=ver1)
+ master_zone.delete_bucket(bucket_name)
+ clean_rabbitmq(proc)
+
+
+def test_ps_s3_versioned_deletion_on_master():
+ """ test s3 notification of deletion markers on master """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ master_zone, _ = init_env(require_ps=False)
+ realm = get_realm()
+ zonegroup = realm.master_zonegroup()
+
+ # create bucket
+ bucket_name = gen_bucket_name()
+ bucket = master_zone.create_bucket(bucket_name)
+ bucket.configure_versioning(True)
+ topic_name = bucket_name + TOPIC_SUFFIX
+
+ # start amqp receiver
+ exchange = 'ex1'
+ task, receiver = create_amqp_receiver_thread(exchange, topic_name)
+ task.start()
+
+ # create s3 topic
+ endpoint_address = 'amqp://' + hostname
+ endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
+ topic_conf = PSTopicS3(master_zone.conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
+ topic_arn = topic_conf.set_config()
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectRemoved:*']
+ },
+ {'Id': notification_name+'_2', 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectRemoved:DeleteMarkerCreated']
+ },
+ {'Id': notification_name+'_3', 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectRemoved:Delete']
+ }]
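+    # the notifications overlap: versioned deletions match both '_1' and '_3',
+    # delete marker creation matches both '_1' and '_2', so each event is delivered twice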
+ s3_notification_conf = PSNotificationS3(master_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket
+ key = bucket.new_key('foo')
+ content = str(os.urandom(512))
+ size1 = len(content)
+ key.set_contents_from_string(content)
+ ver1 = key.version_id
+ content = str(os.urandom(511))
+ size2 = len(content)
+ key.set_contents_from_string(content)
+ ver2 = key.version_id
+ # create delete marker (non versioned deletion)
+ delete_marker_key = bucket.delete_key(key.name)
+
+ time.sleep(1)
+
+ # versioned deletion
+ bucket.delete_key(key.name, version_id=ver2)
+ bucket.delete_key(key.name, version_id=ver1)
+
+ print('wait for 5sec for the messages...')
+ time.sleep(5)
+
+ # check amqp receiver
+ events = receiver.get_and_reset_events()
+ delete_events = 0
+ delete_marker_create_events = 0
+ for event_list in events:
+ for event in event_list['Records']:
+ size = event['s3']['object']['size']
+ if event['eventName'] == 's3:ObjectRemoved:Delete':
+ delete_events += 1
+ assert size in [size1, size2]
+ assert event['s3']['configurationId'] in [notification_name+'_1', notification_name+'_3']
+ if event['eventName'] == 's3:ObjectRemoved:DeleteMarkerCreated':
+ delete_marker_create_events += 1
+ assert size == size2
+ assert event['s3']['configurationId'] in [notification_name+'_1', notification_name+'_2']
+
+ # 2 key versions were deleted
+ # notified over the same topic via 2 notifications (1,3)
+ assert_equal(delete_events, 2*2)
+ # 1 deletion marker was created
+ # notified over the same topic over 2 notifications (1,2)
+ assert_equal(delete_marker_create_events, 1*2)
+
+ # cleanup
+ delete_marker_key.delete()
+ stop_amqp_receiver(receiver, task)
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ clean_rabbitmq(proc)
+
+
+def test_ps_push_http():
+ """ test pushing to http endpoint """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ http_server = StreamingHTTPServer(host, port)
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ _, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create notifications
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription
+ sub_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX,
+ topic_name, endpoint='http://'+host+':'+str(port))
+ _, status = sub_conf.set_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # check http server
+ keys = list(bucket.list())
+ # TODO: use exact match
+ http_server.verify_events(keys, exact_match=False)
+
+ # delete objects from the bucket
+ for key in bucket.list():
+ key.delete()
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # check http server
+ # TODO: use exact match
+ http_server.verify_events(keys, deletions=True, exact_match=False)
+
+ # cleanup
+ sub_conf.del_config()
+ notification_conf.del_config()
+ topic_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+ http_server.close()
+
+
+def test_ps_s3_push_http():
+ """ test pushing to http endpoint s3 record format"""
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create random port for the http server
+ host = get_ip()
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ http_server = StreamingHTTPServer(host, port)
+
+ # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name,
+ endpoint='http://'+host+':'+str(port))
+ result, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ parsed_result = json.loads(result)
+ topic_arn = parsed_result['arn']
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # check http server
+ keys = list(bucket.list())
+ # TODO: use exact match
+ http_server.verify_s3_events(keys, exact_match=False)
+
+ # delete objects from the bucket
+ for key in bucket.list():
+ key.delete()
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # check http server
+ # TODO: use exact match
+ http_server.verify_s3_events(keys, deletions=True, exact_match=False)
+
+ # cleanup
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+ http_server.close()
+
+
+def test_ps_push_amqp():
+ """ test pushing to amqp endpoint """
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ exchange = 'ex1'
+ task, receiver = create_amqp_receiver_thread(exchange, topic_name)
+ task.start()
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ _, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create notifications
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create subscription
+ sub_conf = PSSubscription(ps_zone.conn, bucket_name+SUB_SUFFIX,
+ topic_name, endpoint='amqp://'+hostname,
+ endpoint_args='amqp-exchange='+exchange+'&amqp-ack-level=broker')
+ _, status = sub_conf.set_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # check amqp receiver
+ keys = list(bucket.list())
+ # TODO: use exact match
+ receiver.verify_events(keys, exact_match=False)
+
+ # delete objects from the bucket
+ for key in bucket.list():
+ key.delete()
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # check amqp receiver
+ # TODO: use exact match
+ receiver.verify_events(keys, deletions=True, exact_match=False)
+
+ # cleanup
+ stop_amqp_receiver(receiver, task)
+ sub_conf.del_config()
+ notification_conf.del_config()
+ topic_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+ clean_rabbitmq(proc)
+
+
+def test_ps_s3_push_amqp():
+ """ test pushing to amqp endpoint s3 record format"""
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ proc = init_rabbitmq()
+ if proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create topic
+ exchange = 'ex1'
+ task, receiver = create_amqp_receiver_thread(exchange, topic_name)
+ task.start()
+ topic_conf = PSTopic(ps_zone.conn, topic_name,
+ endpoint='amqp://' + hostname,
+ endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none')
+ result, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ parsed_result = json.loads(result)
+ topic_arn = parsed_result['arn']
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # check amqp receiver
+ keys = list(bucket.list())
+ # TODO: use exact match
+ receiver.verify_s3_events(keys, exact_match=False)
+
+ # delete objects from the bucket
+ for key in bucket.list():
+ key.delete()
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # check amqp receiver
+ # TODO: use exact match
+ receiver.verify_s3_events(keys, deletions=True, exact_match=False)
+
+ # cleanup
+ stop_amqp_receiver(receiver, task)
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+ clean_rabbitmq(proc)
+
+
+def test_ps_delete_bucket():
+ """ test notification status upon bucket deletion """
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+    topic_name = bucket_name + TOPIC_SUFFIX
+    # create topic
+ topic_conf = PSTopic(ps_zone.conn, topic_name)
+ response, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ parsed_result = json.loads(response)
+ topic_arn = parsed_result['arn']
+ # create one s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ response, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create non-s3 notification
+ notification_conf = PSNotification(ps_zone.conn, bucket_name,
+ topic_name)
+ _, status = notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for bucket sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ keys = list(bucket.list())
+ # delete objects from the bucket
+ for key in bucket.list():
+ key.delete()
+ # wait for bucket sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+ # delete the bucket
+ master_zone.delete_bucket(bucket_name)
+ # wait for meta sync
+ zone_meta_checkpoint(ps_zone.zone)
+
+ # get the events from the auto-generated subscription
+ sub_conf = PSSubscription(ps_zone.conn, notification_name,
+ topic_name)
+ result, _ = sub_conf.get_events()
+ records = json.loads(result)
+ # TODO: use exact match
+ verify_s3_records_by_elements(records, keys, exact_match=False)
+
+ # s3 notification is deleted with bucket
+ _, status = s3_notification_conf.get_config(notification=notification_name)
+ assert_equal(status, 404)
+ # non-s3 notification is deleted with bucket
+ _, status = notification_conf.get_config()
+ assert_equal(status, 404)
+ # cleanup
+ sub_conf.del_config()
+ topic_conf.del_config()
+
+
+def test_ps_missing_topic():
+ """ test creating a subscription when no topic info exists"""
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create bucket on the first of the rados zones
+ master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_arn = 'arn:aws:sns:::' + topic_name
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+    try:
+        s3_notification_conf.set_config()
+    except:
+        log.info('missing topic is expected')
+    else:
+        assert False, 'creating a notification for a missing topic should fail'
+
+ # cleanup
+ master_zone.delete_bucket(bucket_name)
+
+
+def test_ps_s3_topic_update():
+ """ test updating topic associated with a notification"""
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ rabbit_proc = init_rabbitmq()
+ if rabbit_proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name = bucket_name+TOPIC_SUFFIX
+
+ # create amqp topic
+ hostname = get_ip()
+ exchange = 'ex1'
+ amqp_task, receiver = create_amqp_receiver_thread(exchange, topic_name)
+ amqp_task.start()
+ topic_conf = PSTopic(ps_zone.conn, topic_name,
+ endpoint='amqp://' + hostname,
+ endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none')
+ result, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ parsed_result = json.loads(result)
+ topic_arn = parsed_result['arn']
+ # get topic
+ result, _ = topic_conf.get_config()
+ # verify topic content
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['topic']['name'], topic_name)
+ assert_equal(parsed_result['topic']['dest']['push_endpoint'], topic_conf.parameters['push-endpoint'])
+
+ # create http server
+ port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ http_server = StreamingHTTPServer(hostname, port)
+
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create s3 notification
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ keys = list(bucket.list())
+ # TODO: use exact match
+ receiver.verify_s3_events(keys, exact_match=False)
+
+ # update the same topic with new endpoint
+ topic_conf = PSTopic(ps_zone.conn, topic_name,
+ endpoint='http://'+ hostname + ':' + str(port))
+ _, status = topic_conf.set_config()
+ assert_equal(status/100, 2)
+ # get topic
+ result, _ = topic_conf.get_config()
+ # verify topic content
+ parsed_result = json.loads(result)
+ assert_equal(parsed_result['topic']['name'], topic_name)
+ assert_equal(parsed_result['topic']['dest']['push_endpoint'], topic_conf.parameters['push-endpoint'])
+
+ # delete current objects and create new objects in the bucket
+ for key in bucket.list():
+ key.delete()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i+100))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ keys = list(bucket.list())
+ # verify that notifications are still sent to amqp
+ # TODO: use exact match
+ receiver.verify_s3_events(keys, exact_match=False)
+
+ # update notification to update the endpoint from the topic
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # delete current objects and create new objects in the bucket
+ for key in bucket.list():
+ key.delete()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i+200))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ keys = list(bucket.list())
+ # check that updates switched to http
+ # TODO: use exact match
+ http_server.verify_s3_events(keys, exact_match=False)
+
+ # cleanup
+ # delete objects from the bucket
+ stop_amqp_receiver(receiver, amqp_task)
+ for key in bucket.list():
+ key.delete()
+ s3_notification_conf.del_config()
+ topic_conf.del_config()
+ master_zone.delete_bucket(bucket_name)
+ http_server.close()
+ clean_rabbitmq(rabbit_proc)
+
+
+def test_ps_s3_notification_update():
+ """ test updating the topic of a notification"""
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ rabbit_proc = init_rabbitmq()
+ if rabbit_proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name1 = bucket_name+'amqp'+TOPIC_SUFFIX
+ topic_name2 = bucket_name+'http'+TOPIC_SUFFIX
+
+ # create topics
+ # start amqp receiver in a separate thread
+ exchange = 'ex1'
+ amqp_task, receiver = create_amqp_receiver_thread(exchange, topic_name1)
+ amqp_task.start()
+ # create random port for the http server
+ http_port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ http_server = StreamingHTTPServer(hostname, http_port)
+
+ topic_conf1 = PSTopic(ps_zone.conn, topic_name1,
+ endpoint='amqp://' + hostname,
+ endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none')
+ result, status = topic_conf1.set_config()
+ parsed_result = json.loads(result)
+ topic_arn1 = parsed_result['arn']
+ assert_equal(status/100, 2)
+ topic_conf2 = PSTopic(ps_zone.conn, topic_name2,
+ endpoint='http://'+hostname+':'+str(http_port))
+ result, status = topic_conf2.set_config()
+ parsed_result = json.loads(result)
+ topic_arn2 = parsed_result['arn']
+ assert_equal(status/100, 2)
+
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create s3 notification with topic1
+ notification_name = bucket_name + NOTIFICATION_SUFFIX
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn1,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ keys = list(bucket.list())
+ # TODO: use exact match
+    receiver.verify_s3_events(keys, exact_match=False)
+
+ # update notification to use topic2
+ topic_conf_list = [{'Id': notification_name,
+ 'TopicArn': topic_arn2,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+
+ # delete current objects and create new objects in the bucket
+ for key in bucket.list():
+ key.delete()
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i+100))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ keys = list(bucket.list())
+ # check that updates switched to http
+ # TODO: use exact match
+ http_server.verify_s3_events(keys, exact_match=False)
+
+ # cleanup
+ # delete objects from the bucket
+ stop_amqp_receiver(receiver, amqp_task)
+ for key in bucket.list():
+ key.delete()
+ s3_notification_conf.del_config()
+ topic_conf1.del_config()
+ topic_conf2.del_config()
+ master_zone.delete_bucket(bucket_name)
+ http_server.close()
+ clean_rabbitmq(rabbit_proc)
+
+
+def test_ps_s3_multiple_topics_notification():
+ """ test notification creation with multiple topics"""
+ if skip_push_tests:
+ return SkipTest("PubSub push tests don't run in teuthology")
+ hostname = get_ip()
+ rabbit_proc = init_rabbitmq()
+ if rabbit_proc is None:
+ return SkipTest('end2end amqp tests require rabbitmq-server installed')
+
+ master_zone, ps_zone = init_env()
+ bucket_name = gen_bucket_name()
+ topic_name1 = bucket_name+'amqp'+TOPIC_SUFFIX
+ topic_name2 = bucket_name+'http'+TOPIC_SUFFIX
+
+ # create topics
+ # start amqp receiver in a separate thread
+ exchange = 'ex1'
+ amqp_task, receiver = create_amqp_receiver_thread(exchange, topic_name1)
+ amqp_task.start()
+ # create random port for the http server
+ http_port = random.randint(10000, 20000)
+ # start an http server in a separate thread
+ http_server = StreamingHTTPServer(hostname, http_port)
+
+ topic_conf1 = PSTopic(ps_zone.conn, topic_name1,
+ endpoint='amqp://' + hostname,
+ endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none')
+ result, status = topic_conf1.set_config()
+ parsed_result = json.loads(result)
+ topic_arn1 = parsed_result['arn']
+ assert_equal(status/100, 2)
+ topic_conf2 = PSTopic(ps_zone.conn, topic_name2,
+ endpoint='http://'+hostname+':'+str(http_port))
+ result, status = topic_conf2.set_config()
+ parsed_result = json.loads(result)
+ topic_arn2 = parsed_result['arn']
+ assert_equal(status/100, 2)
+
+ # create bucket on the first of the rados zones
+ bucket = master_zone.create_bucket(bucket_name)
+ # wait for sync
+ zone_meta_checkpoint(ps_zone.zone)
+ # create s3 notification
+ notification_name1 = bucket_name + NOTIFICATION_SUFFIX + '_1'
+ notification_name2 = bucket_name + NOTIFICATION_SUFFIX + '_2'
+ topic_conf_list = [
+ {
+ 'Id': notification_name1,
+ 'TopicArn': topic_arn1,
+ 'Events': ['s3:ObjectCreated:*']
+ },
+ {
+ 'Id': notification_name2,
+ 'TopicArn': topic_arn2,
+ 'Events': ['s3:ObjectCreated:*']
+ }]
+ s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list)
+ _, status = s3_notification_conf.set_config()
+ assert_equal(status/100, 2)
+ result, _ = s3_notification_conf.get_config()
+ assert_equal(len(result['TopicConfigurations']), 2)
+ assert_equal(result['TopicConfigurations'][0]['Id'], notification_name1)
+ assert_equal(result['TopicConfigurations'][1]['Id'], notification_name2)
+
+ # get auto-generated subscriptions
+ sub_conf1 = PSSubscription(ps_zone.conn, notification_name1,
+ topic_name1)
+ _, status = sub_conf1.get_config()
+ assert_equal(status/100, 2)
+ sub_conf2 = PSSubscription(ps_zone.conn, notification_name2,
+ topic_name2)
+ _, status = sub_conf2.get_config()
+ assert_equal(status/100, 2)
+
+ # create objects in the bucket
+ number_of_objects = 10
+ for i in range(number_of_objects):
+ key = bucket.new_key(str(i))
+ key.set_contents_from_string('bar')
+ # wait for sync
+ zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name)
+
+ # get the events from both of the subscription
+ result, _ = sub_conf1.get_events()
+ records = json.loads(result)
+ for record in records['Records']:
+ log.debug(record)
+ keys = list(bucket.list())
+ # TODO: use exact match
+ verify_s3_records_by_elements(records, keys, exact_match=False)
+ receiver.verify_s3_events(keys, exact_match=False)
+
+ result, _ = sub_conf2.get_events()
+ parsed_result = json.loads(result)
+ for record in parsed_result['Records']:
+ log.debug(record)
+ keys = list(bucket.list())
+ # TODO: use exact match
+ verify_s3_records_by_elements(records, keys, exact_match=False)
+ http_server.verify_s3_events(keys, exact_match=False)
+
+ # cleanup
+ stop_amqp_receiver(receiver, amqp_task)
+ s3_notification_conf.del_config()
+ topic_conf1.del_config()
+ topic_conf2.del_config()
+ # delete objects from the bucket
+ for key in bucket.list():
+ key.delete()
+ master_zone.delete_bucket(bucket_name)
+ http_server.close()
+ clean_rabbitmq(rabbit_proc)
diff --git a/src/test/rgw/rgw_multi/tools.py b/src/test/rgw/rgw_multi/tools.py
new file mode 100644
index 000000000..dd7f91ade
--- /dev/null
+++ b/src/test/rgw/rgw_multi/tools.py
@@ -0,0 +1,97 @@
+import json
+import boto
+
+def append_attr_value(d, attr, attrv):
+ if attrv and len(str(attrv)) > 0:
+ d[attr] = attrv
+
+def append_attr(d, k, attr):
+ try:
+ attrv = getattr(k, attr)
+ except:
+ return
+ append_attr_value(d, attr, attrv)
+
+def get_attrs(k, attrs):
+ d = {}
+ for a in attrs:
+ append_attr(d, k, a)
+
+ return d
+
+def append_query_arg(s, n, v):
+ if not v:
+ return s
+ nv = '{n}={v}'.format(n=n, v=v)
+ if not s:
+ return nv
+ return '{s}&{nv}'.format(s=s, nv=nv)
+
+class KeyJSONEncoder(boto.s3.key.Key):
+ @staticmethod
+ def default(k, versioned=False):
+ attrs = ['bucket', 'name', 'size', 'last_modified', 'metadata', 'cache_control',
+ 'content_type', 'content_disposition', 'content_language',
+ 'owner', 'storage_class', 'md5', 'version_id', 'encrypted',
+ 'delete_marker', 'expiry_date', 'VersionedEpoch', 'RgwxTag']
+ d = get_attrs(k, attrs)
+ d['etag'] = k.etag[1:-1]
+ if versioned:
+ d['is_latest'] = k.is_latest
+ return d
+
+class DeleteMarkerJSONEncoder(boto.s3.key.Key):
+ @staticmethod
+ def default(k):
+ attrs = ['name', 'version_id', 'last_modified', 'owner']
+ d = get_attrs(k, attrs)
+ d['delete_marker'] = True
+ d['is_latest'] = k.is_latest
+ return d
+
+class UserJSONEncoder(boto.s3.user.User):
+ @staticmethod
+ def default(k):
+ attrs = ['id', 'display_name']
+ return get_attrs(k, attrs)
+
+class BucketJSONEncoder(boto.s3.bucket.Bucket):
+ @staticmethod
+ def default(k):
+ attrs = ['name', 'creation_date']
+ return get_attrs(k, attrs)
+
+class BotoJSONEncoder(json.JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, boto.s3.key.Key):
+ return KeyJSONEncoder.default(obj)
+ if isinstance(obj, boto.s3.deletemarker.DeleteMarker):
+ return DeleteMarkerJSONEncoder.default(obj)
+ if isinstance(obj, boto.s3.user.User):
+ return UserJSONEncoder.default(obj)
+        if isinstance(obj, boto.s3.prefix.Prefix):
+            return {'prefix': obj.name}
+ if isinstance(obj, boto.s3.bucket.Bucket):
+ return BucketJSONEncoder.default(obj)
+ return json.JSONEncoder.default(self, obj)
+
+
+def dump_json(o, cls=BotoJSONEncoder):
+ return json.dumps(o, cls=cls, indent=4)
+
+def assert_raises(excClass, callableObj, *args, **kwargs):
+ """
+ Like unittest.TestCase.assertRaises, but returns the exception.
+ """
+ try:
+ callableObj(*args, **kwargs)
+ except excClass as e:
+ return e
+ else:
+ if hasattr(excClass, '__name__'):
+ excName = excClass.__name__
+ else:
+ excName = str(excClass)
+ raise AssertionError("%s not raised" % excName)
+
+
diff --git a/src/test/rgw/rgw_multi/zone_az.py b/src/test/rgw/rgw_multi/zone_az.py
new file mode 100644
index 000000000..aaab3ef74
--- /dev/null
+++ b/src/test/rgw/rgw_multi/zone_az.py
@@ -0,0 +1,40 @@
+import logging
+
+from .multisite import Zone
+
+
+log = logging.getLogger('rgw_multi.tests')
+
+
+class AZone(Zone): # pylint: disable=too-many-ancestors
+ """ archive zone class """
+ def __init__(self, name, zonegroup=None, cluster=None, data=None, zone_id=None, gateways=None):
+ super(AZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)
+
+ def is_read_only(self):
+ return False
+
+ def tier_type(self):
+ return "archive"
+
+ def create(self, cluster, args=None, **kwargs):
+ if args is None:
+            args = []
+ args += ['--tier-type', self.tier_type()]
+ return self.json_command(cluster, 'create', args)
+
+ def has_buckets(self):
+ return False
+
+
+class AZoneConfig:
+ """ archive zone configuration """
+ def __init__(self, cfg, section):
+ pass
+
+
+def print_connection_info(conn):
+ """print info of connection"""
+ print("Host: " + conn.host+':'+str(conn.port))
+ print("AWS Secret Key: " + conn.aws_secret_access_key)
+ print("AWS Access Key: " + conn.aws_access_key_id)
diff --git a/src/test/rgw/rgw_multi/zone_cloud.py b/src/test/rgw/rgw_multi/zone_cloud.py
new file mode 100644
index 000000000..322a19e6d
--- /dev/null
+++ b/src/test/rgw/rgw_multi/zone_cloud.py
@@ -0,0 +1,320 @@
+import json
+import requests.compat
+import logging
+
+import boto
+import boto.s3.connection
+
+import dateutil.parser
+import datetime
+
+import re
+
+from nose.tools import eq_ as eq
+from itertools import zip_longest # type: ignore
+from urllib.parse import urlparse
+
+from .multisite import *
+from .tools import *
+
+log = logging.getLogger(__name__)
+
+def get_key_ver(k):
+ if not k.version_id:
+ return 'null'
+ return k.version_id
+
+def unquote(s):
+ if s[0] == '"' and s[-1] == '"':
+ return s[1:-1]
+ return s
+
+def check_object_eq(k1, k2, check_extra = True):
+ assert k1
+ assert k2
+ log.debug('comparing key name=%s', k1.name)
+ eq(k1.name, k2.name)
+ eq(k1.metadata, k2.metadata)
+ # eq(k1.cache_control, k2.cache_control)
+ eq(k1.content_type, k2.content_type)
+ eq(k1.content_encoding, k2.content_encoding)
+ eq(k1.content_disposition, k2.content_disposition)
+ eq(k1.content_language, k2.content_language)
+
+ eq(unquote(k1.etag), unquote(k2.etag))
+
+ mtime1 = dateutil.parser.parse(k1.last_modified)
+ mtime2 = dateutil.parser.parse(k2.last_modified)
+ log.debug('k1.last_modified=%s k2.last_modified=%s', k1.last_modified, k2.last_modified)
+ assert abs((mtime1 - mtime2).total_seconds()) < 1 # handle different time resolution
+ # if check_extra:
+ # eq(k1.owner.id, k2.owner.id)
+ # eq(k1.owner.display_name, k2.owner.display_name)
+ # eq(k1.storage_class, k2.storage_class)
+ eq(k1.size, k2.size)
+ eq(get_key_ver(k1), get_key_ver(k2))
+ # eq(k1.encrypted, k2.encrypted)
+
+def make_request(conn, method, bucket, key, query_args, headers):
+ result = conn.make_request(method, bucket=bucket, key=key, query_args=query_args, headers=headers)
+ if result.status // 100 != 2:
+ raise boto.exception.S3ResponseError(result.status, result.reason, result.read())
+ return result
+
+class CloudKey:
+ def __init__(self, zone_bucket, k):
+ self.zone_bucket = zone_bucket
+
+ # we need two keys: when listing buckets, we get keys that only contain partial data
+ # but we need to have the full data so that we could use all the meta-rgwx- headers
+ # that are needed in order to create a correct representation of the object
+ self.key = k
+ self.rgwx_key = k # assuming k has all the meta info on, if not then we'll update it in update()
+ self.update()
+
+ def update(self):
+ k = self.key
+ rk = self.rgwx_key
+
+ self.size = rk.size
+ orig_name = rk.metadata.get('rgwx-source-key')
+ if not orig_name:
+ self.rgwx_key = self.zone_bucket.bucket.get_key(k.name, version_id = k.version_id)
+ rk = self.rgwx_key
+ orig_name = rk.metadata.get('rgwx-source-key')
+
+ self.name = orig_name
+ self.version_id = rk.metadata.get('rgwx-source-version-id')
+
+ ve = rk.metadata.get('rgwx-versioned-epoch')
+ if ve:
+ self.versioned_epoch = int(ve)
+ else:
+ self.versioned_epoch = 0
+
+ mt = rk.metadata.get('rgwx-source-mtime')
+ if mt:
+ self.last_modified = datetime.datetime.utcfromtimestamp(float(mt)).strftime('%a, %d %b %Y %H:%M:%S GMT')
+ else:
+ self.last_modified = k.last_modified
+
+ et = rk.metadata.get('rgwx-source-etag')
+ if rk.etag.find('-') >= 0 or et.find('-') >= 0:
+ # in this case we will use the source etag as it was uploaded via multipart upload
+ # in one of the zones, so there's no way to make sure etags are calculated the same
+ # way. In the other case we'd just want to keep the etag that was generated in the
+ # regular upload mechanism, which should be consistent in both ends
+ self.etag = et
+ else:
+ self.etag = rk.etag
+
+ if k.etag[0] == '"' and self.etag[0] != '"': # inconsistent etag quoting when listing bucket vs object get
+ self.etag = '"' + self.etag + '"'
+
+ new_meta = {}
+ for meta_key, meta_val in k.metadata.items():
+ if not meta_key.startswith('rgwx-'):
+ new_meta[meta_key] = meta_val
+
+ self.metadata = new_meta
+
+ self.cache_control = k.cache_control
+ self.content_type = k.content_type
+ self.content_encoding = k.content_encoding
+ self.content_disposition = k.content_disposition
+ self.content_language = k.content_language
+
+
+ def get_contents_as_string(self, encoding=None):
+ r = self.key.get_contents_as_string(encoding=encoding)
+
+ # the previous call changed the status of the source object, as it loaded
+ # its metadata
+
+ self.rgwx_key = self.key
+ self.update()
+
+ return r
+
+
+class CloudZoneBucket:
+ def __init__(self, zone_conn, target_path, name):
+ self.zone_conn = zone_conn
+ self.name = name
+ self.cloud_conn = zone_conn.zone.cloud_conn
+
+ target_path = target_path[:]
+ if target_path[-1] != '/':
+ target_path += '/'
+ target_path = target_path.replace('${bucket}', name)
+
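+        # the first path component is the bucket name on the cloud endpoint;
+        # anything after the first '/' becomes a key prefix inside that bucket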
+ tp = target_path.split('/', 1)
+
+ if len(tp) == 1:
+ self.target_bucket = target_path
+ self.target_prefix = ''
+ else:
+ self.target_bucket = tp[0]
+ self.target_prefix = tp[1]
+
+ log.debug('target_path=%s target_bucket=%s target_prefix=%s', target_path, self.target_bucket, self.target_prefix)
+ self.bucket = self.cloud_conn.get_bucket(self.target_bucket)
+
+ def get_all_versions(self):
+ l = []
+
+ for k in self.bucket.get_all_keys(prefix=self.target_prefix):
+ new_key = CloudKey(self, k)
+
+ log.debug('appending o=[\'%s\', \'%s\', \'%d\']', new_key.name, new_key.version_id, new_key.versioned_epoch)
+ l.append(new_key)
+
+
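+        # sort by object name, and within each name by newest versioned epoch first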
+ sort_key = lambda k: (k.name, -k.versioned_epoch)
+ l.sort(key = sort_key)
+
+ for new_key in l:
+ yield new_key
+
+ def get_key(self, name, version_id=None):
+ return CloudKey(self, self.bucket.get_key(name, version_id=version_id))
+
+
+def parse_endpoint(endpoint):
+ o = urlparse(endpoint)
+
+ netloc = o.netloc.split(':')
+
+ host = netloc[0]
+
+ if len(netloc) > 1:
+ port = int(netloc[1])
+ else:
+ port = o.port
+
+ is_secure = False
+
+ if o.scheme == 'https':
+ is_secure = True
+
+ if not port:
+ if is_secure:
+ port = 443
+ else:
+ port = 80
+
+ return host, port, is_secure
+
+
+class CloudZone(Zone):
+ def __init__(self, name, cloud_endpoint, credentials, source_bucket, target_path,
+ zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None):
+ self.cloud_endpoint = cloud_endpoint
+ self.credentials = credentials
+ self.source_bucket = source_bucket
+ self.target_path = target_path
+
+ self.target_path = self.target_path.replace('${zone}', name)
+ # self.target_path = self.target_path.replace('${zone_id}', zone_id)
+ self.target_path = self.target_path.replace('${zonegroup}', zonegroup.name)
+ self.target_path = self.target_path.replace('${zonegroup_id}', zonegroup.id)
+
+ log.debug('target_path=%s', self.target_path)
+
+ host, port, is_secure = parse_endpoint(cloud_endpoint)
+
+ self.cloud_conn = boto.connect_s3(
+ aws_access_key_id = credentials.access_key,
+ aws_secret_access_key = credentials.secret,
+ host = host,
+ port = port,
+ is_secure = is_secure,
+ calling_format = boto.s3.connection.OrdinaryCallingFormat())
+ super(CloudZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)
+
+
+ def is_read_only(self):
+ return True
+
+ def tier_type(self):
+ return "cloud"
+
+ def create(self, cluster, args = None, check_retcode = True):
+ """ create the object with the given arguments """
+
+ if args is None:
+            args = []
+
+ tier_config = ','.join([ 'connection.endpoint=' + self.cloud_endpoint,
+ 'connection.access_key=' + self.credentials.access_key,
+ 'connection.secret=' + self.credentials.secret,
+ 'target_path=' + re.escape(self.target_path)])
+
+ args += [ '--tier-type', self.tier_type(), '--tier-config', tier_config ]
+
+ return self.json_command(cluster, 'create', args, check_retcode=check_retcode)
+
+ def has_buckets(self):
+ return False
+
+ class Conn(ZoneConn):
+ def __init__(self, zone, credentials):
+ super(CloudZone.Conn, self).__init__(zone, credentials)
+
+ def get_bucket(self, bucket_name):
+ return CloudZoneBucket(self, self.zone.target_path, bucket_name)
+
+ def create_bucket(self, name):
+ # should not be here, a bug in the test suite
+ log.critical('Conn.create_bucket() should not be called in cloud zone')
+ assert False
+
+ def check_bucket_eq(self, zone_conn, bucket_name):
+ assert(zone_conn.zone.tier_type() == "rados")
+
+            log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
+ b1 = self.get_bucket(bucket_name)
+ b2 = zone_conn.get_bucket(bucket_name)
+
+ log.debug('bucket1 objects:')
+ for o in b1.get_all_versions():
+ log.debug('o=%s', o.name)
+ log.debug('bucket2 objects:')
+ for o in b2.get_all_versions():
+ log.debug('o=%s', o.name)
+
+ for k1, k2 in zip_longest(b1.get_all_versions(), b2.get_all_versions()):
+ if k1 is None:
+ log.critical('key=%s is missing from zone=%s', k2.name, self.name)
+ assert False
+ if k2 is None:
+ log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name)
+ assert False
+
+ check_object_eq(k1, k2)
+
+
+ log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
+
+ return True
+
+ def get_conn(self, credentials):
+ return self.Conn(self, credentials)
+
+
+class CloudZoneConfig:
+ def __init__(self, cfg, section):
+ self.endpoint = cfg.get(section, 'endpoint')
+ access_key = cfg.get(section, 'access_key')
+ secret = cfg.get(section, 'secret')
+ self.credentials = Credentials(access_key, secret)
+ try:
+ self.target_path = cfg.get(section, 'target_path')
+ except:
+ self.target_path = 'rgw-${zonegroup_id}/${bucket}'
+
+ try:
+ self.source_bucket = cfg.get(section, 'source_bucket')
+ except:
+ self.source_bucket = '*'
+
diff --git a/src/test/rgw/rgw_multi/zone_es.py b/src/test/rgw/rgw_multi/zone_es.py
new file mode 100644
index 000000000..204b9e47f
--- /dev/null
+++ b/src/test/rgw/rgw_multi/zone_es.py
@@ -0,0 +1,250 @@
+import json
+import requests.compat
+import logging
+
+import boto
+import boto.s3.connection
+
+import dateutil.parser
+
+from nose.tools import eq_ as eq
+from itertools import zip_longest # type: ignore
+
+from .multisite import *
+from .tools import *
+
+log = logging.getLogger(__name__)
+
+def get_key_ver(k):
+ if not k.version_id:
+ return 'null'
+ return k.version_id
+
+def check_object_eq(k1, k2, check_extra = True):
+ assert k1
+ assert k2
+ log.debug('comparing key name=%s', k1.name)
+ eq(k1.name, k2.name)
+ eq(k1.metadata, k2.metadata)
+ # eq(k1.cache_control, k2.cache_control)
+ eq(k1.content_type, k2.content_type)
+ # eq(k1.content_encoding, k2.content_encoding)
+ # eq(k1.content_disposition, k2.content_disposition)
+ # eq(k1.content_language, k2.content_language)
+ eq(k1.etag, k2.etag)
+ mtime1 = dateutil.parser.parse(k1.last_modified)
+ mtime2 = dateutil.parser.parse(k2.last_modified)
+ assert abs((mtime1 - mtime2).total_seconds()) < 1 # handle different time resolution
+ if check_extra:
+ eq(k1.owner.id, k2.owner.id)
+ eq(k1.owner.display_name, k2.owner.display_name)
+ # eq(k1.storage_class, k2.storage_class)
+ eq(k1.size, k2.size)
+ eq(get_key_ver(k1), get_key_ver(k2))
+ # eq(k1.encrypted, k2.encrypted)
+
+def make_request(conn, method, bucket, key, query_args, headers):
+ result = conn.make_request(method, bucket=bucket, key=key, query_args=query_args, headers=headers)
+ if result.status // 100 != 2:
+ raise boto.exception.S3ResponseError(result.status, result.reason, result.read())
+ return result
+
+
+class MDSearch:
+ def __init__(self, conn, bucket_name, query, query_args = None, marker = None):
+ self.conn = conn
+ self.bucket_name = bucket_name or ''
+ if bucket_name:
+ self.bucket = boto.s3.bucket.Bucket(name=bucket_name)
+ else:
+ self.bucket = None
+ self.query = query
+ self.query_args = query_args
+ self.max_keys = None
+ self.marker = marker
+
+ def raw_search(self):
+ q = self.query or ''
+ query_args = append_query_arg(self.query_args, 'query', requests.compat.quote_plus(q))
+ if self.max_keys is not None:
+ query_args = append_query_arg(query_args, 'max-keys', self.max_keys)
+ if self.marker:
+ query_args = append_query_arg(query_args, 'marker', self.marker)
+
+ query_args = append_query_arg(query_args, 'format', 'json')
+
+ headers = {}
+
+ result = make_request(self.conn, "GET", bucket=self.bucket_name, key='', query_args=query_args, headers=headers)
+
+ l = []
+
+ result_dict = json.loads(result.read())
+
+ for entry in result_dict['Objects']:
+ bucket = self.conn.get_bucket(entry['Bucket'], validate = False)
+ k = boto.s3.key.Key(bucket, entry['Key'])
+
+ k.version_id = entry['Instance']
+ k.etag = entry['ETag']
+ k.owner = boto.s3.user.User(id=entry['Owner']['ID'], display_name=entry['Owner']['DisplayName'])
+ k.last_modified = entry['LastModified']
+ k.size = entry['Size']
+ k.content_type = entry['ContentType']
+ k.versioned_epoch = entry['VersionedEpoch']
+
+ k.metadata = {}
+ for e in entry['CustomMetadata']:
+ k.metadata[e['Name']] = str(e['Value']) # int values will return as int, cast to string for compatibility with object meta response
+
+ l.append(k)
+
+ return result_dict, l
+
+ def search(self, drain = True, sort = True, sort_key = None):
+ l = []
+
+ is_done = False
+
+ while not is_done:
+ result, result_keys = self.raw_search()
+
+ l = l + result_keys
+
+ is_done = not (drain and (result['IsTruncated'] == "true"))
+            self.marker = result['Marker']
+
+ if sort:
+ if not sort_key:
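+                # default ordering: by object name, newest versioned epoch first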
+ sort_key = lambda k: (k.name, -k.versioned_epoch)
+ l.sort(key = sort_key)
+
+ return l
+
+
+class MDSearchConfig:
+ def __init__(self, conn, bucket_name):
+ self.conn = conn
+ self.bucket_name = bucket_name or ''
+ if bucket_name:
+ self.bucket = boto.s3.bucket.Bucket(name=bucket_name)
+ else:
+ self.bucket = None
+
+ def send_request(self, conf, method):
+ query_args = 'mdsearch'
+ headers = None
+ if conf:
+ headers = { 'X-Amz-Meta-Search': conf }
+
+ query_args = append_query_arg(query_args, 'format', 'json')
+
+ return make_request(self.conn, method, bucket=self.bucket_name, key='', query_args=query_args, headers=headers)
+
+ def get_config(self):
+ result = self.send_request(None, 'GET')
+ return json.loads(result.read())
+
+ def set_config(self, conf):
+ self.send_request(conf, 'POST')
+
+ def del_config(self):
+ self.send_request(None, 'DELETE')
+
+
+class ESZoneBucket:
+ def __init__(self, zone_conn, name, conn):
+ self.zone_conn = zone_conn
+ self.name = name
+ self.conn = conn
+
+ self.bucket = boto.s3.bucket.Bucket(name=name)
+
+ def get_all_versions(self):
+
+ marker = None
+ is_done = False
+
+ req = MDSearch(self.conn, self.name, 'bucket == ' + self.name, marker=marker)
+
+ for k in req.search():
+ yield k
+
+
+
+
+class ESZone(Zone):
+ def __init__(self, name, es_endpoint, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None):
+ self.es_endpoint = es_endpoint
+ super(ESZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)
+
+ def is_read_only(self):
+ return True
+
+ def tier_type(self):
+ return "elasticsearch"
+
+ def create(self, cluster, args = None, check_retcode = True):
+ """ create the object with the given arguments """
+
+ if args is None:
+            args = []
+
+ tier_config = ','.join([ 'endpoint=' + self.es_endpoint, 'explicit_custom_meta=false' ])
+
+ args += [ '--tier-type', self.tier_type(), '--tier-config', tier_config ]
+
+ return self.json_command(cluster, 'create', args, check_retcode=check_retcode)
+
+ def has_buckets(self):
+ return False
+
+ class Conn(ZoneConn):
+ def __init__(self, zone, credentials):
+ super(ESZone.Conn, self).__init__(zone, credentials)
+
+ def get_bucket(self, bucket_name):
+ return ESZoneBucket(self, bucket_name, self.conn)
+
+ def create_bucket(self, name):
+ # should not be here, a bug in the test suite
+ log.critical('Conn.create_bucket() should not be called in ES zone')
+ assert False
+
+ def check_bucket_eq(self, zone_conn, bucket_name):
+ assert(zone_conn.zone.tier_type() == "rados")
+
+            log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
+ b1 = self.get_bucket(bucket_name)
+ b2 = zone_conn.get_bucket(bucket_name)
+
+ log.debug('bucket1 objects:')
+ for o in b1.get_all_versions():
+ log.debug('o=%s', o.name)
+ log.debug('bucket2 objects:')
+ for o in b2.get_all_versions():
+ log.debug('o=%s', o.name)
+
+ for k1, k2 in zip_longest(b1.get_all_versions(), b2.get_all_versions()):
+ if k1 is None:
+ log.critical('key=%s is missing from zone=%s', k2.name, self.name)
+ assert False
+ if k2 is None:
+ log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name)
+ assert False
+
+ check_object_eq(k1, k2)
+
+
+ log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
+
+ return True
+
+ def get_conn(self, credentials):
+ return self.Conn(self, credentials)
+
+
+class ESZoneConfig:
+ def __init__(self, cfg, section):
+ self.endpoint = cfg.get(section, 'endpoint')
+
diff --git a/src/test/rgw/rgw_multi/zone_ps.py b/src/test/rgw/rgw_multi/zone_ps.py
new file mode 100644
index 000000000..0553f8061
--- /dev/null
+++ b/src/test/rgw/rgw_multi/zone_ps.py
@@ -0,0 +1,428 @@
+import logging
+import ssl
+import urllib
+import hmac
+import hashlib
+import base64
+import xmltodict
+from http import client as http_client
+from urllib import parse as urlparse
+from time import gmtime, strftime
+from .multisite import Zone
+import boto3
+from botocore.client import Config
+
+log = logging.getLogger('rgw_multi.tests')
+
+def put_object_tagging(conn, bucket_name, key, tags):
+ client = boto3.client('s3',
+ endpoint_url='http://'+conn.host+':'+str(conn.port),
+ aws_access_key_id=conn.aws_access_key_id,
+ aws_secret_access_key=conn.aws_secret_access_key)
+ return client.put_object(Body='aaaaaaaaaaa', Bucket=bucket_name, Key=key, Tagging=tags)
+
+
+def get_object_tagging(conn, bucket, object_key):
+ client = boto3.client('s3',
+ endpoint_url='http://'+conn.host+':'+str(conn.port),
+ aws_access_key_id=conn.aws_access_key_id,
+ aws_secret_access_key=conn.aws_secret_access_key)
+ return client.get_object_tagging(
+ Bucket=bucket,
+ Key=object_key
+ )
+
+
+class PSZone(Zone): # pylint: disable=too-many-ancestors
+ """ PubSub zone class """
+ def __init__(self, name, zonegroup=None, cluster=None, data=None, zone_id=None, gateways=None, full_sync='false', retention_days ='7'):
+ self.full_sync = full_sync
+ self.retention_days = retention_days
+ self.master_zone = zonegroup.master_zone
+ super(PSZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)
+
+ def is_read_only(self):
+ return True
+
+ def tier_type(self):
+ return "pubsub"
+
+ def syncs_from(self, zone_name):
+ return zone_name == self.master_zone.name
+
+ def create(self, cluster, args=None, **kwargs):
+ if args is None:
+            args = []
+ tier_config = ','.join(['start_with_full_sync=' + self.full_sync, 'event_retention_days=' + self.retention_days])
+ args += ['--tier-type', self.tier_type(), '--sync-from-all=0', '--sync-from', self.master_zone.name, '--tier-config', tier_config]
+ return self.json_command(cluster, 'create', args)
+
+ def has_buckets(self):
+ return False
+
+
+NO_HTTP_BODY = ''
+
+
+def make_request(conn, method, resource, parameters=None, sign_parameters=False, extra_parameters=None):
+ """generic request sending to pubsub radogw
+ should cover: topics, notificatios and subscriptions
+ """
+ url_params = ''
+ if parameters is not None:
+ url_params = urlparse.urlencode(parameters)
+ # remove 'None' from keys with no values
+ url_params = url_params.replace('=None', '')
+ url_params = '?' + url_params
+ if extra_parameters is not None:
+ url_params = url_params + '&' + extra_parameters
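+    # sign the request AWS auth v2 style: HMAC-SHA1 over the method, date and
+    # resource (plus, optionally, the query parameters), base64 encoded into
+    # the 'Authorization: AWS <access key>:<signature>' header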
+ string_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
+ string_to_sign = method + '\n\n\n' + string_date + '\n' + resource
+ if sign_parameters:
+ string_to_sign += url_params
+ signature = base64.b64encode(hmac.new(conn.aws_secret_access_key.encode('utf-8'),
+ string_to_sign.encode('utf-8'),
+ hashlib.sha1).digest()).decode('ascii')
+ headers = {'Authorization': 'AWS '+conn.aws_access_key_id+':'+signature,
+ 'Date': string_date,
+ 'Host': conn.host+':'+str(conn.port)}
+ http_conn = http_client.HTTPConnection(conn.host, conn.port)
+ if log.getEffectiveLevel() <= 10:
+ http_conn.set_debuglevel(5)
+ http_conn.request(method, resource+url_params, NO_HTTP_BODY, headers)
+ response = http_conn.getresponse()
+ data = response.read()
+ status = response.status
+ http_conn.close()
+ return data.decode('utf-8'), status
+
+
+def print_connection_info(conn):
+ """print info of connection"""
+ print("Host: " + conn.host+':'+str(conn.port))
+ print("AWS Secret Key: " + conn.aws_secret_access_key)
+ print("AWS Access Key: " + conn.aws_access_key_id)
+
+
+class PSTopic:
+ """class to set/get/delete a topic
+ PUT /topics/<topic name>[?push-endpoint=<endpoint>&[<arg1>=<value1>...]]
+ GET /topics/<topic name>
+ DELETE /topics/<topic name>
+ """
+ def __init__(self, conn, topic_name, endpoint=None, endpoint_args=None):
+ self.conn = conn
+ assert topic_name.strip()
+ self.resource = '/topics/'+topic_name
+ if endpoint is not None:
+ self.parameters = {'push-endpoint': endpoint}
+ self.extra_parameters = endpoint_args
+ else:
+ self.parameters = None
+ self.extra_parameters = None
+
+ def send_request(self, method, get_list=False, parameters=None, extra_parameters=None):
+ """send request to radosgw"""
+ if get_list:
+ return make_request(self.conn, method, '/topics')
+ return make_request(self.conn, method, self.resource,
+ parameters=parameters, extra_parameters=extra_parameters)
+
+ def get_config(self):
+ """get topic info"""
+ return self.send_request('GET')
+
+ def set_config(self):
+ """set topic"""
+ return self.send_request('PUT', parameters=self.parameters, extra_parameters=self.extra_parameters)
+
+ def del_config(self):
+ """delete topic"""
+ return self.send_request('DELETE')
+
+ def get_list(self):
+ """list all topics"""
+ return self.send_request('GET', get_list=True)
+
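+# Illustrative PSTopic usage (a sketch, not part of the tests; the topic name and
+# push endpoint are made up):
+# topic = PSTopic(conn, 'mytopic', endpoint='http://localhost:9001')
+# _, status = topic.set_config() # PUT /topics/mytopic?push-endpoint=...
+# result, _ = topic.get_config() # GET /topics/mytopic
+# topic.del_config() # DELETE /topics/mytopic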
+
+def delete_all_s3_topics(zone, region):
+ try:
+ conn = zone.secure_conn if zone.secure_conn is not None else zone.conn
+ protocol = 'https' if conn.is_secure else 'http'
+ client = boto3.client('sns',
+ endpoint_url=protocol+'://'+conn.host+':'+str(conn.port),
+ aws_access_key_id=conn.aws_access_key_id,
+ aws_secret_access_key=conn.aws_secret_access_key,
+ region_name=region,
+ verify='./cert.pem')
+
+ topics = client.list_topics()['Topics']
+ for topic in topics:
+ print('topic cleanup, deleting: ' + topic['TopicArn'])
+ assert client.delete_topic(TopicArn=topic['TopicArn'])['ResponseMetadata']['HTTPStatusCode'] == 200
+ except Exception as err:
+ print('failed to do topic cleanup: ' + str(err))
+
+
+def delete_all_objects(conn, bucket_name):
+ client = boto3.client('s3',
+ endpoint_url='http://'+conn.host+':'+str(conn.port),
+ aws_access_key_id=conn.aws_access_key_id,
+ aws_secret_access_key=conn.aws_secret_access_key)
+
+ objects = []
+ for key in client.list_objects(Bucket=bucket_name)['Contents']:
+ objects.append({'Key': key['Key']})
+ # delete objects from the bucket
+ response = client.delete_objects(Bucket=bucket_name,
+ Delete={'Objects': objects})
+ return response
+
+
+class PSTopicS3:
+ """class to set/list/get/delete a topic
+ POST ?Action=CreateTopic&Name=<topic name>[&OpaqueData=<data>[&push-endpoint=<endpoint>&[<arg1>=<value1>...]]]
+ POST ?Action=ListTopics
+ POST ?Action=GetTopic&TopicArn=<topic-arn>
+ POST ?Action=GetTopicAttributes&TopicArn=<topic-arn>
+ POST ?Action=DeleteTopic&TopicArn=<topic-arn>
+ """
+ def __init__(self, conn, topic_name, region, endpoint_args=None, opaque_data=None):
+ self.conn = conn
+ self.topic_name = topic_name.strip()
+ assert self.topic_name
+ self.topic_arn = ''
+ self.attributes = {}
+ if endpoint_args is not None:
+ self.attributes = {nvp[0] : nvp[1] for nvp in urlparse.parse_qsl(endpoint_args, keep_blank_values=True)}
+ if opaque_data is not None:
+ self.attributes['OpaqueData'] = opaque_data
+ protocol = 'https' if conn.is_secure else 'http'
+ self.client = boto3.client('sns',
+ endpoint_url=protocol+'://'+conn.host+':'+str(conn.port),
+ aws_access_key_id=conn.aws_access_key_id,
+ aws_secret_access_key=conn.aws_secret_access_key,
+ region_name=region,
+ verify='./cert.pem')
+
+
+ def get_config(self):
+ """get topic info"""
+ parameters = {'Action': 'GetTopic', 'TopicArn': self.topic_arn}
+ body = urlparse.urlencode(parameters)
+ string_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
+ content_type = 'application/x-www-form-urlencoded; charset=utf-8'
+ resource = '/'
+ method = 'POST'
+ string_to_sign = method + '\n\n' + content_type + '\n' + string_date + '\n' + resource
+ log.debug('StringToSign: %s', string_to_sign)
+ signature = base64.b64encode(hmac.new(self.conn.aws_secret_access_key.encode('utf-8'),
+ string_to_sign.encode('utf-8'),
+ hashlib.sha1).digest()).decode('ascii')
+ headers = {'Authorization': 'AWS '+self.conn.aws_access_key_id+':'+signature,
+ 'Date': string_date,
+ 'Host': self.conn.host+':'+str(self.conn.port),
+ 'Content-Type': content_type}
+ if self.conn.is_secure:
+ http_conn = http_client.HTTPSConnection(self.conn.host, self.conn.port,
+ context=ssl.create_default_context(cafile='./cert.pem'))
+ else:
+ http_conn = http_client.HTTPConnection(self.conn.host, self.conn.port)
+ http_conn.request(method, resource, body, headers)
+ response = http_conn.getresponse()
+ data = response.read()
+ status = response.status
+ http_conn.close()
+ dict_response = xmltodict.parse(data)
+ return dict_response, status
+
+ def get_attributes(self):
+ """get topic attributes"""
+ return self.client.get_topic_attributes(TopicArn=self.topic_arn)
+
+ def set_config(self):
+ """set topic"""
+ result = self.client.create_topic(Name=self.topic_name, Attributes=self.attributes)
+ self.topic_arn = result['TopicArn']
+ return self.topic_arn
+
+ def del_config(self):
+ """delete topic"""
+ result = self.client.delete_topic(TopicArn=self.topic_arn)
+ return result['ResponseMetadata']['HTTPStatusCode']
+
+ def get_list(self):
+ """list all topics"""
+ # note that boto3 supports list_topics(), however, the result only shows the ARNs
+ parameters = {'Action': 'ListTopics'}
+ body = urlparse.urlencode(parameters)
+ string_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
+ content_type = 'application/x-www-form-urlencoded; charset=utf-8'
+ resource = '/'
+ method = 'POST'
+ string_to_sign = method + '\n\n' + content_type + '\n' + string_date + '\n' + resource
+ log.debug('StringToSign: %s', string_to_sign)
+ signature = base64.b64encode(hmac.new(self.conn.aws_secret_access_key.encode('utf-8'),
+ string_to_sign.encode('utf-8'),
+ hashlib.sha1).digest()).decode('ascii')
+ headers = {'Authorization': 'AWS '+self.conn.aws_access_key_id+':'+signature,
+ 'Date': string_date,
+ 'Host': self.conn.host+':'+str(self.conn.port),
+ 'Content-Type': content_type}
+ if self.conn.is_secure:
+ http_conn = http_client.HTTPSConnection(self.conn.host, self.conn.port,
+ context=ssl.create_default_context(cafile='./cert.pem'))
+ else:
+ http_conn = http_client.HTTPConnection(self.conn.host, self.conn.port)
+ http_conn.request(method, resource, body, headers)
+ response = http_conn.getresponse()
+ data = response.read()
+ status = response.status
+ http_conn.close()
+ dict_response = xmltodict.parse(data)
+ return dict_response, status
+
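+# Illustrative PSTopicS3 usage (a sketch; the region and endpoint arguments are made
+# up, and a './cert.pem' file is expected when the connection is secure):
+# topic = PSTopicS3(conn, 'mytopic', 'us-east-1', endpoint_args='push-endpoint=http://localhost:9001')
+# topic_arn = topic.set_config() # SNS CreateTopic
+# attributes = topic.get_attributes() # SNS GetTopicAttributes
+# status = topic.del_config() # SNS DeleteTopic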
+
+class PSNotification:
+ """class to set/get/delete a notification
+ PUT /notifications/bucket/<bucket>?topic=<topic-name>[&events=<event>[,<event>]]
+ GET /notifications/bucket/<bucket>
+ DELETE /notifications/bucket/<bucket>?topic=<topic-name>
+ """
+ def __init__(self, conn, bucket_name, topic_name, events=''):
+ self.conn = conn
+ assert bucket_name.strip()
+ assert topic_name.strip()
+ self.resource = '/notifications/bucket/'+bucket_name
+ if events.strip():
+ self.parameters = {'topic': topic_name, 'events': events}
+ else:
+ self.parameters = {'topic': topic_name}
+
+ def send_request(self, method, parameters=None):
+ """send request to radosgw"""
+ return make_request(self.conn, method, self.resource, parameters)
+
+ def get_config(self):
+ """get notification info"""
+ return self.send_request('GET')
+
+ def set_config(self):
+ """set notification"""
+ return self.send_request('PUT', self.parameters)
+
+ def del_config(self):
+ """delete notification"""
+ return self.send_request('DELETE', self.parameters)
+
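+# Illustrative PSNotification usage (a sketch; the bucket/topic names and the event
+# type are made up):
+# notification = PSNotification(conn, 'mybucket', 'mytopic', events='OBJECT_CREATE')
+# notification.set_config() # PUT /notifications/bucket/mybucket?topic=mytopic&events=OBJECT_CREATE
+# notification.get_config() # GET /notifications/bucket/mybucket
+# notification.del_config() # DELETE /notifications/bucket/mybucket?topic=mytopic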
+
+class PSNotificationS3:
+ """class to set/get/delete an S3 notification
+ PUT /<bucket>?notification
+ GET /<bucket>?notification[=<notification>]
+ DELETE /<bucket>?notification[=<notification>]
+ """
+ def __init__(self, conn, bucket_name, topic_conf_list):
+ self.conn = conn
+ assert bucket_name.strip()
+ self.bucket_name = bucket_name
+ self.resource = '/'+bucket_name
+ self.topic_conf_list = topic_conf_list
+ self.client = boto3.client('s3',
+ endpoint_url='http://'+conn.host+':'+str(conn.port),
+ aws_access_key_id=conn.aws_access_key_id,
+ aws_secret_access_key=conn.aws_secret_access_key)
+
+ def send_request(self, method, parameters=None):
+ """send request to radosgw"""
+ return make_request(self.conn, method, self.resource,
+ parameters=parameters, sign_parameters=True)
+
+ def get_config(self, notification=None):
+ """get notification info"""
+ parameters = None
+ if notification is None:
+ response = self.client.get_bucket_notification_configuration(Bucket=self.bucket_name)
+ status = response['ResponseMetadata']['HTTPStatusCode']
+ return response, status
+ parameters = {'notification': notification}
+ response, status = self.send_request('GET', parameters=parameters)
+ dict_response = xmltodict.parse(response)
+ return dict_response, status
+
+ def set_config(self):
+ """set notification"""
+ response = self.client.put_bucket_notification_configuration(Bucket=self.bucket_name,
+ NotificationConfiguration={
+ 'TopicConfigurations': self.topic_conf_list
+ })
+ status = response['ResponseMetadata']['HTTPStatusCode']
+ return response, status
+
+ def del_config(self, notification=None):
+ """delete notification"""
+ parameters = {'notification': notification}
+
+ return self.send_request('DELETE', parameters)
+
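+# Illustrative PSNotificationS3 usage (a sketch; the notification id, topic ARN and
+# event names are made up):
+# topic_conf_list = [{'Id': 'notif1', 'TopicArn': topic_arn, 'Events': ['s3:ObjectCreated:*']}]
+# s3_notification = PSNotificationS3(conn, 'mybucket', topic_conf_list)
+# s3_notification.set_config() # PutBucketNotificationConfiguration
+# s3_notification.get_config() # GetBucketNotificationConfiguration
+# s3_notification.del_config(notification='notif1') # DELETE /mybucket?notification=notif1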
+
+class PSSubscription:
+ """class to set/get/delete a subscription:
+ PUT /subscriptions/<sub-name>?topic=<topic-name>[&push-endpoint=<endpoint>&[<arg1>=<value1>...]]
+ GET /subscriptions/<sub-name>
+ DELETE /subscriptions/<sub-name>
+ also to get list of events, and ack them:
+ GET /subscriptions/<sub-name>?events[&max-entries=<max-entries>][&marker=<marker>]
+ POST /subscriptions/<sub-name>?ack&event-id=<event-id>
+ """
+ def __init__(self, conn, sub_name, topic_name, endpoint=None, endpoint_args=None):
+ self.conn = conn
+ assert topic_name.strip()
+ self.resource = '/subscriptions/'+sub_name
+ if endpoint is not None:
+ self.parameters = {'topic': topic_name, 'push-endpoint': endpoint}
+ self.extra_parameters = endpoint_args
+ else:
+ self.parameters = {'topic': topic_name}
+ self.extra_parameters = None
+
+ def send_request(self, method, parameters=None, extra_parameters=None):
+ """send request to radosgw"""
+ return make_request(self.conn, method, self.resource,
+ parameters=parameters,
+ extra_parameters=extra_parameters)
+
+ def get_config(self):
+ """get subscription info"""
+ return self.send_request('GET')
+
+ def set_config(self):
+ """set subscription"""
+ return self.send_request('PUT', parameters=self.parameters, extra_parameters=self.extra_parameters)
+
+ def del_config(self, topic=False):
+ """delete subscription"""
+ if topic:
+ return self.send_request('DELETE', self.parameters)
+ return self.send_request('DELETE')
+
+ def get_events(self, max_entries=None, marker=None):
+ """ get events from subscription """
+ parameters = {'events': None}
+ if max_entries is not None:
+ parameters['max-entries'] = max_entries
+ if marker is not None:
+ parameters['marker'] = marker
+ return self.send_request('GET', parameters)
+
+ def ack_events(self, event_id):
+ """ ack events in a subscription """
+ parameters = {'ack': None, 'event-id': event_id}
+ return self.send_request('POST', parameters)
+
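+# Illustrative PSSubscription usage (a sketch; subscription/topic names are made up):
+# sub = PSSubscription(conn, 'mysub', 'mytopic')
+# sub.set_config() # PUT /subscriptions/mysub?topic=mytopic
+# events, _ = sub.get_events() # GET /subscriptions/mysub?events
+# sub.ack_events(event_id) # POST /subscriptions/mysub?ack&event-id=...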
+
+class PSZoneConfig:
+ """ pubsub zone configuration """
+ def __init__(self, cfg, section):
+ self.full_sync = cfg.get(section, 'start_with_full_sync')
+ self.retention_days = cfg.get(section, 'retention_days')
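+# PSZoneConfig reads its two keys from a config section that looks roughly like the
+# following (an illustrative sketch; the actual section name depends on the test setup):
+# [<pubsub zone section>]
+# start_with_full_sync = false
+# retention_days = 7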
diff --git a/src/test/rgw/rgw_multi/zone_rados.py b/src/test/rgw/rgw_multi/zone_rados.py
new file mode 100644
index 000000000..984b0c2ff
--- /dev/null
+++ b/src/test/rgw/rgw_multi/zone_rados.py
@@ -0,0 +1,109 @@
+import logging
+from boto.s3.deletemarker import DeleteMarker
+
+from itertools import zip_longest # type: ignore
+
+from nose.tools import eq_ as eq
+
+from .multisite import *
+
+log = logging.getLogger(__name__)
+
+def check_object_eq(k1, k2, check_extra = True):
+ assert k1
+ assert k2
+ log.debug('comparing key name=%s', k1.name)
+ eq(k1.name, k2.name)
+ eq(k1.version_id, k2.version_id)
+ eq(k1.is_latest, k2.is_latest)
+ eq(k1.last_modified, k2.last_modified)
+ if isinstance(k1, DeleteMarker):
+ assert isinstance(k2, DeleteMarker)
+ return
+
+ eq(k1.get_contents_as_string(), k2.get_contents_as_string())
+ eq(k1.metadata, k2.metadata)
+ eq(k1.cache_control, k2.cache_control)
+ eq(k1.content_type, k2.content_type)
+ eq(k1.content_encoding, k2.content_encoding)
+ eq(k1.content_disposition, k2.content_disposition)
+ eq(k1.content_language, k2.content_language)
+ eq(k1.etag, k2.etag)
+ if check_extra:
+ eq(k1.owner.id, k2.owner.id)
+ eq(k1.owner.display_name, k2.owner.display_name)
+ eq(k1.storage_class, k2.storage_class)
+ eq(k1.size, k2.size)
+ eq(k1.encrypted, k2.encrypted)
+
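+# check_object_eq() is called below on keys listed from two different zones; an
+# illustrative pattern (a sketch, the keys come from bucket.list_versions()):
+# check_object_eq(k1, k2) # full comparison of listed versions
+# check_object_eq(k1_head, k2_head, False) # HEAD results: skip owner/storage class/size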
+class RadosZone(Zone):
+ def __init__(self, name, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None):
+ super(RadosZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways)
+
+ def tier_type(self):
+ return "rados"
+
+
+ class Conn(ZoneConn):
+ def __init__(self, zone, credentials):
+ super(RadosZone.Conn, self).__init__(zone, credentials)
+
+ def get_bucket(self, name):
+ return self.conn.get_bucket(name)
+
+ def create_bucket(self, name):
+ return self.conn.create_bucket(name)
+
+ def delete_bucket(self, name):
+ return self.conn.delete_bucket(name)
+
+ def check_bucket_eq(self, zone_conn, bucket_name):
+ log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
+ b1 = self.get_bucket(bucket_name)
+ b2 = zone_conn.get_bucket(bucket_name)
+
+ b1_versions = b1.list_versions()
+ log.debug('bucket1 objects:')
+ for o in b1_versions:
+ log.debug('o=%s', o.name)
+
+ b2_versions = b2.list_versions()
+ log.debug('bucket2 objects:')
+ for o in b2_versions:
+ log.debug('o=%s', o.name)
+
+ for k1, k2 in zip_longest(b1_versions, b2_versions):
+ if k1 is None:
+ log.critical('key=%s is missing from zone=%s', k2.name, self.name)
+ assert False
+ if k2 is None:
+ log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name)
+ assert False
+
+ check_object_eq(k1, k2)
+
+ if isinstance(k1, DeleteMarker):
+ # verify that HEAD sees a delete marker
+ assert b1.get_key(k1.name) is None
+ assert b2.get_key(k2.name) is None
+ else:
+ # now get the keys through a HEAD operation, verify that the available data is the same
+ k1_head = b1.get_key(k1.name, version_id=k1.version_id)
+ k2_head = b2.get_key(k2.name, version_id=k2.version_id)
+ check_object_eq(k1_head, k2_head, False)
+
+ if k1.version_id:
+ # compare the olh to make sure they agree about the current version
+ k1_olh = b1.get_key(k1.name)
+ k2_olh = b2.get_key(k2.name)
+ # if there's a delete marker, HEAD will return None
+ if k1_olh or k2_olh:
+ check_object_eq(k1_olh, k2_olh, False)
+
+ log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name)
+
+ return True
+
+ def get_conn(self, credentials):
+ return self.Conn(self, credentials)
+
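+# Illustrative usage (a sketch): given two RadosZone.Conn objects for the same
+# zonegroup, conn1.check_bucket_eq(conn2, 'mybucket') asserts that both zones list
+# identical object versions for 'mybucket' and returns True.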