summaryrefslogtreecommitdiffstats
path: root/examples
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:45:59 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:45:59 +0000
commit19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /examples
parentInitial commit. (diff)
downloadceph-upstream/16.2.11+ds.tar.xz
ceph-upstream/16.2.11+ds.zip
Adding upstream version 16.2.11+ds.upstream/16.2.11+dsupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'examples')
-rw-r--r--examples/boto3/README.md98
-rwxr-xr-xexamples/boto3/append_object.py42
-rwxr-xr-xexamples/boto3/delete_notification.py36
-rwxr-xr-xexamples/boto3/get_notification.py28
-rwxr-xr-xexamples/boto3/get_usage_stats.py17
-rwxr-xr-xexamples/boto3/list_unordered.py25
-rwxr-xr-xexamples/boto3/notification_filters.py48
-rw-r--r--examples/boto3/service-2.sdk-extras.json215
-rw-r--r--examples/boto3/topic_attributes.py46
-rwxr-xr-xexamples/boto3/topic_with_endpoint.py41
-rw-r--r--examples/librados/Makefile39
-rw-r--r--examples/librados/hello_radosstriper.cc102
-rw-r--r--examples/librados/hello_world.cc289
-rw-r--r--examples/librados/hello_world.readme14
-rw-r--r--examples/librados/hello_world_c.c304
-rw-r--r--examples/librbd/Makefile27
-rw-r--r--examples/librbd/hello_world.cc220
-rw-r--r--examples/rbd-replay/.gitignore3
-rwxr-xr-xexamples/rbd-replay/create-image8
-rwxr-xr-xexamples/rbd-replay/replay9
-rwxr-xr-xexamples/rbd-replay/run-rbd-replay-prep3
-rwxr-xr-xexamples/rbd-replay/trace11
-rw-r--r--examples/rgw-cache/nginx-default.conf136
-rw-r--r--examples/rgw-cache/nginx-lua-file.lua26
-rw-r--r--examples/rgw-cache/nginx-noprefetch.conf101
-rw-r--r--examples/rgw-cache/nginx-slicing.conf137
-rw-r--r--examples/rgw-cache/nginx.conf57
27 files changed, 2082 insertions, 0 deletions
diff --git a/examples/boto3/README.md b/examples/boto3/README.md
new file mode 100644
index 000000000..52f43746d
--- /dev/null
+++ b/examples/boto3/README.md
@@ -0,0 +1,98 @@
+# Introduction
+This directory contains examples on how to use AWS CLI/boto3 to exercise the RadosGW extensions to the S3 API.
+This is an extension to the [AWS SDK](https://github.com/boto/botocore/blob/develop/botocore/data/s3/2006-03-01/service-2.json).
+
+# Users
+For the standard client to support these extensions, the: ``service-2.sdk-extras.json`` file should be placed under: ``~/.aws/models/s3/2006-03-01/`` directory.
+For more information see [here](https://github.com/boto/botocore/blob/develop/botocore/loaders.py#L33).
+## Python
+The [boto3 client](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) can be used with these extensions; code samples exist in this directory.
+## AWS CLI
+The standard [AWS CLI](https://docs.aws.amazon.com/cli/latest/) may also be used with these extensions. For example:
+- Unordered listing:
+```
+aws --endpoint-url http://localhost:8000 s3api list-objects --bucket=mybucket --allow-unordered
+```
+
+- Topic creation with endpoint:
+```
+aws --endpoint-url http://localhost:8000 sns create-topic --name=mytopic --attributes='{"push-endpoint": "amqp://localhost:5672", "amqp-exchange": "ex1", "amqp-ack-level": "broker"}'
+```
+Expected output:
+```
+{
+ "TopicArn": "arn:aws:sns:default::mytopic"
+}
+```
+
+- Get topic attributes:
+```
+aws --endpoint-url http://localhost:8000 sns get-topic-attributes --topic-arn="arn:aws:sns:default::mytopic"
+```
+Expected output:
+```
+{
+ "Attributes": {
+ "User": "",
+ "Name": "mytopic",
+ "EndPoint": "{\"EndpointAddress\":\"amqp://localhost:5672\",\"EndpointArgs\":\"Attributes.entry.1.key=push-endpoint&Attributes.entry.1.value=amqp://localhost:5672&Attributes.entry.2.key=amqp-exchange&Attributes.entry.2.value=ex1&Attributes.entry.3.key=amqp-ack-level&Attributes.entry.3.value=broker&Version=2010-03-31&amqp-ack-level=broker&amqp-exchange=ex1&push-endpoint=amqp://localhost:5672\",\"EndpointTopic\":\"mytopic\",\"HasStoredSecret\":\"false\",\"Persistent\":\"false\"}",
+ "TopicArn": "arn:aws:sns:default::mytopic",
+ "OpaqueData": ""
+ }
+}
+```
+
+- Bucket notifications with filtering extensions (bucket must exist before calling this command):
+```
+aws --region=default --endpoint-url http://localhost:8000 s3api put-bucket-notification-configuration --bucket=mybucket --notification-configuration='{"TopicConfigurations": [{"Id": "notif1", "TopicArn": "arn:aws:sns:default::mytopic", "Events": ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"], "Filter": {"Metadata": {"FilterRules": [{"Name": "x-amz-meta-foo", "Value": "bar"}, {"Name": "x-amz-meta-hello", "Value": "world"}]}, "Key": {"FilterRules": [{"Name": "regex", "Value": "([a-z]+)"}]}}}]}'
+```
+
+- Get configuration of a specific notification of a bucket:
+```
+aws --endpoint-url http://localhost:8000 s3api get-bucket-notification-configuration --bucket=mybucket --notification=notif1
+```
+Expected output:
+```
+{
+ "TopicConfigurations": [
+ {
+ "Id": "notif1",
+ "TopicArn": "arn:aws:sns:default::mytopic",
+ "Events": [
+ "s3:ObjectCreated:*",
+ "s3:ObjectRemoved:*"
+ ],
+ "Filter": {
+ "Key": {
+ "FilterRules": [
+ {
+ "Name": "regex",
+ "Value": "([a-z]+)"
+ }
+ ]
+ },
+ "Metadata": {
+ "FilterRules": [
+ {
+ "Name": "x-amz-meta-foo",
+ "Value": "bar"
+ },
+ {
+ "Name": "x-amz-meta-hello",
+ "Value": "world"
+ }
+ ]
+ }
+ }
+ }
+ ]
+}
+```
+
+# Developers
+Anyone developing an extension to the S3 API supported by AWS, please modify ``service-2.sdk-extras.json`` (all extensions should go into the same file), so that boto3 could be used to test the new API.
+In addition, python files with code samples should be added to this directory demonstrating use of the new API.
+When testing your changes please:
+- make sure that the modified file is in the boto3 path as explained above
+- make sure that the standard S3 test suite is not broken, even with the extension files in the path
+
diff --git a/examples/boto3/append_object.py b/examples/boto3/append_object.py
new file mode 100755
index 000000000..0e13252ec
--- /dev/null
+++ b/examples/boto3/append_object.py
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+from __future__ import print_function
+
+import boto3
+import sys
+import json
+
+def js_print(arg):
+ print(json.dumps(arg, indent=2))
+
+if len(sys.argv) != 3:
+ print('Usage: ' + sys.argv[0] + ' <bucket> <key>')
+ sys.exit(1)
+
+# bucket name as first argument
+bucketname = sys.argv[1]
+keyname = sys.argv[2]
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+print('deleting object first')
+js_print(client.delete_object(Bucket=bucketname, Key=keyname))
+print('appending at position 0')
+resp = client.put_object(Bucket=bucketname, Key=keyname,
+ Append=True,
+ AppendPosition=0,
+ Body='8letters')
+
+js_print(resp)
+append_pos = resp['AppendPosition']
+print('appending at position %d' % append_pos)
+js_print(client.put_object(Bucket=bucketname, Key=keyname,
+ Append=True,
+ AppendPosition=append_pos,
+ Body='8letters'))
diff --git a/examples/boto3/delete_notification.py b/examples/boto3/delete_notification.py
new file mode 100755
index 000000000..ca5958e52
--- /dev/null
+++ b/examples/boto3/delete_notification.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+
+import boto3
+import sys
+
+if len(sys.argv) == 3:
+ # bucket name as first argument
+ bucketname = sys.argv[1]
+ # notification name as second argument
+ notification_name = sys.argv[2]
+elif len(sys.argv) == 2:
+ # bucket name as first argument
+ bucketname = sys.argv[1]
+ notification_name = ""
+else:
+ print('Usage: ' + sys.argv[0] + ' <bucket> [notification]')
+ sys.exit(1)
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+# deleting a specific notification configuration from a bucket (when NotificationId is provided) or
+# deleting all notification configurations on a bucket (without deleting the bucket itself) are extensions to the AWS S3 API
+
+if notification_name == "":
+ print(client.delete_bucket_notification_configuration(Bucket=bucketname))
+else:
+ print(client.delete_bucket_notification_configuration(Bucket=bucketname,
+ Notification=notification_name))
diff --git a/examples/boto3/get_notification.py b/examples/boto3/get_notification.py
new file mode 100755
index 000000000..490c018d4
--- /dev/null
+++ b/examples/boto3/get_notification.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+
+import boto3
+import sys
+
+if len(sys.argv) != 3:
+ print('Usage: ' + sys.argv[0] + ' <bucket> <notification>')
+ sys.exit(1)
+
+# bucket name as first argument
+bucketname = sys.argv[1]
+# notification name as second argument
+notification_name = sys.argv[2]
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+# getting a specific notification configuration is an extension to AWS S3 API
+
+print(client.get_bucket_notification_configuration(Bucket=bucketname,
+ Notification=notification_name))
diff --git a/examples/boto3/get_usage_stats.py b/examples/boto3/get_usage_stats.py
new file mode 100755
index 000000000..0b7880d4f
--- /dev/null
+++ b/examples/boto3/get_usage_stats.py
@@ -0,0 +1,17 @@
+#!/usr/bin/python
+from __future__ import print_function
+
+import boto3
+import json
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+print(json.dumps(client.get_usage_stats(), indent=2))
diff --git a/examples/boto3/list_unordered.py b/examples/boto3/list_unordered.py
new file mode 100755
index 000000000..2aa5a8e06
--- /dev/null
+++ b/examples/boto3/list_unordered.py
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+
+import boto3
+import sys
+
+if len(sys.argv) != 2:
+ print('Usage: ' + sys.argv[0] + ' <bucket>')
+ sys.exit(1)
+
+# bucket name as first argument
+bucketname = sys.argv[1]
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+# getting an unordered list of objects is an extension to the AWS S3 API
+
+print(client.list_objects(Bucket=bucketname, AllowUnordered=True))
diff --git a/examples/boto3/notification_filters.py b/examples/boto3/notification_filters.py
new file mode 100755
index 000000000..2687c8b3a
--- /dev/null
+++ b/examples/boto3/notification_filters.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python
+
+import boto3
+import sys
+
+if len(sys.argv) != 4:
+ print('Usage: ' + sys.argv[0] + ' <bucket> <topic ARN> <notification Id>')
+ sys.exit(1)
+
+# bucket name as first argument
+bucketname = sys.argv[1]
+# topic ARN as second argument
+topic_arn = sys.argv[2]
+# notification id as third argument
+notification_id = sys.argv[3]
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+# regex filtering on the object name and metadata-based filtering are extensions to the AWS S3 API
+# bucket and topic should be created beforehand
+
+topic_conf_list = [{'Id': notification_id,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
+ 'Filter': {
+ 'Metadata': {
+ 'FilterRules': [{'Name': 'x-amz-meta-foo', 'Value': 'bar'},
+ {'Name': 'x-amz-meta-hello', 'Value': 'world'}]
+ },
+ 'Tags': {
+ 'FilterRules': [{'Name': 'foo', 'Value': 'bar'},
+ {'Name': 'hello', 'Value': 'world'}]
+ },
+ 'Key': {
+ 'FilterRules': [{'Name': 'regex', 'Value': '([a-z]+)'}]
+ }
+ }}]
+
+print(client.put_bucket_notification_configuration(Bucket=bucketname,
+ NotificationConfiguration={'TopicConfigurations': topic_conf_list}))
diff --git a/examples/boto3/service-2.sdk-extras.json b/examples/boto3/service-2.sdk-extras.json
new file mode 100644
index 000000000..9ee66730e
--- /dev/null
+++ b/examples/boto3/service-2.sdk-extras.json
@@ -0,0 +1,215 @@
+{
+"version": 1.0,
+"merge": {
+ "operations":{
+ "DeleteBucketNotificationConfiguration":{
+ "name":"DeleteBucketNotificationConfiguration",
+ "http":{
+ "method":"DELETE",
+ "requestUri":"/{Bucket}?notification",
+ "responseCode":204
+ },
+ "input":{"shape":"DeleteBucketNotificationConfigurationRequest"},
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/bucketops/#delete-notification",
+ "documentation":"<p>Deletes the notification configuration from the bucket.</p>"
+ },
+ "GetUsageStats":{
+ "name":"GetUsageStats",
+ "http":{
+ "method":"GET",
+ "requestUri":"/?usage",
+ "responseCode":200
+ },
+ "output": {"shape": "GetUsageStatsOutput"},
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/serviceops#get-usage-stats",
+ "documentation":"<p>Get usage stats for the user</p>"
+ }
+ },
+ "shapes": {
+ "ListObjectsRequest": {
+ "members": {
+ "AllowUnordered": {
+ "shape":"AllowUnordered",
+ "documentation":"<p>Allow the listing results to be returned in unsorted order. This may be faster when listing very large buckets.</p>",
+ "location":"querystring",
+ "locationName":"allow-unordered"
+ }
+ }
+ },
+ "AllowUnordered":{"type":"boolean"},
+ "PutObjectRequest": {
+ "members": {
+ "AppendPosition": {
+ "shape":"AppendPosition",
+ "documentation": "<p>Position to allow appending</p>",
+ "location": "querystring",
+ "locationName": "position"
+ },
+ "Append": {
+ "shape":"Append",
+ "documentation":"<p>Append Object</p>",
+ "location": "querystring",
+ "locationName": "append"
+ }
+ }
+ },
+ "Append": {"type":"boolean"},
+ "AppendPosition":{"type":"integer"},
+ "PutObjectOutput": {
+ "members": {
+ "AppendPosition": {
+ "shape":"AppendPosition",
+ "documentation": "<p>Position to allow appending</p>",
+ "location": "header",
+ "locationName": "x-rgw-next-append-position",
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/objectops/#append-object"
+ }
+ }
+ },
+ "GetBucketNotificationConfigurationRequest":{
+ "type":"structure",
+ "required":["Bucket"],
+ "members":{
+ "Bucket":{
+ "shape":"BucketName",
+ "documentation":"<p>Name of the bucket to get the notifications configuration for.</p>",
+ "location":"uri",
+ "locationName":"Bucket"
+ },
+ "Notification":{
+ "shape":"NotificationId",
+ "documentation":"<p>Id of the specific notification on the bucket for which the configuration should be retrieved.</p>",
+ "location":"querystring",
+ "locationName":"notification-id"
+ }
+ }
+ },
+ "DeleteBucketNotificationConfigurationRequest":{
+ "type":"structure",
+ "required":["Bucket"],
+ "members":{
+ "Bucket":{
+ "shape":"BucketName",
+ "documentation":"<p>Name of the bucket to delete the notifications configuration from.</p>",
+ "location":"uri",
+ "locationName":"Bucket"
+ },
+ "Notification":{
+ "shape":"NotificationId",
+ "documentation":"<p>Id of the specific notification on the bucket to be deleted.</p>",
+ "location":"querystring",
+ "locationName":"notification-id"
+ }
+ }
+ },
+ "FilterRule":{
+ "type":"structure",
+ "members":{
+ "Name":{
+ "shape":"FilterRuleName",
+ "documentation":"<p>The object key name prefix, suffix or regex identifying one or more objects to which the filtering rule applies. The maximum length is 1,024 characters. Overlapping prefixes and suffixes are supported.</p>"
+ },
+ "Value":{
+ "shape":"FilterRuleValue",
+ "documentation":"<p>The value that the filter searches for in object key names.</p>"
+ }
+ },
+ "documentation":"<p>Specifies the Amazon S3 object key name to filter on and whether to filter on the suffix, prefix or regex of the key name.</p>"
+ },
+ "FilterRuleName":{
+ "type":"string",
+ "enum":[
+ "prefix",
+ "suffix",
+ "regex"
+ ]
+ },
+ "NotificationConfigurationFilter":{
+ "type":"structure",
+ "members":{
+ "Key":{
+ "shape":"S3KeyFilter",
+ "documentation":"<p/>",
+ "locationName":"S3Key"
+ },
+ "Metadata":{
+ "shape":"S3MetadataFilter",
+ "documentation":"<p/>",
+ "locationName":"S3Metadata"
+ },
+ "Tags":{
+ "shape":"S3TagsFilter",
+ "documentation":"<p/>",
+ "locationName":"S3Tags"
+ }
+
+ },
+ "documentation":"<p>Specifies object key name filtering rules. For information about key name filtering, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html\">Configuring Event Notifications</a> in the <i>Amazon Simple Storage Service Developer Guide</i>.</p>"
+ },
+ "S3KeyFilter":{
+ "type":"structure",
+ "members":{
+ "FilterRules":{
+ "shape":"FilterRuleList",
+ "documentation":"<p/>",
+ "locationName":"FilterRule"
+ }
+ },
+ "documentation":"<p>A container for object key name prefix, suffix and regex filtering rules.</p>"
+ },
+ "S3MetadataFilter":{
+ "type":"structure",
+ "members":{
+ "FilterRules":{
+ "shape":"FilterRuleList",
+ "documentation":"<p/>",
+ "locationName":"FilterRule"
+ }
+ },
+ "documentation":"<p>A container for metadata filtering rules.</p>"
+ },
+ "S3TagsFilter":{
+ "type":"structure",
+ "members":{
+ "FilterRules":{
+ "shape":"FilterRuleList",
+ "documentation":"<p/>",
+ "locationName":"FilterRule"
+ }
+ },
+ "documentation":"<p>A container for object tags filtering rules.</p>"
+ },
+ "GetUsageStatsOutput": {
+ "type": "structure",
+ "members": {
+ "Summary": {
+ "shape":"UsageStatsSummary",
+ "documentation": "<p/>"
+ }
+ }
+ },
+ "UsageStatsSummary": {
+ "type": "structure",
+ "members": {
+ "QuotaMaxBytes":{"shape":"QuotaMaxBytes"},
+ "QuotaMaxBuckets":{"shape": "QuotaMaxBuckets"},
+ "QuotaMaxObjCount":{"shape":"QuotaMaxObjCount"},
+ "QuotaMaxBytesPerBucket":{"shape":"QuotaMaxBytesPerBucket"},
+ "QuotaMaxObjCountPerBucket":{"shape":"QuotaMaxObjCountPerBucket"},
+ "TotalBytes":{"shape":"TotalBytes"},
+ "TotalBytesRounded":{"shape":"TotalBytesRounded"},
+ "TotalEntries":{"shape":"TotalEntries"}
+ }
+ },
+ "QuotaMaxBytes":{"type":"integer"},
+ "QuotaMaxBuckets":{"type": "integer"},
+ "QuotaMaxObjCount":{"type":"integer"},
+ "QuotaMaxBytesPerBucket":{"type":"integer"},
+ "QuotaMaxObjCountPerBucket":{"type":"integer"},
+ "TotalBytesRounded":{"type":"integer"},
+ "TotalBytes":{"type":"integer"},
+ "TotalEntries":{"type":"integer"}
+ },
+ "documentation":"<p/>"
+}
+}
diff --git a/examples/boto3/topic_attributes.py b/examples/boto3/topic_attributes.py
new file mode 100644
index 000000000..3caeb1fec
--- /dev/null
+++ b/examples/boto3/topic_attributes.py
@@ -0,0 +1,46 @@
+import sys
+import urllib
+import hmac
+import hashlib
+import base64
+import xmltodict
+import http.client
+from urllib import parse as urlparse
+from time import gmtime, strftime
+
+if len(sys.argv) == 2:
+ # topic arn as first argument
+ topic_arn = sys.argv[1]
+else:
+ print ('Usage: ' + sys.argv[0] + ' <topic arn> [region name]')
+ sys.exit(1)
+
+# endpoint and keys from vstart
+endpoint = '127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+
+parameters = {'Action': 'GetTopic', 'TopicArn': topic_arn}
+body = urlparse.urlencode(parameters)
+string_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
+content_type = 'application/x-www-form-urlencoded; charset=utf-8'
+resource = '/'
+method = 'POST'
+string_to_sign = method + '\n\n' + content_type + '\n' + string_date + '\n' + resource
+signature = base64.b64encode(hmac.new(secret_key.encode('utf-8'), string_to_sign.encode('utf-8'), hashlib.sha1).digest()).decode('ascii')
+headers = {'Authorization': 'AWS '+access_key+':'+signature,
+ 'Date': string_date,
+ 'Host': endpoint,
+ 'Content-Type': content_type}
+http_conn = http.client.HTTPConnection(endpoint)
+http_conn.request(method, resource, body, headers)
+response = http_conn.getresponse()
+data = response.read()
+status = response.status
+http_conn.close()
+dict_response = xmltodict.parse(data)
+
+# getting attributes of a specific topic is an extension to AWS SNS
+
+print(dict_response, status)
diff --git a/examples/boto3/topic_with_endpoint.py b/examples/boto3/topic_with_endpoint.py
new file mode 100755
index 000000000..3137cee7d
--- /dev/null
+++ b/examples/boto3/topic_with_endpoint.py
@@ -0,0 +1,41 @@
+#!/usr/bin/python
+
+import boto3
+import sys
+import urlparse
+from botocore.client import Config
+
+if len(sys.argv) == 3:
+ # topic name as first argument
+ topic_name = sys.argv[1]
+ # region name as second argument
+ region_name = sys.argv[2]
+elif len(sys.argv) == 2:
+ # topic name as first argument
+ topic_name = sys.argv[1]
+ region_name = ""
+else:
+ print('Usage: ' + sys.argv[0] + ' <topic name> [region name]')
+ sys.exit(1)
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('sns',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ region_name=region_name,
+ aws_secret_access_key=secret_key,
+ config=Config(signature_version='s3'))
+
+# to see the list of available "regions" use:
+# radosgw-admin realm zonegroup list
+
+# this is standard AWS services call, using custom attributes to add AMQP endpoint information to the topic
+
+endpoint_args = 'push-endpoint=amqp://127.0.0.1:5672&amqp-exchange=ex1&amqp-ack-level=broker'
+attributes = {nvp[0] : nvp[1] for nvp in urlparse.parse_qsl(endpoint_args, keep_blank_values=True)}
+
+print(client.create_topic(Name=topic_name, Attributes=attributes))
diff --git a/examples/librados/Makefile b/examples/librados/Makefile
new file mode 100644
index 000000000..fd61cb998
--- /dev/null
+++ b/examples/librados/Makefile
@@ -0,0 +1,39 @@
+
+CXX?=g++
+CXX_FLAGS?=-std=c++11 -Wno-unused-parameter -Wall -Wextra -Werror -g
+CXX_LIBS?=-lrados -lradosstriper
+CXX_INC?=$(LOCAL_LIBRADOS_INC)
+CXX_CC=$(CXX) $(CXX_FLAGS) $(CXX_INC) $(LOCAL_LIBRADOS)
+
+CC?=gcc
+CC_FLAGS=-Wno-unused-parameter -Wall -Wextra -Werror -g
+CC_INC=$(LOCAL_LIBRADOS_INC)
+CC_LIBS?=-lrados
+CC_CC=$(CC) $(CC_FLAGS) $(CC_INC) $(LOCAL_LIBRADOS)
+
+# Relative path to the Ceph source:
+CEPH_SRC_HOME?=../../src
+CEPH_BLD_HOME?=../../build
+
+LOCAL_LIBRADOS?=-L$(CEPH_BLD_HOME)/lib/ -Wl,-rpath,$(CEPH_BLD_HOME)/lib
+LOCAL_LIBRADOS_INC?=-I$(CEPH_SRC_HOME)/include
+
+all: hello_world_cpp hello_radosstriper_cpp hello_world_c
+
+# Build against the system librados instead of the one in the build tree:
+all-system: LOCAL_LIBRADOS=
+all-system: LOCAL_LIBRADOS_INC=
+all-system: all
+
+hello_world_cpp: hello_world.cc
+ $(CXX_CC) -o hello_world_cpp hello_world.cc $(CXX_LIBS)
+
+hello_radosstriper_cpp: hello_radosstriper.cc
+ $(CXX_CC) -o hello_radosstriper_cpp hello_radosstriper.cc $(CXX_LIBS)
+
+hello_world_c: hello_world_c.c
+ $(CC_CC) -o hello_world_c hello_world_c.c $(CC_LIBS)
+
+clean:
+ rm -f hello_world_cpp hello_radosstriper_cpp hello_world_c
+
diff --git a/examples/librados/hello_radosstriper.cc b/examples/librados/hello_radosstriper.cc
new file mode 100644
index 000000000..f1b43d8fc
--- /dev/null
+++ b/examples/librados/hello_radosstriper.cc
@@ -0,0 +1,102 @@
+#include "rados/librados.hpp"
+#include "radosstriper/libradosstriper.hpp"
+#include <iostream>
+#include <string>
+
+
+int main(int argc, char* argv[])
+{
+ if(argc != 6)
+ {
+ std::cout <<"Please put in correct params\n"<<
+ "Stripe Count:\n"<<
+ "Object Size:\n" <<
+ "File Name:\n" <<
+ "Object Name:\n"
+ "Pool Name:"<< std::endl;
+ return EXIT_FAILURE;
+ }
+ uint32_t strip_count = std::stoi(argv[1]);
+ uint32_t obj_size = std::stoi(argv[2]);
+ std::string fname = argv[3];
+ std::string obj_name = argv[4];
+ std::string pool_name = argv[5];
+ int ret = 0;
+ librados::IoCtx io_ctx;
+ librados::Rados cluster;
+ libradosstriper::RadosStriper* rs = new libradosstriper::RadosStriper;
+
+ // make sure the keyring file is in /etc/ceph/ and is world readable
+ ret = cluster.init2("client.admin","ceph",0);
+ if( ret < 0)
+ {
+ std::cerr << "Couldn't init cluster "<< ret << std::endl;
+ }
+
+ // make sure ceph.conf is in /etc/ceph/ and is world readable
+ ret = cluster.conf_read_file("ceph.conf");
+ if( ret < 0)
+ {
+ std::cerr << "Couldn't read conf file "<< ret << std::endl;
+ }
+ ret = cluster.connect();
+ if(ret < 0)
+ {
+ std::cerr << "Couldn't connect to cluster "<< ret << std::endl;
+ }
+ else
+ {
+ std::cout << "Connected to Cluster"<< std::endl;
+ }
+
+ ret = cluster.ioctx_create(pool_name.c_str(), io_ctx);
+
+ if(ret < 0)
+ {
+ std::cerr << "Couldn't Create IO_CTX"<< ret << std::endl;
+ }
+ ret = libradosstriper::RadosStriper::striper_create(io_ctx,rs);
+ if(ret < 0)
+ {
+ std::cerr << "Couldn't Create RadosStriper"<< ret << std::endl;
+ delete rs;
+ }
+ uint64_t alignment = 0;
+ ret = io_ctx.pool_required_alignment2(&alignment);
+ if(ret < 0)
+ {
+ std::cerr << "IO_CTX didn't give alignment "<< ret
+ << "\n Is this an erasure coded pool? "<< std::endl;
+
+ delete rs;
+ io_ctx.close();
+ cluster.shutdown();
+ return EXIT_FAILURE;
+ }
+ std::cout << "Pool alignment: "<< alignment << std::endl;
+ rs->set_object_layout_stripe_unit(alignment);
+ // how many objects are we striping across?
+ rs->set_object_layout_stripe_count(strip_count);
+ // how big should each object be?
+ rs->set_object_layout_object_size(obj_size);
+
+ std::string err = "no_err";
+ librados::bufferlist bl;
+ bl.read_file(fname.c_str(),&err);
+ if(err != "no_err")
+ {
+ std::cout << "Error reading file into bufferlist: "<< err << std::endl;
+ delete rs;
+ io_ctx.close();
+ cluster.shutdown();
+ return EXIT_FAILURE;
+ }
+
+ std::cout << "Writing: " << fname << "\nas: "<< obj_name << std::endl;
+ rs->write_full(obj_name,bl);
+ std::cout << "done with: " << fname << std::endl;
+
+ delete rs;
+ io_ctx.close();
+ cluster.shutdown();
+}
diff --git a/examples/librados/hello_world.cc b/examples/librados/hello_world.cc
new file mode 100644
index 000000000..759e26966
--- /dev/null
+++ b/examples/librados/hello_world.cc
@@ -0,0 +1,289 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ * Copyright 2013 Inktank
+ */
+
+// install the librados-dev package to get this
+#include <rados/librados.hpp>
+#include <iostream>
+#include <string>
+
+int main(int argc, const char **argv)
+{
+ int ret = 0;
+
+ // we will use all of these below
+ const char *pool_name = "hello_world_pool";
+ std::string hello("hello world!");
+ std::string object_name("hello_object");
+ librados::IoCtx io_ctx;
+
+ // first, we create a Rados object and initialize it
+ librados::Rados rados;
+ {
+ ret = rados.init("admin"); // just use the client.admin keyring
+ if (ret < 0) { // let's handle any error that might have come back
+ std::cerr << "couldn't initialize rados! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ std::cout << "we just set up a rados cluster object" << std::endl;
+ }
+
+ /*
+ * Now we need to get the rados object its config info. It can
+ * parse argv for us to find the id, monitors, etc, so let's just
+ * use that.
+ */
+ {
+ ret = rados.conf_parse_argv(argc, argv);
+ if (ret < 0) {
+ // This really can't happen, but we need to check to be a good citizen.
+ std::cerr << "failed to parse config options! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+
+ std::cout << "we just parsed our config options" << std::endl;
+ // We also want to apply the config file if the user specified
+ // one, and conf_parse_argv won't do that for us.
+ for (int i = 0; i < argc; ++i) {
+ if ((strcmp(argv[i], "-c") == 0) || (strcmp(argv[i], "--conf") == 0)) {
+ ret = rados.conf_read_file(argv[i+1]);
+ if (ret < 0) {
+ // This could fail if the config file is malformed, but it'd be hard.
+ std::cerr << "failed to parse config file " << argv[i+1]
+ << "! error" << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ break;
+ }
+ }
+ }
+
+ /*
+ * next, we actually connect to the cluster
+ */
+ {
+ ret = rados.connect();
+ if (ret < 0) {
+ std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ std::cout << "we just connected to the rados cluster" << std::endl;
+ }
+
+ /*
+ * let's create our own pool instead of scribbling over real data.
+ * Note that this command creates pools with default PG counts specified
+ * by the monitors, which may not be appropriate for real use -- it's fine
+ * for testing, though.
+ */
+ {
+ ret = rados.pool_create(pool_name);
+ if (ret < 0) {
+ std::cerr << "couldn't create pool! error " << ret << std::endl;
+ return EXIT_FAILURE;
+ }
+ std::cout << "we just created a new pool named " << pool_name << std::endl;
+ }
+
+ /*
+ * create an "IoCtx" which is used to do IO to a pool
+ */
+ {
+ ret = rados.ioctx_create(pool_name, io_ctx);
+ if (ret < 0) {
+ std::cerr << "couldn't set up ioctx! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ std::cout << "we just created an ioctx for our pool" << std::endl;
+ }
+
+ /*
+ * now let's do some IO to the pool! We'll write "hello world!" to a
+ * new object.
+ */
+ {
+ /*
+ * "bufferlist"s are Ceph's native transfer type, and are carefully
+ * designed to be efficient about copying. You can fill them
+ * up from a lot of different data types, but strings or c strings
+ * are often convenient. Just make sure not to deallocate the memory
+ * until the bufferlist goes out of scope and any requests using it
+ * have been finished!
+ */
+ librados::bufferlist bl;
+ bl.append(hello);
+
+ /*
+ * now that we have the data to write, let's send it to an object.
+ * We'll use the synchronous interface for simplicity.
+ */
+ ret = io_ctx.write_full(object_name, bl);
+ if (ret < 0) {
+ std::cerr << "couldn't write object! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ std::cout << "we just wrote new object " << object_name
+ << ", with contents\n" << hello << std::endl;
+ }
+
+ /*
+ * now let's read that object back! Just for fun, we'll do it using
+ * async IO instead of synchronous. (This would be more useful if we
+ * wanted to send off multiple reads at once; see
+ * http://docs.ceph.com/docs/master/rados/api/librados/#asychronous-io )
+ */
+ {
+ librados::bufferlist read_buf;
+ int read_len = 4194304; // this is way more than we need
+ // allocate the completion from librados
+ librados::AioCompletion *read_completion = librados::Rados::aio_create_completion();
+ // send off the request.
+ ret = io_ctx.aio_read(object_name, read_completion, &read_buf, read_len, 0);
+ if (ret < 0) {
+ std::cerr << "couldn't start read object! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ // wait for the request to complete, and check that it succeeded.
+ read_completion->wait_for_complete();
+ ret = read_completion->get_return_value();
+ if (ret < 0) {
+ std::cerr << "couldn't read object! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ std::cout << "we read our object " << object_name
+ << ", and got back " << ret << " bytes with contents\n";
+ std::string read_string;
+ read_buf.begin().copy(ret, read_string);
+ std::cout << read_string << std::endl;
+ }
+
+ /*
+ * We can also use xattrs that go alongside the object.
+ */
+ {
+ librados::bufferlist version_bl;
+ version_bl.append('1');
+ ret = io_ctx.setxattr(object_name, "version", version_bl);
+ if (ret < 0) {
+ std::cerr << "failed to set xattr version entry! error "
+ << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ std::cout << "we set the xattr 'version' on our object!" << std::endl;
+ }
+
+ /*
+ * And if we want to be really cool, we can do multiple things in a single
+ * atomic operation. For instance, we can update the contents of our object
+ * and set the version at the same time.
+ */
+ {
+ librados::bufferlist bl;
+ bl.append(hello);
+ bl.append("v2");
+ librados::ObjectWriteOperation write_op;
+ write_op.write_full(bl);
+ librados::bufferlist version_bl;
+ version_bl.append('2');
+ write_op.setxattr("version", version_bl);
+ ret = io_ctx.operate(object_name, &write_op);
+ if (ret < 0) {
+ std::cerr << "failed to do compound write! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ std::cout << "we overwrote our object " << object_name
+ << " with contents\n" << bl.c_str() << std::endl;
+ }
+
+ /*
+ * And to be even cooler, we can make sure that the object looks the
+ * way we expect before doing the write! Notice how this attempt fails
+ * because the xattr differs.
+ */
+ {
+ librados::ObjectWriteOperation failed_write_op;
+ librados::bufferlist bl;
+ bl.append(hello);
+ bl.append("v2");
+ librados::ObjectWriteOperation write_op;
+ write_op.write_full(bl);
+ librados::bufferlist version_bl;
+ version_bl.append('2');
+ librados::bufferlist old_version_bl;
+ old_version_bl.append('1');
+ failed_write_op.cmpxattr("version", LIBRADOS_CMPXATTR_OP_EQ, old_version_bl);
+ failed_write_op.write_full(bl);
+ failed_write_op.setxattr("version", version_bl);
+ ret = io_ctx.operate(object_name, &failed_write_op);
+ if (ret < 0) {
+ std::cout << "we just failed a write because the xattr wasn't as specified"
+ << std::endl;
+ } else {
+ std::cerr << "we succeeded on writing despite an xattr comparison mismatch!"
+ << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+
+ /*
+ * Now let's do the update with the correct xattr values so it
+ * actually goes through
+ */
+ bl.clear();
+ bl.append(hello);
+ bl.append("v3");
+ old_version_bl.clear();
+ old_version_bl.append('2');
+ version_bl.clear();
+ version_bl.append('3');
+ librados::ObjectWriteOperation update_op;
+ update_op.cmpxattr("version", LIBRADOS_CMPXATTR_OP_EQ, old_version_bl);
+ update_op.write_full(bl);
+ update_op.setxattr("version", version_bl);
+ ret = io_ctx.operate(object_name, &update_op);
+ if (ret < 0) {
+ std::cerr << "failed to do a compound write update! error " << ret
+ << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ std::cout << "we overwrote our object " << object_name
+ << " following an xattr test with contents\n" << bl.c_str()
+ << std::endl;
+ }
+
+ ret = EXIT_SUCCESS;
+ out:
+ /*
+ * And now we're done, so let's remove our pool and then
+ * shut down the connection gracefully.
+ */
+ int delete_ret = rados.pool_delete(pool_name);
+ if (delete_ret < 0) {
+ // be careful not to
+ std::cerr << "We failed to delete our test pool!" << std::endl;
+ ret = EXIT_FAILURE;
+ }
+
+ rados.shutdown();
+
+ return ret;
+}
diff --git a/examples/librados/hello_world.readme b/examples/librados/hello_world.readme
new file mode 100644
index 000000000..afa1cb32e
--- /dev/null
+++ b/examples/librados/hello_world.readme
@@ -0,0 +1,14 @@
+This simple librados program can be built by running "make" (and cleaned up
+with "make clean"), assuming you have librados-dev already installed.
+
+By default, the makefile will build against the librados headers and library in your
+build tree (i.e. using relative paths). If you would like to build the examples against
+your system librados and headers, use "make all-system".
+
+The resulting binary can be executed using
+./hello_world_cpp -c ../../src/ceph.conf
+(or whatever path to a ceph.conf is appropriate to you, or
+by explicitly specifying monitors, user id, and keys).
+
+It demonstrates using librados in a non-Ceph project and the code should
+be self-explanatory.
diff --git a/examples/librados/hello_world_c.c b/examples/librados/hello_world_c.c
new file mode 100644
index 000000000..2f91828de
--- /dev/null
+++ b/examples/librados/hello_world_c.c
@@ -0,0 +1,304 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ * Copyright 2013 Inktank
+ */
+
+// install the librados-dev package to get this
+#include <rados/librados.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h> // for strcmp()/strlen() used below
+int main(int argc, const char **argv)
+{
+ int ret = 0;
+
+ // we will use all of these below
+ const char *pool_name = "hello_world_pool";
+ const char* hello = "hello world!";
+ const char* object_name = "hello_object";
+ rados_ioctx_t io_ctx = NULL;
+ int pool_created = 0;
+
+ // first, we create a Rados object and initialize it
+ rados_t rados = NULL;
+ {
+ ret = rados_create(&rados, "admin"); // just use the client.admin keyring
+ if (ret < 0) { // let's handle any error that might have come back
+ printf("couldn't initialize rados! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ printf("we just set up a rados cluster object\n");
+ }
+
+ /*
+ * Now we need to get the rados object its config info. It can
+ * parse argv for us to find the id, monitors, etc, so let's just
+ * use that.
+ */
+ {
+ ret = rados_conf_parse_argv(rados, argc, argv);
+ if (ret < 0) {
+ // This really can't happen, but we need to check to be a good citizen.
+ printf("failed to parse config options! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+
+ printf("we just parsed our config options\n");
+ // We also want to apply the config file if the user specified
+ // one, and conf_parse_argv won't do that for us.
+ int i;
+ for (i = 0; i < argc; ++i) {
+ if ((strcmp(argv[i], "-c") == 0) || (strcmp(argv[i], "--conf") == 0)) {
+ ret = rados_conf_read_file(rados, argv[i+1]);
+ if (ret < 0) {
+ // This could fail if the config file is malformed, but it'd be hard.
+ printf("failed to parse config file %s! error %d\n", argv[i+1], ret);
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ break;
+ }
+ }
+ }
+
+ /*
+ * next, we actually connect to the cluster
+ */
+ {
+ ret = rados_connect(rados);
+ if (ret < 0) {
+ printf("couldn't connect to cluster! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ printf("we just connected to the rados cluster\n");
+ }
+
+ /*
+ * let's create our own pool instead of scribbling over real data.
+ * Note that this command creates pools with default PG counts specified
+ * by the monitors, which may not be appropriate for real use -- it's fine
+ * for testing, though.
+ */
+ {
+ ret = rados_pool_create(rados, pool_name);
+ if (ret < 0) {
+ printf("couldn't create pool! error %d\n", ret);
+      ret = EXIT_FAILURE; goto out; // don't skip rados_shutdown() on failure
+ }
+ printf("we just created a new pool named %s\n", pool_name);
+ pool_created = 1;
+ }
+
+ /*
+ * create an "IoCtx" which is used to do IO to a pool
+ */
+ {
+ ret = rados_ioctx_create(rados, pool_name, &io_ctx);
+ if (ret < 0) {
+ printf("couldn't set up ioctx! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ printf("we just created an ioctx for our pool\n");
+ }
+
+ /*
+ * now let's do some IO to the pool! We'll write "hello world!" to a
+ * new object.
+ */
+ {
+ /*
+ * now that we have the data to write, let's send it to an object.
+ * We'll use the synchronous interface for simplicity.
+ */
+ ret = rados_write_full(io_ctx, object_name, hello, strlen(hello));
+ if (ret < 0) {
+ printf("couldn't write object! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ printf("we just wrote new object %s, with contents '%s'\n", object_name, hello);
+ }
+
+ /*
+ * now let's read that object back! Just for fun, we'll do it using
+ * async IO instead of synchronous. (This would be more useful if we
+ * wanted to send off multiple reads at once; see
+ * http://docs.ceph.com/docs/master/rados/api/librados/#asychronous-io )
+ */
+ {
+ int read_len = 4194304; // this is way more than we need
+ char* read_buf = malloc(read_len + 1); // add one for the terminating 0 we'll add later
+ if (!read_buf) {
+ printf("couldn't allocate read buffer\n");
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ // allocate the completion from librados
+ rados_completion_t read_completion;
+ ret = rados_aio_create_completion2(NULL, NULL, &read_completion);
+ if (ret < 0) {
+ printf("couldn't create completion! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ free(read_buf);
+ goto out;
+ }
+ printf("we just created a new completion\n");
+
+ // send off the request.
+ ret = rados_aio_read(io_ctx, object_name, read_completion, read_buf, read_len, 0);
+ if (ret < 0) {
+ printf("couldn't start read object! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ free(read_buf);
+ rados_aio_release(read_completion);
+ goto out;
+ }
+ // wait for the request to complete, and check that it succeeded.
+ rados_aio_wait_for_complete(read_completion);
+ ret = rados_aio_get_return_value(read_completion);
+ if (ret < 0) {
+ printf("couldn't read object! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ free(read_buf);
+ rados_aio_release(read_completion);
+ goto out;
+ }
+ read_buf[ret] = 0; // null-terminate the string
+ printf("we read our object %s, and got back %d bytes with contents\n%s\n", object_name, ret, read_buf);
+
+ free(read_buf);
+ rados_aio_release(read_completion);
+ }
+
+ /*
+ * We can also use xattrs that go alongside the object.
+ */
+ {
+ const char* version = "1";
+ ret = rados_setxattr(io_ctx, object_name, "version", version, strlen(version));
+ if (ret < 0) {
+ printf("failed to set xattr version entry! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ printf("we set the xattr 'version' on our object!\n");
+ }
+
+ /*
+ * And if we want to be really cool, we can do multiple things in a single
+ * atomic operation. For instance, we can update the contents of our object
+ * and set the version at the same time.
+ */
+ {
+ const char* content = "v2";
+ rados_write_op_t write_op = rados_create_write_op();
+ if (!write_op) {
+ printf("failed to allocate write op\n");
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ rados_write_op_write_full(write_op, content, strlen(content));
+ const char* version = "2";
+ rados_write_op_setxattr(write_op, "version", version, strlen(version));
+ ret = rados_write_op_operate(write_op, io_ctx, object_name, NULL, 0);
+ if (ret < 0) {
+ printf("failed to do compound write! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ rados_release_write_op(write_op);
+ goto out;
+ }
+ printf("we overwrote our object %s with contents\n%s\n", object_name, content);
+ rados_release_write_op(write_op);
+ }
+
+ /*
+ * And to be even cooler, we can make sure that the object looks the
+ * way we expect before doing the write! Notice how this attempt fails
+ * because the xattr differs.
+ */
+ {
+ rados_write_op_t failed_write_op = rados_create_write_op();
+ if (!failed_write_op) {
+ printf("failed to allocate write op\n");
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ const char* content = "v2";
+ const char* version = "2";
+ const char* old_version = "1";
+ rados_write_op_cmpxattr(failed_write_op, "version", LIBRADOS_CMPXATTR_OP_EQ, old_version, strlen(old_version));
+ rados_write_op_write_full(failed_write_op, content, strlen(content));
+ rados_write_op_setxattr(failed_write_op, "version", version, strlen(version));
+ ret = rados_write_op_operate(failed_write_op, io_ctx, object_name, NULL, 0);
+ if (ret < 0) {
+ printf("we just failed a write because the xattr wasn't as specified\n");
+ } else {
+ printf("we succeeded on writing despite an xattr comparison mismatch!\n");
+ ret = EXIT_FAILURE;
+ rados_release_write_op(failed_write_op);
+ goto out;
+ }
+ rados_release_write_op(failed_write_op);
+
+ /*
+ * Now let's do the update with the correct xattr values so it
+ * actually goes through
+ */
+ content = "v3";
+ old_version = "2";
+ version = "3";
+ rados_write_op_t update_op = rados_create_write_op();
+    if (!update_op) {
+ printf("failed to allocate write op\n");
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ rados_write_op_cmpxattr(update_op, "version", LIBRADOS_CMPXATTR_OP_EQ, old_version, strlen(old_version));
+ rados_write_op_write_full(update_op, content, strlen(content));
+ rados_write_op_setxattr(update_op, "version", version, strlen(version));
+ ret = rados_write_op_operate(update_op, io_ctx, object_name, NULL, 0);
+ if (ret < 0) {
+ printf("failed to do a compound write update! error %d\n", ret);
+ ret = EXIT_FAILURE;
+ rados_release_write_op(update_op);
+ goto out;
+ }
+ printf("we overwrote our object %s following an xattr test with contents\n%s\n", object_name, content);
+ rados_release_write_op(update_op);
+ }
+
+ ret = EXIT_SUCCESS;
+
+ out:
+ if (io_ctx) {
+ rados_ioctx_destroy(io_ctx);
+ }
+
+ if (pool_created) {
+ /*
+ * And now we're done, so let's remove our pool and then
+ * shut down the connection gracefully.
+ */
+ int delete_ret = rados_pool_delete(rados, pool_name);
+ if (delete_ret < 0) {
+      // be careful not to mask an earlier failure, but still report this one
+ printf("We failed to delete our test pool!\n");
+ ret = EXIT_FAILURE;
+ }
+ }
+
+ rados_shutdown(rados);
+
+ return ret;
+}
diff --git a/examples/librbd/Makefile b/examples/librbd/Makefile
new file mode 100644
index 000000000..2cd9f269a
--- /dev/null
+++ b/examples/librbd/Makefile
@@ -0,0 +1,27 @@
+
+CXX?=g++
+CXX_FLAGS?=-std=c++11 -Wno-unused-parameter -Wall -Wextra -Werror -g
+CXX_LIBS?=-lboost_system -lrbd -lrados
+CXX_INC?=$(LOCAL_LIBRADOS_INC)
+CXX_CC=$(CXX) $(CXX_FLAGS) $(CXX_INC) $(LOCAL_LIBRADOS)
+
+# Relative path to the Ceph source:
+CEPH_SRC_HOME?=../../src
+CEPH_BLD_HOME?=../../build
+
+LOCAL_LIBRADOS?=-L$(CEPH_BLD_HOME)/lib/ -Wl,-rpath,$(CEPH_BLD_HOME)/lib
+LOCAL_LIBRADOS_INC?=-I$(CEPH_SRC_HOME)/include
+
+all: hello_world_cpp
+
+# Build against the system librados instead of the one in the build tree:
+all-system: LOCAL_LIBRADOS=
+all-system: LOCAL_LIBRADOS_INC=
+all-system: all
+
+hello_world_cpp: hello_world.cc
+ $(CXX_CC) -o hello_world_cpp hello_world.cc $(CXX_LIBS)
+
+clean:
+ rm -f hello_world_cpp
+
diff --git a/examples/librbd/hello_world.cc b/examples/librbd/hello_world.cc
new file mode 100644
index 000000000..f21c45f20
--- /dev/null
+++ b/examples/librbd/hello_world.cc
@@ -0,0 +1,220 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ */
+
+// install the librados-dev and librbd package to get this
+#include <rados/librados.hpp>
+#include <rbd/librbd.hpp>
+#include <iostream>
+#include <string>
+#include <sstream>
+
+int main(int argc, const char **argv)
+{
+ int ret = 0;
+
+ // we will use all of these below
+ const char *pool_name = "hello_world_pool";
+ std::string hello("hello world!");
+ std::string object_name("hello_object");
+ librados::IoCtx io_ctx;
+
+ // first, we create a Rados object and initialize it
+ librados::Rados rados;
+ {
+ ret = rados.init("admin"); // just use the client.admin keyring
+ if (ret < 0) { // let's handle any error that might have come back
+ std::cerr << "couldn't initialize rados! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ } else {
+ std::cout << "we just set up a rados cluster object" << std::endl;
+ }
+ }
+
+ /*
+ * Now we need to get the rados object its config info. It can
+ * parse argv for us to find the id, monitors, etc, so let's just
+ * use that.
+ */
+ {
+ ret = rados.conf_parse_argv(argc, argv);
+ if (ret < 0) {
+ // This really can't happen, but we need to check to be a good citizen.
+ std::cerr << "failed to parse config options! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ } else {
+ std::cout << "we just parsed our config options" << std::endl;
+ // We also want to apply the config file if the user specified
+ // one, and conf_parse_argv won't do that for us.
+ for (int i = 0; i < argc; ++i) {
+ if ((strcmp(argv[i], "-c") == 0) || (strcmp(argv[i], "--conf") == 0)) {
+ ret = rados.conf_read_file(argv[i+1]);
+ if (ret < 0) {
+ // This could fail if the config file is malformed, but it'd be hard.
+ std::cerr << "failed to parse config file " << argv[i+1]
+ << "! error" << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ /*
+ * next, we actually connect to the cluster
+ */
+ {
+ ret = rados.connect();
+ if (ret < 0) {
+ std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ } else {
+ std::cout << "we just connected to the rados cluster" << std::endl;
+ }
+ }
+
+ /*
+ * let's create our own pool instead of scribbling over real data.
+ * Note that this command creates pools with default PG counts specified
+ * by the monitors, which may not be appropriate for real use -- it's fine
+ * for testing, though.
+ */
+ {
+ ret = rados.pool_create(pool_name);
+ if (ret < 0) {
+ std::cerr << "couldn't create pool! error " << ret << std::endl;
+      rados.shutdown(); return EXIT_FAILURE; // no pool to clean up yet, but shut down gracefully
+ } else {
+ std::cout << "we just created a new pool named " << pool_name << std::endl;
+ }
+ }
+
+ /*
+ * create an "IoCtx" which is used to do IO to a pool
+ */
+ {
+ ret = rados.ioctx_create(pool_name, io_ctx);
+ if (ret < 0) {
+ std::cerr << "couldn't set up ioctx! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ } else {
+ std::cout << "we just created an ioctx for our pool" << std::endl;
+ }
+ }
+
+ /*
+ * create an rbd image and write data to it
+ */
+ {
+ std::string name = "librbd_test";
+ uint64_t size = 2 << 20;
+ int order = 0;
+ librbd::RBD rbd;
+ librbd::Image image;
+
+ ret = rbd.create(io_ctx, name.c_str(), size, &order);
+ if (ret < 0) {
+ std::cerr << "couldn't create an rbd image! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ } else {
+ std::cout << "we just created an rbd image" << std::endl;
+ }
+
+ ret = rbd.open(io_ctx, image, name.c_str(), NULL);
+ if (ret < 0) {
+ std::cerr << "couldn't open the rbd image! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ } else {
+ std::cout << "we just opened the rbd image" << std::endl;
+ }
+
+ int TEST_IO_SIZE = 512;
+ char test_data[TEST_IO_SIZE + 1];
+ int i;
+
+ for (i = 0; i < TEST_IO_SIZE; ++i) {
+ test_data[i] = (char) (rand() % (126 - 33) + 33);
+ }
+ test_data[TEST_IO_SIZE] = '\0';
+
+ size_t len = strlen(test_data);
+ ceph::bufferlist bl;
+ bl.append(test_data, len);
+
+ ret = image.write(0, len, bl);
+ if (ret < 0) {
+ std::cerr << "couldn't write to the rbd image! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ } else {
+ std::cout << "we just wrote data to our rbd image " << std::endl;
+ }
+
+ /*
+ * let's read the image and compare it to the data we wrote
+ */
+ ceph::bufferlist bl_r;
+ int read;
+ read = image.read(0, TEST_IO_SIZE, bl_r);
+ if (read < 0) {
+ std::cerr << "we couldn't read data from the image! error" << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+
+ std::string bl_res(bl_r.c_str(), read);
+
+ int res = memcmp(bl_res.c_str(), test_data, TEST_IO_SIZE);
+ if (res != 0) {
+ std::cerr << "what we read didn't match expected! error" << std::endl;
+ } else {
+ std::cout << "we read our data on the image successfully" << std::endl;
+ }
+
+ image.close();
+
+ /*
+ *let's now delete the image
+ */
+ ret = rbd.remove(io_ctx, name.c_str());
+ if (ret < 0) {
+ std::cerr << "failed to delete rbd image! error " << ret << std::endl;
+ ret = EXIT_FAILURE;
+ goto out;
+ } else {
+ std::cout << "we just deleted our rbd image " << std::endl;
+ }
+ }
+
+ ret = EXIT_SUCCESS;
+ out:
+ /*
+ * And now we're done, so let's remove our pool and then
+ * shut down the connection gracefully.
+ */
+ int delete_ret = rados.pool_delete(pool_name);
+ if (delete_ret < 0) {
+    // be careful not to mask an earlier failure, but still report this one
+ std::cerr << "We failed to delete our test pool!" << std::endl;
+ ret = EXIT_FAILURE;
+ }
+
+ rados.shutdown();
+
+ return ret;
+}
diff --git a/examples/rbd-replay/.gitignore b/examples/rbd-replay/.gitignore
new file mode 100644
index 000000000..f9e70539c
--- /dev/null
+++ b/examples/rbd-replay/.gitignore
@@ -0,0 +1,3 @@
+/*.log
+/replayer
+/traces
diff --git a/examples/rbd-replay/create-image b/examples/rbd-replay/create-image
new file mode 100755
index 000000000..878926785
--- /dev/null
+++ b/examples/rbd-replay/create-image
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+pool=rbd
+image=my-image
+size=10G
+export LD_LIBRARY_PATH=../../build/lib
+#qemu-img create -f raw rbd:$pool/$image:conf=../../src/ceph.conf $size
+qemu-img convert linux-0.2.img -O raw rbd:$pool/$image:conf=../../src/ceph.conf
diff --git a/examples/rbd-replay/replay b/examples/rbd-replay/replay
new file mode 100755
index 000000000..9b138c678
--- /dev/null
+++ b/examples/rbd-replay/replay
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+lttng create
+lttng enable-event -u 'librbd:*'
+lttng add-context -u -t pthread_id
+lttng start
+../../src/rbd-replay --conf=../../src/ceph.conf replay.bin "$@" | tee replay.log
+lttng stop
+lttng view > replay-trace.log
diff --git a/examples/rbd-replay/run-rbd-replay-prep b/examples/rbd-replay/run-rbd-replay-prep
new file mode 100755
index 000000000..d7e07e9be
--- /dev/null
+++ b/examples/rbd-replay/run-rbd-replay-prep
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+../../src/rbd-replay-prep traces/ust/uid/10002/64-bit replay.bin
diff --git a/examples/rbd-replay/trace b/examples/rbd-replay/trace
new file mode 100755
index 000000000..8739d46e1
--- /dev/null
+++ b/examples/rbd-replay/trace
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+mkdir -p traces
+lttng create -o traces librbd
+lttng enable-event -u 'librbd:*'
+lttng add-context -u -t pthread_id
+lttng start
+[ -z "$BUILD_DIR" ] && BUILD_DIR=build
+LD_LIBRARY_PATH=../../${BUILD_DIR}/lib qemu-system-i386 -m 1024 rbd:rbd/my-image:conf=../../src/ceph.conf
+lttng stop
+lttng view > trace.log
diff --git a/examples/rgw-cache/nginx-default.conf b/examples/rgw-cache/nginx-default.conf
new file mode 100644
index 000000000..ddde70539
--- /dev/null
+++ b/examples/rgw-cache/nginx-default.conf
@@ -0,0 +1,136 @@
+#config cache size and path to the cache directory, you should make sure that the user that is running nginx has permission to access the cache directory
+#max_size means that Nginx will not cache more than 20G, It should be tuned to a larger number if the /data/cache is bigger
+proxy_cache_path /data/cache levels=2:2:2 keys_zone=mycache:999m max_size=20G inactive=1d use_temp_path=off;
+upstream rgws {
+ # List of all rgws (ips or resolvable names)
+ server rgw1:8000 max_fails=2 fail_timeout=5s;
+ server rgw2:8000 max_fails=2 fail_timeout=5s;
+ server rgw3:8000 max_fails=2 fail_timeout=5s;
+}
+server {
+ listen 80;
+ server_name cacher;
+ location /authentication {
+ internal;
+ client_max_body_size 0;
+ proxy_pass http://rgws$request_uri;
+ proxy_pass_request_body off;
+ proxy_set_header Host $host;
+ # setting x-rgw-auth allow the RGW the ability to only authorize the request without fetching the obj data
+ proxy_set_header x-rgw-auth "yes";
+ proxy_set_header Authorization $http_authorization;
+ proxy_http_version 1.1;
+ proxy_method $request_method;
+ # Do not convert HEAD requests into GET requests
+ proxy_cache_convert_head off;
+ error_page 404 = @outage;
+ proxy_intercept_errors on;
+ if ($request_uri = "/") {
+ return 200;
+ }
+ # URI included with question mark is not being cached
+ if ($request_uri ~* (\?)) {
+ return 200;
+ }
+ if ($request_method = "PUT") {
+ return 200;
+ }
+ if ($request_method = "POST") {
+ return 200;
+ }
+ if ($request_method = "HEAD") {
+ return 200;
+ }
+ if ($request_method = "COPY") {
+ return 200;
+ }
+ if ($request_method = "DELETE") {
+ return 200;
+ }
+ if ($http_if_match) {
+ return 200;
+ }
+ if ($http_authorization !~* "aws4_request") {
+ return 200;
+ }
+ }
+ location @outage{
+ return 403;
+ }
+ location / {
+ auth_request /authentication;
+ proxy_pass http://rgws;
+ set $authvar '';
+ # if $do_not_cache is not empty the request would not be cached, this is relevant for list op for example
+ set $do_not_cache '';
+ # the IP or name of the RGWs
+ rewrite_by_lua_file /etc/nginx/nginx-lua-file.lua;
+ #proxy_set_header Authorization $http_authorization;
+ # my cache configured at the top of the file
+ proxy_cache mycache;
+ proxy_cache_lock_timeout 0s;
+ proxy_cache_lock_age 1000s;
+ proxy_http_version 1.1;
+ set $date $aws_auth_date;
+ # Getting 403 if this header not set
+ proxy_set_header Host $host;
+ # Cache all 200 OK's for 1 day
+ proxy_cache_valid 200 206 1d;
+ # Use stale cache file in all errors from upstream if we can
+ proxy_cache_use_stale updating;
+ proxy_cache_background_update on;
+ # Try to check if etag have changed, if yes, do not re-fetch from rgw the object
+ proxy_cache_revalidate on;
+ # Lock the cache so that only one request can populate it at a time
+ proxy_cache_lock on;
+    # prevent conversion of HEAD requests to GET requests
+ proxy_cache_convert_head off;
+ # Listing all buckets should not be cached
+ if ($request_uri = "/") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ # URI including question mark are not supported to prevent bucket listing cache
+ if ($request_uri ~* (\?)) {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ # Only aws4 requests are being cached - As the aws auth module supporting only aws v2
+ if ($http_authorization !~* "aws4_request") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "PUT") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "POST") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "HEAD") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "COPY") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ if ($http_if_match) {
+ #set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ set $myrange $http_range;
+ }
+ if ($request_method = "DELETE") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ proxy_set_header if_match $http_if_match;
+ proxy_set_header Range $myrange;
+ # Use the original x-amz-date if the aws auth module didn't create one
+ proxy_set_header x-amz-date $date;
+ proxy_set_header X-Amz-Cache $authvar;
+ proxy_no_cache $do_not_cache;
+ proxy_set_header Authorization $awsauthfour;
+ # This is on which content the nginx to use for hashing the cache keys
+ proxy_cache_key "$request_uri$request_method$request_body$myrange";
+ client_max_body_size 0;
+ }
+}
diff --git a/examples/rgw-cache/nginx-lua-file.lua b/examples/rgw-cache/nginx-lua-file.lua
new file mode 100644
index 000000000..efaf42230
--- /dev/null
+++ b/examples/rgw-cache/nginx-lua-file.lua
@@ -0,0 +1,26 @@
+local check = ngx.req.get_headers()["AUTHORIZATION"]
+local uri = ngx.var.request_uri
+local ngx_re = require "ngx.re"
+local hdrs = ngx.req.get_headers()
+--Take all signedheaders names, this for creating the X-Amz-Cache which is necessary to override range header to be able to readahead an object
+local res, err = ngx_re.split(check,"SignedHeaders=")
+local res2, err2 = ngx_re.split(res[2],",")
+local res3, err3 = ngx_re.split(res2[1],";")
+local t = {}
+local concathdrs = string.char(0x00)
+for i = 1, #res3, 1 do
+ if hdrs[res3[i]] ~= nil then
+--0xB1 is the separator between header name and value
+ t[i] = res3[i] .. string.char(0xB1) .. hdrs[res3[i]]
+--0xB2 is the separator between headers
+ concathdrs = concathdrs .. string.char(0xB2) .. t[i]
+ end
+end
+-- check if the authorization header is not empty
+if check ~= nil then
+ local xamzcache = concathdrs:sub(2)
+ xamzcache = xamzcache .. string.char(0xB2) .. "Authorization" .. string.char(0xB1) .. check
+ if xamzcache:find("aws4_request") ~= nil and uri ~= "/" and uri:find("?") == nil and hdrs["if-match"] == nil then
+ ngx.var.authvar = xamzcache
+ end
+end
diff --git a/examples/rgw-cache/nginx-noprefetch.conf b/examples/rgw-cache/nginx-noprefetch.conf
new file mode 100644
index 000000000..03e0ebc4a
--- /dev/null
+++ b/examples/rgw-cache/nginx-noprefetch.conf
@@ -0,0 +1,101 @@
+#config cache size and path to the cache directory, you should make sure that the user that is running nginx has permission to access the cache directory
+#max_size means that Nginx will not cache more than 20G, It should be tuned to a larger number if the /data/cache is bigger
+proxy_cache_path /data/cache levels=2:2:2 keys_zone=mycache:999m max_size=20G inactive=1d use_temp_path=off;
+upstream rgws {
+ # List of all rgws (ips or resolvable names)
+ server rgw1:8000 max_fails=2 fail_timeout=5s;
+ server rgw2:8000 max_fails=2 fail_timeout=5s;
+ server rgw3:8000 max_fails=2 fail_timeout=5s;
+}
+server {
+ listen 80;
+ server_name cacher;
+ location /authentication {
+ internal;
+ client_max_body_size 0;
+ proxy_pass http://rgws$request_uri;
+ proxy_pass_request_body off;
+ proxy_set_header Host $host;
+ # setting x-rgw-auth allow the RGW the ability to only authorize the request without fetching the obj data
+ proxy_set_header x-rgw-auth "yes";
+ proxy_set_header Authorization $http_authorization;
+ proxy_http_version 1.1;
+ proxy_method $request_method;
+ # Do not convert HEAD requests into GET requests
+ proxy_cache_convert_head off;
+ error_page 404 = @outage;
+ proxy_intercept_errors on;
+ if ($request_uri = "/") {
+ return 200;
+ }
+ # URI included with question mark is not being cached
+ if ($request_uri ~* (\?)) {
+ return 200;
+ }
+ if ($request_method = "PUT") {
+ return 200;
+ }
+ if ($request_method = "POST") {
+ return 200;
+ }
+ if ($request_method = "HEAD") {
+ return 200;
+ }
+ if ($request_method = "COPY") {
+ return 200;
+ }
+ if ($request_method = "DELETE") {
+ return 200;
+ }
+ if ($http_if_match) {
+ return 200;
+ }
+ if ($http_authorization !~* "aws4_request") {
+ return 200;
+ }
+ }
+ location @outage{
+ return 403;
+ }
+ location / {
+ auth_request /authentication;
+ proxy_pass http://rgws;
+ # if $do_not_cache is not empty the request would not be cached, this is relevant for list op for example
+ set $do_not_cache '';
+ # the IP or name of the RGWs
+ #proxy_set_header Authorization $http_authorization;
+ # my cache configured at the top of the file
+ proxy_cache mycache;
+ proxy_cache_lock_timeout 0s;
+ proxy_cache_lock_age 1000s;
+ proxy_http_version 1.1;
+ # Getting 403 if this header not set
+ proxy_set_header Host $host;
+ # Cache all 200 OK's for 1 day
+ proxy_cache_valid 200 206 1d;
+ # Use stale cache file in all errors from upstream if we can
+ proxy_cache_use_stale updating;
+ proxy_cache_background_update on;
+ # Try to check if etag have changed, if yes, do not re-fetch from rgw the object
+ proxy_cache_revalidate on;
+ # Lock the cache so that only one request can populate it at a time
+ proxy_cache_lock on;
+    # prevent conversion of HEAD requests to GET requests
+ proxy_cache_convert_head off;
+ # Listing all buckets should not be cached
+ if ($request_uri = "/") {
+ set $do_not_cache "no";
+ }
+ # URI including question mark are not supported to prevent bucket listing cache
+ if ($request_uri ~* (\?)) {
+ set $do_not_cache "no";
+ }
+ # Use the original x-amz-date if the aws auth module didn't create one
+ proxy_no_cache $do_not_cache;
+ proxy_set_header Authorization $http_authorization;
+ proxy_set_header Range $http_range;
+ # This is on which content the nginx to use for hashing the cache keys
+ proxy_cache_key "$request_uri$request_method$request_body$http_range";
+ client_max_body_size 0;
+ }
+}
diff --git a/examples/rgw-cache/nginx-slicing.conf b/examples/rgw-cache/nginx-slicing.conf
new file mode 100644
index 000000000..d3c8f623b
--- /dev/null
+++ b/examples/rgw-cache/nginx-slicing.conf
@@ -0,0 +1,137 @@
+#config cache size and path to the cache directory, you should make sure that the user that is running nginx has permission to access the cache directory
+#max_size means that Nginx will not cache more than 20G, It should be tuned to a larger number if the /data/cache is bigger
+proxy_cache_path /data/cache levels=2:2:2 keys_zone=mycache:999m max_size=20G inactive=1d use_temp_path=off;
+upstream rgws {
+ # List of all rgws (ips or resolvable names)
+ server rgw1:8000 max_fails=2 fail_timeout=5s;
+ server rgw2:8000 max_fails=2 fail_timeout=5s;
+ server rgw3:8000 max_fails=2 fail_timeout=5s;
+}
+server {
+ listen 80;
+ server_name cacher;
+ location /authentication {
+ internal;
+ client_max_body_size 0;
+ proxy_pass http://rgws$request_uri;
+ proxy_pass_request_body off;
+ proxy_set_header Host $host;
+ # setting x-rgw-auth allow the RGW the ability to only authorize the request without fetching the obj data
+ proxy_set_header x-rgw-auth "yes";
+ proxy_set_header Authorization $http_authorization;
+ proxy_http_version 1.1;
+ proxy_method $request_method;
+ # Do not convert HEAD requests into GET requests
+ proxy_cache_convert_head off;
+ error_page 404 = @outage;
+ proxy_intercept_errors on;
+ if ($request_uri = "/") {
+ return 200;
+ }
+ # URI included with question mark is not being cached
+ if ($request_uri ~* (\?)) {
+ return 200;
+ }
+ if ($request_method = "PUT") {
+ return 200;
+ }
+ if ($request_method = "POST") {
+ return 200;
+ }
+ if ($request_method = "HEAD") {
+ return 200;
+ }
+ if ($request_method = "COPY") {
+ return 200;
+ }
+ if ($request_method = "DELETE") {
+ return 200;
+ }
+ if ($http_if_match) {
+ return 200;
+ }
+ if ($http_authorization !~* "aws4_request") {
+ return 200;
+ }
+ }
+ location @outage{
+ return 403;
+ }
+ location / {
+ slice 1m;
+ auth_request /authentication;
+ proxy_set_header Range $slice_range;
+ proxy_pass http://rgws;
+ set $authvar '';
+ # if $do_not_cache is not empty the request would not be cached, this is relevant for list op for example
+ set $do_not_cache '';
+ # the IP or name of the RGWs
+ rewrite_by_lua_file /etc/nginx/nginx-lua-file.lua;
+ #proxy_set_header Authorization $http_authorization;
+ # my cache configured at the top of the file
+ proxy_cache mycache;
+ proxy_cache_lock_timeout 0s;
+ proxy_cache_lock_age 1000s;
+ proxy_http_version 1.1;
+ set $date $aws_auth_date;
+ # Getting 403 if this header not set
+ proxy_set_header Host $host;
+ # Cache all 200 OK's for 1 day
+ proxy_cache_valid 200 206 1d;
+ # Use stale cache file in all errors from upstream if we can
+ proxy_cache_use_stale updating;
+ proxy_cache_background_update on;
+ # Try to check if etag have changed, if yes, do not re-fetch from rgw the object
+ proxy_cache_revalidate on;
+ # Lock the cache so that only one request can populate it at a time
+ proxy_cache_lock on;
+    # prevent conversion of HEAD requests to GET requests
+ proxy_cache_convert_head off;
+ # Listing all buckets should not be cached
+ if ($request_uri = "/") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ # URI including question mark are not supported to prevent bucket listing cache
+ if ($request_uri ~* (\?)) {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ # Only aws4 requests are being cached - As the aws auth module supporting only aws v2
+ if ($http_authorization !~* "aws4_request") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "PUT") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "POST") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "HEAD") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "COPY") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ if ($http_if_match) {
+ #set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ set $myrange $slice_range;
+ }
+ if ($request_method = "DELETE") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ proxy_set_header if_match $http_if_match;
+ # Use the original x-amz-date if the aws auth module didn't create one
+ proxy_set_header x-amz-date $date;
+ proxy_set_header X-Amz-Cache $authvar;
+ proxy_no_cache $do_not_cache;
+ proxy_set_header Authorization $awsauthfour;
+ # This is on which content the nginx to use for hashing the cache keys
+ proxy_cache_key "$request_uri$request_method$request_body$slice_range";
+ client_max_body_size 0;
+ }
+}
diff --git a/examples/rgw-cache/nginx.conf b/examples/rgw-cache/nginx.conf
new file mode 100644
index 000000000..a478db1dc
--- /dev/null
+++ b/examples/rgw-cache/nginx.conf
@@ -0,0 +1,57 @@
+
+user nginx;
+#Process per core
+worker_processes auto;
+pid /var/run/nginx.pid;
+events {
+#Number of connections per worker
+ worker_connections 1024;
+}
+
+
+http {
+ types_hash_max_size 4096;
+ lua_package_path '/usr/local/openresty/lualib/?.lua;;';
+ aws_auth $aws_token {
+ # access key and secret key of the cache
+ # Please substitute with the access key and secret key of the amz-cache cap user
+ access_key cache;
+ secret_key cache;
+ service s3;
+ region us-east-1;
+ }
+ # This map is used to choose the original authorization header if the aws_auth module refuse to create one
+ map $aws_token $awsauth {
+ default $http_authorization;
+ ~. $aws_token; # Regular expression to match any value
+ }
+ map $request_uri $awsauthtwo {
+ "/" $http_authorization;
+ "~\?" $http_authorization;
+ default $awsauth;
+ }
+ map $request_method $awsauththree {
+ default $awsauthtwo;
+ "PUT" $http_authorization;
+ "HEAD" $http_authorization;
+ "POST" $http_authorization;
+ "DELETE" $http_authorization;
+ "COPY" $http_authorization;
+ }
+ map $http_if_match $awsauthfour {
+ ~. $http_authorization; # Regular expression to match any value
+ default $awsauththree;
+ }
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for"';
+ error_log /var/log/nginx/error.log;
+ access_log /var/log/nginx/access.log main;
+
+ sendfile on;
+ tcp_nodelay on;
+ keepalive_timeout 65;
+ include /etc/nginx/conf.d/*.conf;
+}