author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /examples/rgw
parent    Initial commit. (diff)
Adding upstream version 18.2.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'examples/rgw'):
-rw-r--r--  examples/rgw/boto3/README.md | 103
-rwxr-xr-x  examples/rgw/boto3/append_object.py | 42
-rwxr-xr-x  examples/rgw/boto3/delete_notification.py | 36
-rwxr-xr-x  examples/rgw/boto3/get_notification.py | 28
-rwxr-xr-x  examples/rgw/boto3/get_usage_stats.py | 17
-rwxr-xr-x  examples/rgw/boto3/list_unordered.py | 25
-rwxr-xr-x  examples/rgw/boto3/notification_filters.py | 48
-rw-r--r--  examples/rgw/boto3/service-2.sdk-extras.json | 225
-rw-r--r--  examples/rgw/boto3/topic_attributes.py | 23
-rw-r--r--  examples/rgw/boto3/topic_with_endpoint.py | 30
-rw-r--r--  examples/rgw/golang/object-upload/README.md | 16
-rw-r--r--  examples/rgw/golang/object-upload/fortuna.txt | 1
-rw-r--r--  examples/rgw/golang/object-upload/go.mod | 5
-rw-r--r--  examples/rgw/golang/object-upload/go.sum | 24
-rw-r--r--  examples/rgw/golang/object-upload/object-upload.go | 84
-rw-r--r--  examples/rgw/golang/put-bucket-notification-creation/README.md | 24
-rw-r--r--  examples/rgw/golang/put-bucket-notification-creation/go.mod | 5
-rw-r--r--  examples/rgw/golang/put-bucket-notification-creation/go.sum | 24
-rw-r--r--  examples/rgw/golang/put-bucket-notification-creation/put-bucket-notification-creation.go | 96
-rw-r--r--  examples/rgw/golang/topic-creation/README.md | 11
-rw-r--r--  examples/rgw/golang/topic-creation/go.mod | 5
-rw-r--r--  examples/rgw/golang/topic-creation/go.sum | 24
-rw-r--r--  examples/rgw/golang/topic-creation/topic-creation.go | 81
-rw-r--r--  examples/rgw/java/ceph-s3-upload/README.md | 16
-rw-r--r--  examples/rgw/java/ceph-s3-upload/ceph-s3-upload.txt | 1
-rw-r--r--  examples/rgw/java/ceph-s3-upload/pom.xml | 67
-rw-r--r--  examples/rgw/java/ceph-s3-upload/src/main/java/org/example/cephs3upload/App.java | 51
-rw-r--r--  examples/rgw/java/ceph-s3-upload/src/test/java/org/example/cephs3upload/AppTest.java | 38
-rw-r--r--  examples/rgw/lua/config/prometheus.yml | 19
-rw-r--r--  examples/rgw/lua/elasticsearch_adapter.lua | 114
-rw-r--r--  examples/rgw/lua/elasticsearch_adapter.md | 59
-rw-r--r--  examples/rgw/lua/img/prometheus.png | bin 0 -> 414931 bytes
-rw-r--r--  examples/rgw/lua/nats_adapter.lua | 93
-rw-r--r--  examples/rgw/lua/nats_adapter.md | 101
-rw-r--r--  examples/rgw/lua/prometheus_adapter.lua | 23
-rw-r--r--  examples/rgw/lua/prometheus_adapter.md | 59
-rw-r--r--  examples/rgw/lua/storage_class.lua | 19
-rw-r--r--  examples/rgw/lua/storage_class.md | 49
-rw-r--r--  examples/rgw/rgw-cache/nginx-default.conf | 136
-rw-r--r--  examples/rgw/rgw-cache/nginx-lua-file.lua | 26
-rw-r--r--  examples/rgw/rgw-cache/nginx-noprefetch.conf | 101
-rw-r--r--  examples/rgw/rgw-cache/nginx-slicing.conf | 137
-rw-r--r--  examples/rgw/rgw-cache/nginx.conf | 57
-rw-r--r--  examples/rgw/rgw_admin_curl.sh | 112
44 files changed, 2255 insertions(+), 0 deletions(-)
diff --git a/examples/rgw/boto3/README.md b/examples/rgw/boto3/README.md
new file mode 100644
index 000000000..dde9edf4f
--- /dev/null
+++ b/examples/rgw/boto3/README.md
@@ -0,0 +1,103 @@
+# Introduction
+This directory contains examples of how to use the AWS CLI and boto3 to exercise the RadosGW extensions to the S3 API.
+These extensions are defined on top of the standard [AWS SDK](https://github.com/boto/botocore/blob/develop/botocore/data/s3/2006-03-01/service-2.json) S3 service definition.
+
+# Users
+For the standard client to support these extensions, the ``service-2.sdk-extras.json`` file should be placed under the ``~/.aws/models/s3/2006-03-01/`` directory.
+For more information see [here](https://github.com/boto/botocore/blob/develop/botocore/loaders.py#L33).
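+To verify that the extras file was picked up, check that the extension operations appear in the loaded S3 service model. A minimal sketch (assuming the file is in place as described above and a vstart RGW on the default port):
+```
+import boto3
+
+client = boto3.client('s3', endpoint_url='http://localhost:8000')
+# 'GetUsageStats' is defined only in service-2.sdk-extras.json
+print('GetUsageStats' in client.meta.service_model.operation_names)
+```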
+## Python
+The [boto3 client](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) can be used with the extensions; code samples exist in this directory.
+## AWS CLI
+The standard [AWS CLI](https://docs.aws.amazon.com/cli/latest/) may also be used with these extensions. For example:
+- Unordered listing:
+```
+aws --endpoint-url http://localhost:8000 s3api list-objects --bucket=mybucket --allow-unordered
+```
+
+- Unordered listing (version 2):
+```
+aws --endpoint-url http://localhost:8000 s3api list-objects-v2 --bucket=mybucket --allow-unordered
+```
+
+- Topic creation with endpoint:
+```
+aws --endpoint-url http://localhost:8000 sns create-topic --name=mytopic --attributes='{"push-endpoint": "amqp://localhost:5672", "amqp-exchange": "ex1", "amqp-ack-level": "broker"}'
+```
+Expected output:
+```
+{
+ "TopicArn": "arn:aws:sns:default::mytopic"
+}
+```
+
+- Get topic attributes:
+```
+aws --endpoint-url http://localhost:8000 sns get-topic-attributes --topic-arn="arn:aws:sns:default::mytopic"
+```
+Expected output:
+```
+{
+ "Attributes": {
+ "User": "",
+ "Name": "mytopic",
+ "EndPoint": "{\"EndpointAddress\":\"amqp://localhost:5672\",\"EndpointArgs\":\"Attributes.entry.1.key=push-endpoint&Attributes.entry.1.value=amqp://localhost:5672&Attributes.entry.2.key=amqp-exchange&Attributes.entry.2.value=ex1&Attributes.entry.3.key=amqp-ack-level&Attributes.entry.3.value=broker&Version=2010-03-31&amqp-ack-level=broker&amqp-exchange=ex1&push-endpoint=amqp://localhost:5672\",\"EndpointTopic\":\"mytopic\",\"HasStoredSecret\":\"false\",\"Persistent\":\"false\"}",
+ "TopicArn": "arn:aws:sns:default::mytopic",
+ "OpaqueData": ""
+ }
+}
+```
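+The ``EndPoint`` attribute above is itself a JSON-encoded string. A minimal boto3 sketch for decoding it (mirroring ``topic_attributes.py`` below; the endpoint and keys are the vstart defaults used throughout these examples):
+```
+import json
+import boto3
+
+client = boto3.client('sns',
+ endpoint_url='http://localhost:8000',
+ aws_access_key_id='0555b35654ad1656d804',
+ aws_secret_access_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==')
+
+attrs = client.get_topic_attributes(TopicArn='arn:aws:sns:default::mytopic')
+# 'EndPoint' holds a nested JSON document describing the push endpoint
+endpoint = json.loads(attrs['Attributes']['EndPoint'])
+print(endpoint['EndpointAddress'])
+```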
+
+- Bucket notifications with filtering extensions (bucket must exist before calling this command):
+```
+aws --region=default --endpoint-url http://localhost:8000 s3api put-bucket-notification-configuration --bucket=mybucket --notification-configuration='{"TopicConfigurations": [{"Id": "notif1", "TopicArn": "arn:aws:sns:default::mytopic", "Events": ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"], "Filter": {"Metadata": {"FilterRules": [{"Name": "x-amz-meta-foo", "Value": "bar"}, {"Name": "x-amz-meta-hello", "Value": "world"}]}, "Key": {"FilterRules": [{"Name": "regex", "Value": "([a-z]+)"}]}}}]}'
+```
+
+- Get configuration of a specific notification of a bucket:
+```
+aws --endpoint-url http://localhost:8000 s3api get-bucket-notification-configuration --bucket=mybucket --notification=notif1
+```
+Expected output:
+```
+{
+ "TopicConfigurations": [
+ {
+ "Id": "notif1",
+ "TopicArn": "arn:aws:sns:default::mytopic",
+ "Events": [
+ "s3:ObjectCreated:*",
+ "s3:ObjectRemoved:*"
+ ],
+ "Filter": {
+ "Key": {
+ "FilterRules": [
+ {
+ "Name": "regex",
+ "Value": "([a-z]+)"
+ }
+ ]
+ },
+ "Metadata": {
+ "FilterRules": [
+ {
+ "Name": "x-amz-meta-foo",
+ "Value": "bar"
+ },
+ {
+ "Name": "x-amz-meta-hello",
+ "Value": "world"
+ }
+ ]
+ }
+ }
+ }
+ ]
+}
+```
+
+# Developers
+Anyone developing an extension to the S3 API supported by AWS should modify ``service-2.sdk-extras.json`` (all extensions should go into the same file), so that boto3 can be used to test the new API.
+In addition, python files with code samples demonstrating use of the new API should be added to this directory.
+When testing your changes, please:
+- make sure that the modified file is in the boto3 path as explained above
+- make sure that the standard S3 test suite is not broken, even with the extension file in the path; see the smoke-test sketch below
+
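+For example, a minimal smoke test along these lines (a sketch; the endpoint and keys are the vstart defaults used throughout these samples):
+```
+import boto3
+
+client = boto3.client('s3',
+ endpoint_url='http://localhost:8000',
+ aws_access_key_id='0555b35654ad1656d804',
+ aws_secret_access_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==')
+
+# standard operation - must behave exactly as it does without the extras file
+print(client.list_buckets())
+# extension operation - available only when the extras file is merged into the model
+print(client.get_usage_stats())
+```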
diff --git a/examples/rgw/boto3/append_object.py b/examples/rgw/boto3/append_object.py
new file mode 100755
index 000000000..0e13252ec
--- /dev/null
+++ b/examples/rgw/boto3/append_object.py
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+from __future__ import print_function
+
+import boto3
+import sys
+import json
+
+def js_print(arg):
+ print(json.dumps(arg, indent=2))
+
+if len(sys.argv) != 3:
+ print('Usage: ' + sys.argv[0] + ' <bucket> <key>')
+ sys.exit(1)
+
+# bucket name as first argument
+bucketname = sys.argv[1]
+keyname = sys.argv[2]
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+print('deleting object first')
+js_print(client.delete_object(Bucket=bucketname, Key=keyname))
+print('appending at position 0')
+resp = client.put_object(Bucket=bucketname, Key=keyname,
+ Append=True,
+ AppendPosition=0,
+ Body='8letters')
+
+js_print(resp)
+append_pos = resp['AppendPosition']
+print('appending at position %d' % append_pos)
+js_print(client.put_object(Bucket=bucketname, Key=keyname,
+ Append=True,
+ AppendPosition=append_pos,
+ Body='8letters'))
diff --git a/examples/rgw/boto3/delete_notification.py b/examples/rgw/boto3/delete_notification.py
new file mode 100755
index 000000000..e520c9f53
--- /dev/null
+++ b/examples/rgw/boto3/delete_notification.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+
+import boto3
+import sys
+
+if len(sys.argv) == 3:
+ # bucket name as first argument
+ bucketname = sys.argv[1]
+ # notification name as second argument
+ notification_name = sys.argv[2]
+elif len(sys.argv) == 2:
+ # bucket name as first argument
+ bucketname = sys.argv[1]
+ notification_name = ""
+else:
+ print('Usage: ' + sys.argv[0] + ' <bucket> [notification]')
+ sys.exit(1)
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+# deleting a specific notification configuration from a bucket (when NotificationId is provided) or
+# deleting all notification configurations on a bucket (without deleting the bucket itself) are extensions to the AWS S3 API
+
+if notification_name == "":
+ print(client.delete_bucket_notification_configuration(Bucket=bucketname))
+else:
+ print(client.delete_bucket_notification_configuration(Bucket=bucketname,
+ Notification=notification_name))
diff --git a/examples/rgw/boto3/get_notification.py b/examples/rgw/boto3/get_notification.py
new file mode 100755
index 000000000..490c018d4
--- /dev/null
+++ b/examples/rgw/boto3/get_notification.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+
+import boto3
+import sys
+
+if len(sys.argv) != 3:
+ print('Usage: ' + sys.argv[0] + ' <bucket> <notification>')
+ sys.exit(1)
+
+# bucket name as first argument
+bucketname = sys.argv[1]
+# notification name as second argument
+notification_name = sys.argv[2]
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+# getting a specific notification configuration is an extension to AWS S3 API
+
+print(client.get_bucket_notification_configuration(Bucket=bucketname,
+ Notification=notification_name))
diff --git a/examples/rgw/boto3/get_usage_stats.py b/examples/rgw/boto3/get_usage_stats.py
new file mode 100755
index 000000000..0b7880d4f
--- /dev/null
+++ b/examples/rgw/boto3/get_usage_stats.py
@@ -0,0 +1,17 @@
+#!/usr/bin/python
+from __future__ import print_function
+
+import boto3
+import json
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+print(json.dumps(client.get_usage_stats(), indent=2))
diff --git a/examples/rgw/boto3/list_unordered.py b/examples/rgw/boto3/list_unordered.py
new file mode 100755
index 000000000..979051319
--- /dev/null
+++ b/examples/rgw/boto3/list_unordered.py
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+
+import boto3
+import sys
+
+if len(sys.argv) != 2:
+ print('Usage: ' + sys.argv[0] + ' <bucket>')
+ sys.exit(1)
+
+# bucket name as first argument
+bucketname = sys.argv[1]
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+# getting an unordered list of objects is an extension to AWS S3 API
+
+print(client.list_objects(Bucket=bucketname, AllowUnordered=True))
diff --git a/examples/rgw/boto3/notification_filters.py b/examples/rgw/boto3/notification_filters.py
new file mode 100755
index 000000000..2687c8b3a
--- /dev/null
+++ b/examples/rgw/boto3/notification_filters.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python
+
+import boto3
+import sys
+
+if len(sys.argv) != 4:
+ print('Usage: ' + sys.argv[0] + ' <bucket> <topic ARN> <notification Id>')
+ sys.exit(1)
+
+# bucket name as first argument
+bucketname = sys.argv[1]
+# topic ARN as second argument
+topic_arn = sys.argv[2]
+# notification id as third argument
+notification_id = sys.argv[3]
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+# regex filtering on the object name and metadata-based filtering are extensions to the AWS S3 API
+# the bucket and topic should be created beforehand
+
+topic_conf_list = [{'Id': notification_id,
+ 'TopicArn': topic_arn,
+ 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
+ 'Filter': {
+ 'Metadata': {
+ 'FilterRules': [{'Name': 'x-amz-meta-foo', 'Value': 'bar'},
+ {'Name': 'x-amz-meta-hello', 'Value': 'world'}]
+ },
+ 'Tags': {
+ 'FilterRules': [{'Name': 'foo', 'Value': 'bar'},
+ {'Name': 'hello', 'Value': 'world'}]
+ },
+ 'Key': {
+ 'FilterRules': [{'Name': 'regex', 'Value': '([a-z]+)'}]
+ }
+ }}]
+
+print(client.put_bucket_notification_configuration(Bucket=bucketname,
+ NotificationConfiguration={'TopicConfigurations': topic_conf_list}))
diff --git a/examples/rgw/boto3/service-2.sdk-extras.json b/examples/rgw/boto3/service-2.sdk-extras.json
new file mode 100644
index 000000000..d660b6a02
--- /dev/null
+++ b/examples/rgw/boto3/service-2.sdk-extras.json
@@ -0,0 +1,225 @@
+{
+"version": 1.0,
+"merge": {
+ "operations":{
+ "DeleteBucketNotificationConfiguration":{
+ "name":"DeleteBucketNotificationConfiguration",
+ "http":{
+ "method":"DELETE",
+ "requestUri":"/{Bucket}?notification",
+ "responseCode":204
+ },
+ "input":{"shape":"DeleteBucketNotificationConfigurationRequest"},
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/bucketops/#delete-notification",
+ "documentation":"<p>Deletes the notification configuration from the bucket.</p>"
+ },
+ "GetUsageStats":{
+ "name":"GetUsageStats",
+ "http":{
+ "method":"GET",
+ "requestUri":"/?usage",
+ "responseCode":200
+ },
+ "output": {"shape": "GetUsageStatsOutput"},
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/serviceops#get-usage-stats",
+ "documentation":"<p>Get usage stats for the user</p>"
+ }
+ },
+ "shapes": {
+ "ListObjectsRequest": {
+ "members": {
+ "AllowUnordered": {
+ "shape":"AllowUnordered",
+ "documentation":"<p>Allow the listing results to be returned in unsorted order. This may be faster when listing very large buckets.</p>",
+ "location":"querystring",
+ "locationName":"allow-unordered"
+ }
+ }
+ },
+ "ListObjectsV2Request": {
+ "members": {
+ "AllowUnordered": {
+ "shape":"AllowUnordered",
+ "documentation":"<p>Allow the listing results to be returned in unsorted order. This may be faster when listing very large buckets.</p>",
+ "location":"querystring",
+ "locationName":"allow-unordered"
+ }
+ }
+ },
+ "AllowUnordered":{"type":"boolean"},
+ "PutObjectRequest": {
+ "members": {
+ "AppendPosition": {
+ "shape":"AppendPosition",
+ "documentation": "<p>Position to allow appending</p>",
+ "location": "querystring",
+ "locationName": "position"
+ },
+ "Append": {
+ "shape":"Append",
+ "documentation":"<p>Append Object</p>",
+ "location": "querystring",
+ "locationName": "append"
+ }
+ }
+ },
+ "Append": {"type":"boolean"},
+ "AppendPosition":{"type":"integer"},
+ "PutObjectOutput": {
+ "members": {
+ "AppendPosition": {
+ "shape":"AppendPosition",
+ "documentation": "<p>Position to allow appending</p>",
+ "location": "header",
+ "locationName": "x-rgw-next-append-position",
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/objectops/#append-object"
+ }
+ }
+ },
+ "GetBucketNotificationConfigurationRequest":{
+ "type":"structure",
+ "required":["Bucket"],
+ "members":{
+ "Bucket":{
+ "shape":"BucketName",
+ "documentation":"<p>Name of the bucket to get the notifications configuration for.</p>",
+ "location":"uri",
+ "locationName":"Bucket"
+ },
+ "Notification":{
+ "shape":"NotificationId",
+ "documentation":"<p>Id of the specific notification on the bucket for which the configuration should be retrieved.</p>",
+ "location":"querystring",
+ "locationName":"notification-id"
+ }
+ }
+ },
+ "DeleteBucketNotificationConfigurationRequest":{
+ "type":"structure",
+ "required":["Bucket"],
+ "members":{
+ "Bucket":{
+ "shape":"BucketName",
+ "documentation":"<p>Name of the bucket to delete the notifications configuration from.</p>",
+ "location":"uri",
+ "locationName":"Bucket"
+ },
+ "Notification":{
+ "shape":"NotificationId",
+ "documentation":"<p>Id of the specific notification on the bucket to be deleted.</p>",
+ "location":"querystring",
+ "locationName":"notification-id"
+ }
+ }
+ },
+ "FilterRule":{
+ "type":"structure",
+ "members":{
+ "Name":{
+ "shape":"FilterRuleName",
+ "documentation":"<p>The object key name prefix, suffix or regex identifying one or more objects to which the filtering rule applies. The maximum length is 1,024 characters. Overlapping prefixes and suffixes are supported.</p>"
+ },
+ "Value":{
+ "shape":"FilterRuleValue",
+ "documentation":"<p>The value that the filter searches for in object key names.</p>"
+ }
+ },
+ "documentation":"<p>Specifies the Amazon S3 object key name to filter on and whether to filter on the suffix, prefix or regex of the key name.</p>"
+ },
+ "FilterRuleName":{
+ "type":"string",
+ "enum":[
+ "prefix",
+ "suffix",
+ "regex"
+ ]
+ },
+ "NotificationConfigurationFilter":{
+ "type":"structure",
+ "members":{
+ "Key":{
+ "shape":"S3KeyFilter",
+ "documentation":"<p/>",
+ "locationName":"S3Key"
+ },
+ "Metadata":{
+ "shape":"S3MetadataFilter",
+ "documentation":"<p/>",
+ "locationName":"S3Metadata"
+ },
+ "Tags":{
+ "shape":"S3TagsFilter",
+ "documentation":"<p/>",
+ "locationName":"S3Tags"
+ }
+
+ },
+ "documentation":"<p>Specifies object key name filtering rules. For information about key name filtering, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html\">Configuring Event Notifications</a> in the <i>Amazon Simple Storage Service Developer Guide</i>.</p>"
+ },
+ "S3KeyFilter":{
+ "type":"structure",
+ "members":{
+ "FilterRules":{
+ "shape":"FilterRuleList",
+ "documentation":"<p/>",
+ "locationName":"FilterRule"
+ }
+ },
+ "documentation":"<p>A container for object key name prefix, suffix and regex filtering rules.</p>"
+ },
+ "S3MetadataFilter":{
+ "type":"structure",
+ "members":{
+ "FilterRules":{
+ "shape":"FilterRuleList",
+ "documentation":"<p/>",
+ "locationName":"FilterRule"
+ }
+ },
+ "documentation":"<p>A container for metadata filtering rules.</p>"
+ },
+ "S3TagsFilter":{
+ "type":"structure",
+ "members":{
+ "FilterRules":{
+ "shape":"FilterRuleList",
+ "documentation":"<p/>",
+ "locationName":"FilterRule"
+ }
+ },
+ "documentation":"<p>A container for object tags filtering rules.</p>"
+ },
+ "GetUsageStatsOutput": {
+ "type": "structure",
+ "members": {
+ "Summary": {
+ "shape":"UsageStatsSummary",
+ "documentation": "<p/>"
+ }
+ }
+ },
+ "UsageStatsSummary": {
+ "type": "structure",
+ "members": {
+ "QuotaMaxBytes":{"shape":"QuotaMaxBytes"},
+ "QuotaMaxBuckets":{"shape": "QuotaMaxBuckets"},
+ "QuotaMaxObjCount":{"shape":"QuotaMaxObjCount"},
+ "QuotaMaxBytesPerBucket":{"shape":"QuotaMaxBytesPerBucket"},
+ "QuotaMaxObjCountPerBucket":{"shape":"QuotaMaxObjCountPerBucket"},
+ "TotalBytes":{"shape":"TotalBytes"},
+ "TotalBytesRounded":{"shape":"TotalBytesRounded"},
+ "TotalEntries":{"shape":"TotalEntries"}
+ }
+ },
+ "QuotaMaxBytes":{"type":"integer"},
+ "QuotaMaxBuckets":{"type": "integer"},
+ "QuotaMaxObjCount":{"type":"integer"},
+ "QuotaMaxBytesPerBucket":{"type":"integer"},
+ "QuotaMaxObjCountPerBucket":{"type":"integer"},
+ "TotalBytesRounded":{"type":"integer"},
+ "TotalBytes":{"type":"integer"},
+ "TotalEntries":{"type":"integer"}
+ },
+ "documentation":"<p/>"
+}
+}
diff --git a/examples/rgw/boto3/topic_attributes.py b/examples/rgw/boto3/topic_attributes.py
new file mode 100644
index 000000000..3657459d8
--- /dev/null
+++ b/examples/rgw/boto3/topic_attributes.py
@@ -0,0 +1,23 @@
+import sys
+import boto3
+from pprint import pprint
+
+if len(sys.argv) == 2:
+ # topic arn as first argument
+ topic_arn = sys.argv[1]
+else:
+ print ('Usage: ' + sys.argv[0] + ' <topic arn>')
+ sys.exit(1)
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+# Add info to the client to get the topic attributes of a given topic
+client = boto3.client('sns',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+# getting attributes of a specific topic is an extension to AWS SNS
+pprint(client.get_topic_attributes(TopicArn=topic_arn))
diff --git a/examples/rgw/boto3/topic_with_endpoint.py b/examples/rgw/boto3/topic_with_endpoint.py
new file mode 100644
index 000000000..b21fe4651
--- /dev/null
+++ b/examples/rgw/boto3/topic_with_endpoint.py
@@ -0,0 +1,30 @@
+#!/usr/bin/python
+import boto3
+from botocore.client import Config
+import sys
+
+if len(sys.argv) == 2:
+ # topic name as first argument
+ topic_name = sys.argv[1]
+else:
+ print('Usage: ' + sys.argv[0] + ' <topic name> ')
+ sys.exit(1)
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('sns',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ config=Config(signature_version='s3'))
+
+# to see the list of available "regions" use:
+# radosgw-admin realm zonegroup list
+
+# this is a standard AWS service call, using custom attributes to add AMQP endpoint information to the topic
+attributes = {"push-endpoint": "amqp://localhost:5672", "amqp-exchange": "ex1", "amqp-ack-level": "broker"}
+
+print(client.create_topic(Name=topic_name, Attributes=attributes))
diff --git a/examples/rgw/golang/object-upload/README.md b/examples/rgw/golang/object-upload/README.md
new file mode 100644
index 000000000..1bf1c24ae
--- /dev/null
+++ b/examples/rgw/golang/object-upload/README.md
@@ -0,0 +1,16 @@
+# Introduction
+This directory contains a Golang code example showing how to upload an object to an S3 bucket running on a Ceph RGW cluster.
+
+# Prerequisite
+A Linux machine running an RGW Ceph cluster, preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new`` command.
+Go installed on the Linux machine.
+
+## Workflow Procedure
+1. Install AWS CLI version 1 on your Linux machine as explained [here](https://docs.aws.amazon.com/cli/v1/userguide/install-linux.html)
+2. Create a bucket on the Ceph cluster with the command
+```
+aws --endpoint-url http://localhost:8000 s3 mb s3://sample-bucket
+```
+3. In your terminal, navigate to the directory where the Golang example code exists.
+4. Run ``` go mod tidy ``` to install the required Go packages on the system.
+5. Run the Golang program as ``` go run object-upload.go -b sample-bucket -f fortuna.txt ``` in the terminal window to upload the object to the Ceph RGW cluster.
diff --git a/examples/rgw/golang/object-upload/fortuna.txt b/examples/rgw/golang/object-upload/fortuna.txt
new file mode 100644
index 000000000..146240570
--- /dev/null
+++ b/examples/rgw/golang/object-upload/fortuna.txt
@@ -0,0 +1 @@
+This file is being used to test Go upload capabilities to Ceph RGW cluster.
diff --git a/examples/rgw/golang/object-upload/go.mod b/examples/rgw/golang/object-upload/go.mod
new file mode 100644
index 000000000..9cb158af1
--- /dev/null
+++ b/examples/rgw/golang/object-upload/go.mod
@@ -0,0 +1,5 @@
+module ceph/object-upload
+
+go 1.13
+
+require github.com/aws/aws-sdk-go v1.43.41
diff --git a/examples/rgw/golang/object-upload/go.sum b/examples/rgw/golang/object-upload/go.sum
new file mode 100644
index 000000000..8ab2f12b3
--- /dev/null
+++ b/examples/rgw/golang/object-upload/go.sum
@@ -0,0 +1,24 @@
+github.com/aws/aws-sdk-go v1.43.41 h1:HaazVplP8/t6SOfybQlNUmjAxLWDKdLdX8BSEHFlJdY=
+github.com/aws/aws-sdk-go v1.43.41/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/examples/rgw/golang/object-upload/object-upload.go b/examples/rgw/golang/object-upload/object-upload.go
new file mode 100644
index 000000000..82f09caa2
--- /dev/null
+++ b/examples/rgw/golang/object-upload/object-upload.go
@@ -0,0 +1,84 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+)
+
+func main() {
+ bucket := flag.String("b", "", "The name of the bucket")
+ filename := flag.String("f", "", "Complete file path to object to be uploaded")
+ flag.Parse()
+
+ if *bucket == "" {
+ fmt.Println("You must supply the name of the bucket")
+ fmt.Println("-b BUCKET")
+ return
+ }
+
+ if *filename == "" {
+ fmt.Println("You must supply the object to be uploaded")
+ fmt.Println("-f FILE/FILEPATH")
+ return
+ }
+
+ file, err := os.Open(*filename)
+ if err != nil {
+ exitErrorf("Unable to open file %q, %v", *filename, err)
+ }
+
+ defer file.Close()
+
+ //Ceph RGW Cluster credentials
+ access_key := "0555b35654ad1656d804"
+ secret_key := "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
+ token_id := ""
+ url := "http://127.0.0.1:8000"
+
+ defaultResolver := endpoints.DefaultResolver()
+ s3CustResolverFn := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+ if service == "s3" {
+ return endpoints.ResolvedEndpoint{
+ URL: url,
+ }, nil
+ }
+
+ return defaultResolver.EndpointFor(service, region, optFns...)
+ }
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{
+ Region: aws.String("default"),
+ Credentials: credentials.NewStaticCredentials(access_key, secret_key, token_id),
+ S3ForcePathStyle: aws.Bool(true),
+ EndpointResolver: endpoints.ResolverFunc(s3CustResolverFn),
+ },
+ }))
+
+ uploader := s3manager.NewUploader(sess)
+
+ // Upload the file's body to the S3 bucket as an object, using the path
+ // passed via -f as the object key.
+ _, err = uploader.Upload(&s3manager.UploadInput{
+ Bucket: bucket,
+ Key: filename,
+ Body: file,
+ })
+ if err != nil {
+ exitErrorf("Unable to upload %q to %q, %v", *filename, *bucket, err)
+ }
+
+ fmt.Printf("Successfully uploaded %q to %q\n", *filename, *bucket)
+}
+
+func exitErrorf(msg string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, msg+"\n", args...)
+ os.Exit(1)
+}
diff --git a/examples/rgw/golang/put-bucket-notification-creation/README.md b/examples/rgw/golang/put-bucket-notification-creation/README.md
new file mode 100644
index 000000000..47555c0c0
--- /dev/null
+++ b/examples/rgw/golang/put-bucket-notification-creation/README.md
@@ -0,0 +1,24 @@
+# Introduction
+This directory contains a Golang code example showing how to create a bucket notification that attaches a topic to an S3 bucket running on a Ceph RGW cluster.
+
+# Prerequisite
+A Linux machine running an RGW Ceph cluster, preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new`` command.
+Go installed on the Linux machine.
+
+## Workflow Procedure
+1. Install AWS CLI version 1 on your Linux machine as explained [here](https://docs.aws.amazon.com/cli/v1/userguide/install-linux.html)
+2. Create a topic on the Ceph cluster with the command
+```
+aws --region default --endpoint-url http://localhost:8000 sns create-topic --name=sample-topic --attributes='{"push-endpoint": "http://localhost:10900"}'
+```
+3. Create a bucket to which the topic will be attached with the command
+```
+aws --endpoint-url http://localhost:8000 s3 mb s3://sample-bucket
+```
+4. In your terminal, navigate to the directory where the Golang example code exists.
+5. Run ``` go mod tidy ``` to install the required Go packages on the system.
+6. Run the Golang program as ``` go run put-bucket-notification-creation.go -b sample-bucket -t arn:aws:sns:default::sample-topic ``` in the terminal window to create the bucket notification with the suffix filter rule.
+7. Upload any jpg file you have to the bucket with the command
+```
+aws --endpoint-url http://localhost:8000 s3 cp your-jpg-file.jpg s3://sample-bucket
+```
diff --git a/examples/rgw/golang/put-bucket-notification-creation/go.mod b/examples/rgw/golang/put-bucket-notification-creation/go.mod
new file mode 100644
index 000000000..4740ead2f
--- /dev/null
+++ b/examples/rgw/golang/put-bucket-notification-creation/go.mod
@@ -0,0 +1,5 @@
+module put-bucket-notification-creation
+
+go 1.13
+
+require github.com/aws/aws-sdk-go v1.44.62
diff --git a/examples/rgw/golang/put-bucket-notification-creation/go.sum b/examples/rgw/golang/put-bucket-notification-creation/go.sum
new file mode 100644
index 000000000..7cee1fa2b
--- /dev/null
+++ b/examples/rgw/golang/put-bucket-notification-creation/go.sum
@@ -0,0 +1,24 @@
+github.com/aws/aws-sdk-go v1.44.62 h1:N8qOPnBhl2ZCIFiqyB640Xt5CeX9D8CEVhG/Vj7jGJU=
+github.com/aws/aws-sdk-go v1.44.62/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/examples/rgw/golang/put-bucket-notification-creation/put-bucket-notification-creation.go b/examples/rgw/golang/put-bucket-notification-creation/put-bucket-notification-creation.go
new file mode 100644
index 000000000..5e8b7f4bc
--- /dev/null
+++ b/examples/rgw/golang/put-bucket-notification-creation/put-bucket-notification-creation.go
@@ -0,0 +1,96 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+func main() {
+ bucket := flag.String("b", "", "Name of the bucket to add notification to")
+ topic := flag.String("t", "", "The ARN of the topic to which the notification is attached")
+ flag.Parse()
+
+ if *bucket == "" {
+ fmt.Println("You must supply the name of the bucket")
+ fmt.Println("-b BUCKET")
+ return
+ }
+
+ if *topic == "" {
+ fmt.Println("You must supply the topic ARN")
+ fmt.Println("-t TOPIC_ARN")
+ return
+ }
+
+ //Ceph RGW Credentials
+ access_key := "0555b35654ad1656d804"
+ secret_key := "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
+ token_id := ""
+ url := "http://127.0.0.1:8000"
+
+ defaultResolver := endpoints.DefaultResolver()
+ CustResolverFn := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+ if service == "s3" {
+ return endpoints.ResolvedEndpoint{
+ URL: url,
+ }, nil
+ }
+
+ return defaultResolver.EndpointFor(service, region, optFns...)
+ }
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{
+ Region: aws.String("default"),
+ Credentials: credentials.NewStaticCredentials(access_key, secret_key, token_id),
+ S3ForcePathStyle: aws.Bool(true),
+ EndpointResolver: endpoints.ResolverFunc(CustResolverFn),
+ },
+ }))
+
+ svc := s3.New(sess)
+
+ suffixRule := []*s3.FilterRule{
+ {
+ Name: aws.String("suffix"),
+ Value: aws.String("jpg"),
+ },
+ }
+
+ input := &s3.PutBucketNotificationConfigurationInput{
+ Bucket: bucket,
+ NotificationConfiguration: &s3.NotificationConfiguration{
+ TopicConfigurations: []*s3.TopicConfiguration{
+ {
+ Events: []*string{aws.String("s3:ObjectCreated:*")},
+ Filter: &s3.NotificationConfigurationFilter{
+ Key: &s3.KeyFilter{
+ FilterRules: suffixRule,
+ },
+ },
+ Id: aws.String("notif1"), //Raises MalformedXML if absent
+ TopicArn: topic,
+ },
+ },
+ },
+ }
+
+ _, err := svc.PutBucketNotificationConfiguration(input)
+
+ if err != nil {
+ exitErrorf("Unable to create Put Bucket Notification because of %s", err)
+ }
+ fmt.Println("Put bucket notification added to ", *topic)
+}
+
+func exitErrorf(msg string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, msg+"\n", args...)
+ os.Exit(1)
+}
diff --git a/examples/rgw/golang/topic-creation/README.md b/examples/rgw/golang/topic-creation/README.md
new file mode 100644
index 000000000..3033a86c5
--- /dev/null
+++ b/examples/rgw/golang/topic-creation/README.md
@@ -0,0 +1,11 @@
+# Introduction
+This directory contains a Golang code example showing how to create an SNS topic on a Ceph RGW cluster.
+
+# Prerequisite
+A Linux machine running an RGW Ceph cluster, preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new`` command.
+Go installed on the Linux machine.
+
+## Workflow Procedure
+1. In your terminal, navigate to the directory where the Golang example code exists.
+2. Run ``` go mod tidy ``` to install the required Golang packages on the system.
+3. Run the Golang program as ``` go run topic-creation.go -t sample-topic-1 -a '{"push-endpoint": "http://127.0.0.1:10900"}' ``` in the terminal window to create an SNS topic with custom attributes.
diff --git a/examples/rgw/golang/topic-creation/go.mod b/examples/rgw/golang/topic-creation/go.mod
new file mode 100644
index 000000000..4b4f7fabd
--- /dev/null
+++ b/examples/rgw/golang/topic-creation/go.mod
@@ -0,0 +1,5 @@
+module examples/topic-creation
+
+go 1.13
+
+require github.com/aws/aws-sdk-go v1.44.61
diff --git a/examples/rgw/golang/topic-creation/go.sum b/examples/rgw/golang/topic-creation/go.sum
new file mode 100644
index 000000000..4c3ec3873
--- /dev/null
+++ b/examples/rgw/golang/topic-creation/go.sum
@@ -0,0 +1,24 @@
+github.com/aws/aws-sdk-go v1.44.61 h1:NcpLSS3Z0MiVQIYugx4I40vSIEEAXT0baO684ExNRco=
+github.com/aws/aws-sdk-go v1.44.61/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/examples/rgw/golang/topic-creation/topic-creation.go b/examples/rgw/golang/topic-creation/topic-creation.go
new file mode 100644
index 000000000..128358f40
--- /dev/null
+++ b/examples/rgw/golang/topic-creation/topic-creation.go
@@ -0,0 +1,81 @@
+package main
+
+import (
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/sns"
+)
+
+func main() {
+ topic := flag.String("t", "", "The name of the topic")
+ attributes := flag.String("a", "", "Topic attributes needed")
+ flag.Parse()
+
+ if *topic == "" {
+ fmt.Println("You must supply the name of the topic")
+ fmt.Println("-t TOPIC")
+ return
+ }
+
+ if *attributes == "" {
+ fmt.Println("You must supply topic attributes")
+ fmt.Println("-a ATTRIBUTES")
+ return
+ }
+
+ // convert the JSON string into a Go map; done after the flag checks so that
+ // a missing -a flag prints the usage message instead of a JSON parse error
+ attributesmap := map[string]*string{}
+ err := json.Unmarshal([]byte(*attributes), &attributesmap)
+ if err != nil {
+ exitErrorf("Check your JSON string for any errors: %s : %s", err, *attributes)
+ }
+ //Ceph RGW Cluster credentials
+ access_key := "0555b35654ad1656d804"
+ secret_key := "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
+ token_id := ""
+ url := "http://127.0.0.1:8000"
+
+ defaultResolver := endpoints.DefaultResolver()
+ snsCustResolverFn := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+ if service == "sns" {
+ return endpoints.ResolvedEndpoint{
+ URL: url,
+ }, nil
+ }
+
+ return defaultResolver.EndpointFor(service, region, optFns...)
+ }
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{
+ Region: aws.String("default"),
+ Credentials: credentials.NewStaticCredentials(access_key, secret_key, token_id),
+ S3ForcePathStyle: aws.Bool(true),
+ EndpointResolver: endpoints.ResolverFunc(snsCustResolverFn),
+ },
+ }))
+
+ client := sns.New(sess)
+
+ results, err := client.CreateTopic(&sns.CreateTopicInput{
+ Attributes: attributesmap,
+ Name: topic,
+ })
+
+ if err != nil {
+ exitErrorf("Unable to create topic %s, %s", *topic, err)
+ }
+
+ fmt.Printf("Successfully created %s\n", *results.TopicArn)
+}
+
+func exitErrorf(msg string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, msg+"\n", args...)
+ os.Exit(1)
+}
diff --git a/examples/rgw/java/ceph-s3-upload/README.md b/examples/rgw/java/ceph-s3-upload/README.md
new file mode 100644
index 000000000..235d43666
--- /dev/null
+++ b/examples/rgw/java/ceph-s3-upload/README.md
@@ -0,0 +1,16 @@
+# Introduction
+This directory contains a Java code example showing how to upload an object to an S3 bucket running on a Ceph RGW cluster.
+
+# Prerequisites
+A Linux machine running an RGW Ceph cluster, preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new`` command.
+Java and Maven installed on the Linux machine.
+
+## Workflow Procedure
+1. Install AWS CLI version 1 on your Linux machine as explained [here](https://docs.aws.amazon.com/cli/v1/userguide/install-linux.html)
+2. Create a bucket on the Ceph cluster with the command
+``
+aws --endpoint-url http://localhost:8000 s3 mb s3://sample-bucket
+``
+3. In your terminal, navigate to the ``ceph-s3-upload`` folder; make sure you can see the ``pom.xml`` file.
+4. Run `` mvn clean package `` to install the required Java packages on the system.
+5. Once successful, run `` java -jar target/ceph-s3-upload-1.0-SNAPSHOT-jar-with-dependencies.jar sample-bucket ceph-s3-upload.txt `` to test Java S3 object upload on the Ceph RGW cluster.
diff --git a/examples/rgw/java/ceph-s3-upload/ceph-s3-upload.txt b/examples/rgw/java/ceph-s3-upload/ceph-s3-upload.txt
new file mode 100644
index 000000000..02e6ceb47
--- /dev/null
+++ b/examples/rgw/java/ceph-s3-upload/ceph-s3-upload.txt
@@ -0,0 +1 @@
+This file will be uploaded to Ceph S3 storage
diff --git a/examples/rgw/java/ceph-s3-upload/pom.xml b/examples/rgw/java/ceph-s3-upload/pom.xml
new file mode 100644
index 000000000..bcd6f0805
--- /dev/null
+++ b/examples/rgw/java/ceph-s3-upload/pom.xml
@@ -0,0 +1,67 @@
+<project
+ xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.example.cephs3upload</groupId>
+ <artifactId>ceph-s3-upload</artifactId>
+ <packaging>jar</packaging>
+ <version>1.0-SNAPSHOT</version>
+ <name>ceph-s3-upload</name>
+ <url>http://maven.apache.org</url>
+ <properties>
+ <maven.compiler.source>1.8</maven.compiler.source>
+ <maven.compiler.target>1.8</maven.compiler.target>
+ </properties>
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>com.amazonaws</groupId>
+ <artifactId>aws-java-sdk-bom</artifactId>
+ <version>1.12.201</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>com.amazonaws</groupId>
+ <artifactId>aws-java-sdk-s3</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>3.8.1</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <extensions>true</extensions>
+ <artifactId>maven-assembly-plugin</artifactId>
+
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ <configuration>
+ <archive>
+ <manifest>
+ <addClasspath>true</addClasspath>
+ <mainClass>org.example.cephs3upload.App</mainClass>
+ </manifest>
+ </archive>
+ <descriptorRefs>
+ <descriptorRef>jar-with-dependencies</descriptorRef>
+ </descriptorRefs>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/examples/rgw/java/ceph-s3-upload/src/main/java/org/example/cephs3upload/App.java b/examples/rgw/java/ceph-s3-upload/src/main/java/org/example/cephs3upload/App.java
new file mode 100644
index 000000000..32f334cfb
--- /dev/null
+++ b/examples/rgw/java/ceph-s3-upload/src/main/java/org/example/cephs3upload/App.java
@@ -0,0 +1,51 @@
+package org.example.cephs3upload;
+
+import com.amazonaws.services.s3.model.AmazonS3Exception;
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.AmazonS3ClientBuilder;
+import com.amazonaws.client.builder.AwsClientBuilder;
+import com.amazonaws.auth.AWSStaticCredentialsProvider;
+import com.amazonaws.auth.BasicAWSCredentials;
+
+import java.io.File;
+import java.nio.file.Paths;
+
+public class App
+{
+ public static void main( String[] args )
+ {
+ final String USAGE = "\n" +
+ "To run this example, supply the name of an S3 bucket and a file to\n" +
+ "upload to it.\n" +
+ "\n" +
+ "Ex: java -jar target/ceph-s3-upload-1.0-SNAPSHOT-jar-with-dependencies.jar <bucketname> <filename>\n";
+
+ if (args.length < 2) {
+ System.out.println(USAGE);
+ System.exit(1);
+ }
+
+ String bucket_name = args[0];
+ String file_path = args[1];
+ String key_name = Paths.get(file_path).getFileName().toString();
+
+ System.out.format("Uploading %s to S3 bucket %s...\n", file_path, bucket_name);
+ // Supply the Ceph RGW access and secret keys here, in that order: "access key", "secret key"
+ BasicAWSCredentials credentials = new BasicAWSCredentials("0555b35654ad1656d804","h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==");
+ // Note that the AmazonS3 client builder takes in the endpoint and the region,
+ // both of which have to be specified in this file
+ final AmazonS3 s3 = AmazonS3ClientBuilder
+ .standard()
+ .withCredentials(new AWSStaticCredentialsProvider(credentials))
+ .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("http://127.0.0.1:8000", "default"))
+ .build();
+ try {
+ s3.putObject(bucket_name, key_name, new File(file_path));
+ } catch (AmazonS3Exception e) {
+ System.err.println(e.getMessage()); // gives a more explicit error message than e.getErrorMessage(), e.g. when the bucket is not available
+ System.exit(1);
+ }
+ System.out.println("Object upload successful!");
+ }
+}
diff --git a/examples/rgw/java/ceph-s3-upload/src/test/java/org/example/cephs3upload/AppTest.java b/examples/rgw/java/ceph-s3-upload/src/test/java/org/example/cephs3upload/AppTest.java
new file mode 100644
index 000000000..1c8075253
--- /dev/null
+++ b/examples/rgw/java/ceph-s3-upload/src/test/java/org/example/cephs3upload/AppTest.java
@@ -0,0 +1,38 @@
+package org.example.cephs3upload;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+/**
+ * Unit test for simple App.
+ */
+public class AppTest
+ extends TestCase
+{
+ /**
+ * Create the test case
+ *
+ * @param testName name of the test case
+ */
+ public AppTest( String testName )
+ {
+ super( testName );
+ }
+
+ /**
+ * @return the suite of tests being tested
+ */
+ public static Test suite()
+ {
+ return new TestSuite( AppTest.class );
+ }
+
+ /**
+ * Rigorous Test :-)
+ */
+ public void testApp()
+ {
+ assertTrue( true );
+ }
+}
diff --git a/examples/rgw/lua/config/prometheus.yml b/examples/rgw/lua/config/prometheus.yml
new file mode 100644
index 000000000..37deee67c
--- /dev/null
+++ b/examples/rgw/lua/config/prometheus.yml
@@ -0,0 +1,19 @@
+global:
+ scrape_interval: 2s # Scrape targets every 2 seconds (the Prometheus default is 15s).
+
+ # Attach these labels to any time series or alerts when communicating with
+ # external systems (federation, remote storage, Alertmanager).
+ external_labels:
+ monitor: 'codelab-monitor'
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'rgw'
+
+ # Override the global default and scrape targets from this job every second.
+ scrape_interval: 1s
+
+ static_configs:
+ - targets: ['127.0.0.1:9091']
\ No newline at end of file
diff --git a/examples/rgw/lua/elasticsearch_adapter.lua b/examples/rgw/lua/elasticsearch_adapter.lua
new file mode 100644
index 000000000..a0c542fed
--- /dev/null
+++ b/examples/rgw/lua/elasticsearch_adapter.lua
@@ -0,0 +1,114 @@
+local elasticsearch = require ("elasticsearch")
+local json = require ("lunajson")
+
+local client = elasticsearch.client{
+ hosts = {
+ {
+ host = "localhost",
+ port = "9200"
+ }
+ }
+}
+
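+-- CopyFrom fields are present only for server-side copy requests, so capture them conditionally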
+local copyfrom = {}
+if (Request.CopyFrom ~= nil) then
+ copyfrom = {
+ Tenant = Request.CopyFrom.Tenant,
+ Bucket = Request.CopyFrom.Bucket,
+ Object = {
+ Name = Request.CopyFrom.Object.Name,
+ Instance = Request.CopyFrom.Object.Instance,
+ Id = Request.CopyFrom.Object.Id,
+ Size = Request.CopyFrom.Object.Size,
+ MTime = Request.CopyFrom.Object.MTime
+ }
+ }
+end
+
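+-- index the request fields as a single document in the "rgw" index, keyed by the request id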
+local res, status = client:index{
+ index = "rgw",
+ type = "Request",
+ id = Request.Id,
+ body =
+ {
+ RGWOp = Request.RGWOp,
+ DecodedURI = Request.DecodedURI,
+ ContentLength = Request.ContentLength,
+ GenericAttributes = json.encode(Request.GenericAttributes),
+ Response = {
+ HTTPStatusCode = Request.Response.HTTPStatusCode,
+ HTTPStatus = Request.Response.HTTPStatus,
+ RGWCode = Request.Response.RGWCode,
+ Message = Request.Response.Message
+ },
+ SwiftAccountName = Request.SwiftAccountName,
+ Bucket = {
+ Tenant = Request.Bucket.Tenant,
+ Name = Request.Bucket.Name,
+ Marker = Request.Bucket.Marker,
+ Id = Request.Bucket.Id,
+ Count = Request.Bucket.Count,
+ Size = Request.Bucket.Size,
+ ZoneGroupId = Request.Bucket.ZoneGroupId,
+ CreationTime = Request.Bucket.CreationTime,
+ MTime = Request.Bucket.MTime,
+ Quota = {
+ MaxSize = Request.Bucket.Quota.MaxSize,
+ MaxObjects = Request.Bucket.Quota.MaxObjects,
+ Enabled = Request.Bucket.Quota.Enabled,
+ Rounded = Request.Bucket.Quota.Rounded
+ },
+ PlacementRule = {
+ Name = Request.Bucket.PlacementRule.Name,
+ StorageClass = Request.Bucket.PlacementRule.StorageClass
+ },
+ User = {
+ Tenant = Request.Bucket.User.Tenant,
+ Id = Request.Bucket.User.Id
+ }
+ },
+ Object = {
+ Name = Request.Object.Name,
+ Instance = Request.Object.Instance,
+ Id = Request.Object.Id,
+ Size = Request.Object.Size,
+ MTime = Request.Object.MTime
+ },
+ CopyFrom = copyfrom,
+ ObjectOwner = {
+ DisplayName = Request.ObjectOwner.DisplayName,
+ User = {
+ Tenant = Request.ObjectOwner.User.Tenant,
+ Id = Request.ObjectOwner.User.Id
+ }
+ },
+ ZoneGroup = {
+ Name = Request.ZoneGroup.Name,
+ Endpoint = Request.ZoneGroup.Endpoint
+ },
+ Environment = json.encode(Request.Environment),
+ Policy = json.encode(Request.Policy),
+ UserPolicies = json.encode(Request.UserPolicies),
+ RGWId = Request.RGWId,
+ HTTP = {
+ Parameters = json.encode(Request.HTTP.Parameters),
+ Resources = json.encode(Request.HTTP.Resources),
+ Metadata = json.encode(Request.HTTP.Metadata),
+ Host = Request.HTTP.Host,
+ Method = Request.HTTP.Method,
+ URI = Request.HTTP.URI,
+ QueryString = Request.HTTP.QueryString,
+ Domain = Request.HTTP.Domain
+ },
+ Time = Request.Time,
+ Dialect = Request.Dialect,
+ Id = Request.Id,
+ TransactionId = Request.TransactionId,
+ Tags = json.encode(Request.Tags),
+ User = {
+ Tenant = Request.User.Tenant,
+ Id = Request.User.Id
+ }
+ }
+}
+
diff --git a/examples/rgw/lua/elasticsearch_adapter.md b/examples/rgw/lua/elasticsearch_adapter.md
new file mode 100644
index 000000000..a32b5d36f
--- /dev/null
+++ b/examples/rgw/lua/elasticsearch_adapter.md
@@ -0,0 +1,59 @@
+# Introduction
+
+This directory contains an example `elasticsearch_adapter.lua` on how to
+use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/)
+to push fields of the RGW requests
+to [Elasticsearch](https://www.elastic.co/elasticsearch/).
+
+## Elasticsearch
+
+Install and run Elasticsearch using docker:
+```bash
+docker network create elastic
+docker pull elasticsearch:2.4.6
+docker run --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:2.4.6
+```
+
+[Full documentation for Elasticsearch installation](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html)
+
+## Usage
+
+* Upload the script:
+
+```bash
+radosgw-admin script put --infile=elasticsearch_adapter.lua --context=postRequest
+```
+
+* Add the packages used in the script:
+
+```bash
+radosgw-admin script-package add --package='elasticsearch 1.0.0-1' --allow-compilation
+radosgw-admin script-package add --package='lunajson' --allow-compilation
+radosgw-admin script-package add --package='lua-cjson 2.1.0-1' --allow-compilation
+```
+
+* Restart radosgw.
+
+* Send a request:
+```bash
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== mb s3://mybucket
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== put -P /etc/hosts s3://mybucket
+curl http://localhost:8000/mybucket/hosts
+```
+
+* Search by bucket id from Elasticsearch:
+```bash
+curl -X GET "localhost:9200/rgw/_search?pretty" -H 'Content-Type: application/json' -d'
+{
+ "query": {
+ "match": {
+ "Bucket.Id": "05382336-b2db-409f-82dc-f28ab5fef978.4471.4471"
+ }
+ }
+}
+'
+```
+
+## Requirements
+* Lua 5.3
+
diff --git a/examples/rgw/lua/img/prometheus.png b/examples/rgw/lua/img/prometheus.png
new file mode 100644
index 000000000..7a3b63f64
--- /dev/null
+++ b/examples/rgw/lua/img/prometheus.png
Binary files differ
diff --git a/examples/rgw/lua/nats_adapter.lua b/examples/rgw/lua/nats_adapter.lua
new file mode 100644
index 000000000..38264dd46
--- /dev/null
+++ b/examples/rgw/lua/nats_adapter.lua
@@ -0,0 +1,93 @@
+ local json = require ("lunajson")
+ local nats = require ("nats")
+
+ function nats_connect(nats_host, nats_port)
+ local nats_params = {
+ host = nats_host,
+ port = nats_port,
+ }
+ client = nats.connect(nats_params)
+ client:connect()
+ end
+
+ function toJson(request, eventName, opaqueData, configure)
+ supported_event = true
+ local notification = {
+ ["Records"] = {
+ ["eventVersion"] = "2.1",
+ ["eventSource"] = "ceph:s3",
+ ["awsRegion"] = request.ZoneGroup.Name,
+ ["eventTime"] = request.Time,
+ ["eventName"] = eventName,
+ ["userIdentity"] = {
+ ["principalId"] = request.User.Id
+ },
+ ["requestParameters"] = {
+ ["sourceIPAddress"] = ""
+ },
+ ["responseElements"] = {
+ ["x-amz-request-id"] = request.Id,
+ ["x-amz-id-2"] = request.RGWId
+ },
+ ["s3"] = {
+ ["s3SchemaVersion"] = "1.0",
+ ["configurationId"] = configure,
+ ["bucket"] = {
+ ["name"] = request.Bucket.Name,
+ ["ownerIdentity"] = {
+ ["principalId"] = request.Bucket.User.Id
+ },
+ ["arn"] = "arn:aws:s3:" .. request.ZoneGroup.Name .. "::" .. request.Bucket.Name,
+ ["id"] = request.Bucket.Id
+ },
+ ["object"] = {
+ ["key"] = request.Object.Name,
+ ["size"] = request.Object.Size,
+ ["eTag"] = "", -- eTag is not supported yet
+ ["versionId"] = request.Object.Instance,
+ ["sequencer"] = string.format("%x", os.time()),
+ ["metadata"] = {
+ json.encode(request.HTTP.Metadata)
+ },
+ ["tags"] = {
+ json.encode(request.Tags)
+ }
+ }
+ },
+ ["eventId"] = "",
+ ["opaqueData"] = opaqueData
+ }
+ }
+ return notification
+ end
+
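+ -- main flow: build a notification for supported ops on the configured bucket and publish it to NATS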
+ supported_event = false
+ configure = "mynotif1"
+ opaqueData = "me@example.com"
+ topic = "Bucket_Notification"
+ bucket_name = "mybucket"
+ nats_host = '0.0.0.0'
+ nats_port = 4222
+
+ if bucket_name == Request.Bucket.Name then
+ --Object Created
+ if Request.RGWOp == "put_obj" then
+ notification = toJson(Request ,'ObjectCreated:Put', opaqueData, configure)
+ elseif Request.RGWOp == "post_obj" then
+ notification = toJson(Request ,'ObjectCreated:Post', opaqueData, configure)
+
+ elseif Request.RGWOp == "copy_obj" then
+ notification = toJson(Request ,'ObjectCreated:Copy', opaqueData, configure)
+
+ --Object Removed
+ elseif Request.RGWOp == "delete_obj" then
+ notification = toJson(Request ,'ObjectRemoved:Delete', opaqueData, configure)
+ end
+
+ if supported_event == true then
+ nats_connect(nats_host, nats_port)
+ local payload = json.encode(notification)
+ client:publish(topic, payload)
+ RGWDebugLog("bucket notification sent to nats://" .. nats_host .. ":" .. nats_port .. "/" .. topic)
+ end
+ end
diff --git a/examples/rgw/lua/nats_adapter.md b/examples/rgw/lua/nats_adapter.md
new file mode 100644
index 000000000..35c1780c7
--- /dev/null
+++ b/examples/rgw/lua/nats_adapter.md
@@ -0,0 +1,101 @@
+# Introduction
+
+This directory contains an example, `nats_adapter.lua`, of how to use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/) together with a [NATS Lua client](https://github.com/dawnangel/lua-nats) to add NATS to the list of bucket notification endpoints.
+
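+A minimal sketch of the publish step, assuming a local nats-server on the default port (the full `nats_adapter.lua` first builds a complete S3 notification record):
+
+```lua
+local nats = require ("nats")
+
+-- connect to the broker and publish a payload on the notification topic
+local client = nats.connect({ host = '0.0.0.0', port = 4222 })
+client:connect()
+client:publish('Bucket_Notification', '{"example":"payload"}')
+```
+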
+## NATS
+To test your setup:
+* Install [NATS](https://docs.nats.io/nats-server/installation) and start a nats-server.
+
+* Subscribe to the NATS server using a [NATS subscriber](https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe), choosing the topic to be 'Bucket_Notification' (as defined in the [script](nats_adapter.lua)):
+
+
+```bash
+nats-sub "Bucket_Notification"
+```
+
+
+[Full documentation for subscribing](https://docs.nats.io/nats-server/clients).
+
+Alternatively, configure the script to point to an existing NATS broker by editing the following part of the script to match the parameters of your NATS server:
+
+```
+nats_host = '{host}'
+nats_port = {port}
+```
+
+## Usage
+
+* Upload the [script](nats_adapter.lua):
+
+```bash
+radosgw-admin script put --infile=nats_adapter.lua --context=postRequest
+```
+* Add the packages used in the script:
+
+```bash
+radosgw-admin script-package add --package=nats --allow-compilation
+radosgw-admin script-package add --package=lunajson --allow-compilation
+radosgw-admin script-package add --package='lua-cjson 2.1.0-1' --allow-compilation
+```
+* Restart radosgw.
+* Create a bucket:
+```
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" mb s3://mybucket
+```
+* Upload a file to the bucket and make sure that the NATS server received the notification:
+
+```
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" put hello.txt s3://mybucket
+```
+
+Expected output:
+```
+Received on [Bucket_Notification]:
+ {"Records":[
+ {
+ "eventVersion":"2.1",
+ "eventSource":"ceph:s3",
+ "awsRegion":"default",
+ "eventTime":"2019-11-22T13:47:35.124724Z",
+ "eventName":"ObjectCreated:Put",
+ "userIdentity":{
+ "principalId":"tester"
+ },
+ "requestParameters":{
+ "sourceIPAddress":""
+ },
+ "responseElements":{
+ "x-amz-request-id":"503a4c37-85eb-47cd-8681-2817e80b4281.5330.903595",
+ "x-amz-id-2":"14d2-zone1-zonegroup1"
+ },
+ "s3":{
+ "s3SchemaVersion":"1.0",
+ "configurationId":"mynotif1",
+ "bucket":{
+ "name":"mybucket",
+ "ownerIdentity":{
+ "principalId":"tester"
+ },
+ "arn":"arn:aws:s3:us-east-1::mybucket1",
+ "id":"503a4c37-85eb-47cd-8681-2817e80b4281.5332.38"
+ },
+ "object":{
+ "key":"hello.txt",
+ "size":"1024",
+ "eTag":"",
+ "versionId":"",
+ "sequencer": "F7E6D75DC742D108",
+ "metadata":[],
+ "tags":[]
+ }
+ },
+ "eventId":"",
+ "opaqueData":"me@example.com"
+ }
+ ]}
+
+```
+
+## Requirements
+* Lua 5.3 (or higher)
+* Luarocks
diff --git a/examples/rgw/lua/prometheus_adapter.lua b/examples/rgw/lua/prometheus_adapter.lua
new file mode 100644
index 000000000..4f0af9a3b
--- /dev/null
+++ b/examples/rgw/lua/prometheus_adapter.lua
@@ -0,0 +1,23 @@
+local http = require("socket.http")
+local ltn12 = require("ltn12")
+
+local respbody = {}
+local op = "rgw_other_request_content_length"
+if (Request.RGWOp == "put_obj") then
+ op = "rgw_put_request_content_length"
+elseif (Request.RGWOp == "get_obj") then
+ op = "rgw_get_request_content_length"
+end
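+-- one sample in the Prometheus text exposition format: "<metric_name> <value>\n"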
+local field = op .. " " .. tostring(Request.ContentLength) .. "\n"
+
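+-- POST the sample to the local Pushgateway; Prometheus scrapes it from there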
+local body, code, headers, status = http.request{
+ url = "http://127.0.0.1:9091/metrics/job/rgw",
+ method = "POST",
+ headers = {
+ ["Content-Type"] = "application/x-www-form-urlencoded",
+ ["Content-Length"] = string.len(field)
+ },
+ source = ltn12.source.string(field),
+ sink = ltn12.sink.table(respbody),
+}
+
diff --git a/examples/rgw/lua/prometheus_adapter.md b/examples/rgw/lua/prometheus_adapter.md
new file mode 100644
index 000000000..eae1d8151
--- /dev/null
+++ b/examples/rgw/lua/prometheus_adapter.md
@@ -0,0 +1,59 @@
+# Introduction
+
+This directory contains an example, `prometheus_adapter.lua`, showing how to
+use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/)
+to push metrics from RGW requests to [Prometheus](https://prometheus.io/),
+specifically to collect information on object sizes.
+
+## Prometheus
+
+Because every single run of a Lua script is short-lived,
+[Pushgateway](https://github.com/prometheus/pushgateway)
+should be used as an intermediate service that holds the pushed metrics,
+enabling Prometheus to scrape the data coming from RGW.
+
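+A minimal sketch of such a push (mirroring the approach of `prometheus_adapter.lua`; the metric name and Pushgateway address are the ones used in this example setup):
+
+```lua
+local http = require("socket.http")
+
+-- one sample in the Prometheus text exposition format: "<name> <value>\n"
+local field = "rgw_put_request_content_length " .. tostring(Request.ContentLength) .. "\n"
+-- LuaSocket's simple form POSTs the body to the given URL
+http.request("http://127.0.0.1:9091/metrics/job/rgw", field)
+```
+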
+* Install and run Pushgateway using docker:
+
+```bash
+docker pull prom/pushgateway
+docker run -p 9091:9091 -it prom/pushgateway
+```
+
+* Install and run Prometheus using docker:
+
+```bash
+docker pull prom/prometheus
+docker run --network host -v ${CEPH_DIR}/examples/rgw/lua/config/prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus
+```
+
+[Full documentation for Prometheus installation](https://prometheus.io/docs/prometheus/latest/installation/)
+
+## Usage
+
+* Upload the script:
+
+```bash
+radosgw-admin script put --infile=prometheus_adapter.lua --context=postRequest
+```
+
+* Add the packages used in the script:
+
+```bash
+radosgw-admin script-package add --package='luasocket' --allow-compilation
+```
+
+* Restart radosgw.
+
+* Send a request:
+```bash
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== mb s3://mybucket
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== put -P /etc/hosts s3://mybucket
+curl http://localhost:8000/mybucket/hosts
+```
+
+* Open `http://localhost:9090` in a browser and search for one of the metrics pushed by the script, e.g. `rgw_put_request_content_length`:
+![](img/prometheus.png)
+
+## Requirements
+* Lua 5.3 or higher
+
diff --git a/examples/rgw/lua/storage_class.lua b/examples/rgw/lua/storage_class.lua
new file mode 100644
index 000000000..08d41094c
--- /dev/null
+++ b/examples/rgw/lua/storage_class.lua
@@ -0,0 +1,19 @@
+local function isempty(input)
+ return input == nil or input == ''
+end
+
+if Request.RGWOp == 'put_obj' then
+ RGWDebugLog("Put_Obj with StorageClass: " .. Request.HTTP.StorageClass )
+ if (isempty(Request.HTTP.StorageClass)) then
+ if (Request.ContentLength >= 65536) then
+ RGWDebugLog("No StorageClass for Object and size >= threshold: " .. Request.Object.Name .. " adding QLC StorageClass")
+ Request.HTTP.StorageClass = "QLC_CLASS"
+ else
+ RGWDebugLog("No StorageClass for Object and size < threshold: " .. Request.Object.Name .. " adding STANDARD StorageClass")
+ Request.HTTP.StorageClass = "STANDARD"
+ end
+ else
+ RGWDebugLog("Storage Class Header Present on Object: " .. Request.Object.Name .. " with StorageClass: " .. Request.HTTP.StorageClass)
+ end
+end
+
diff --git a/examples/rgw/lua/storage_class.md b/examples/rgw/lua/storage_class.md
new file mode 100644
index 000000000..8da92ccc3
--- /dev/null
+++ b/examples/rgw/lua/storage_class.md
@@ -0,0 +1,49 @@
+# Introduction
+
+This directory contains an example, `storage_class.lua`, showing how to
+use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/)
+to read and write the Storage Class field of a put request.
+
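+A minimal sketch of the pattern (the full `storage_class.lua` also applies a size threshold; defaulting to "STANDARD" here is just an illustration):
+
+```lua
+-- preRequest context: default the storage class of an upload when none was given
+if Request.RGWOp == 'put_obj' and
+   (Request.HTTP.StorageClass == nil or Request.HTTP.StorageClass == '') then
+  Request.HTTP.StorageClass = "STANDARD"
+end
+```
+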
+## Usage
+
+The following examples are based on a vstart environment built in `ceph/build`, with all commands invoked from `ceph/build`.
+
+* Create Zonegroup placement info for a Storage Class (QLC_CLASS in this example) and point the class at a data pool (qlc_pool in this example).
+NOTE: RGW will need to be restarted due to the Zonegroup placement info change.
+See: https://docs.ceph.com/en/latest/radosgw/placement/#zonegroup-zone-configuration for more information.
+
+```bash
+# Create Storage Class
+./bin/radosgw-admin zonegroup placement add --rgw-zonegroup default --placement-id default-placement --storage-class QLC_CLASS
+# Steer objects in QLC_CLASS to the qlc_pool data pool
+./bin/radosgw-admin zone placement add --rgw-zone default --placement-id default-placement --storage-class QLC_CLASS --data-pool qlc_pool
+```
+* Restart radosgw for Zone/ZoneGroup placement changes to take effect.
+
+* Upload the script:
+
+```bash
+./bin/radosgw-admin script put --infile=storage_class.lua --context=preRequest
+```
+
+* Create a bucket and put an object with a Storage Class header (no modification will occur):
+```bash
+aws --profile=ceph --endpoint=http://localhost:8000 s3api create-bucket --bucket test-bucket
+aws --profile=ceph --endpoint=http://localhost:8000 s3api put-object --bucket test-bucket --key truv-0 --body ./64KiB_object.bin --storage-class STANDARD
+```
+
+* Send a request without a Storage Class header (Storage Class will be changed to QLC_CLASS by Lua script):
+```bash
+aws --profile=ceph --endpoint=http://localhost:8000 s3api put-object --bucket test-bucket --key truv-0 --body ./64KiB_object.bin
+```
+NOTE: If you use s3cmd instead of the aws command line, s3cmd adds the "STANDARD" StorageClass to every put request, so the example Lua script will not modify it.
+
+* Verify that the S3 object had its StorageClass header added:
+```bash
+grep Lua ceph/build/out/radosgw.8000.log
+
+2021-11-01T17:10:14.048-0400 7f9c7f697640 20 Lua INFO: Put_Obj with StorageClass:
+2021-11-01T17:10:14.048-0400 7f9c7f697640 20 Lua INFO: No StorageClass for Object and size >= threshold: truv-0 adding QLC StorageClass
+```
+
+## Requirements
+* Lua 5.3
+
diff --git a/examples/rgw/rgw-cache/nginx-default.conf b/examples/rgw/rgw-cache/nginx-default.conf
new file mode 100644
index 000000000..c22d15531
--- /dev/null
+++ b/examples/rgw/rgw-cache/nginx-default.conf
@@ -0,0 +1,136 @@
+# Configure the cache size and the path to the cache directory; make sure that the user running nginx has permission to access the cache directory
+# max_size means that nginx will not cache more than 20G; tune it to a larger number if /data/cache is bigger
+proxy_cache_path /data/cache levels=2:2:2 keys_zone=mycache:999m max_size=20G inactive=1d use_temp_path=off;
+upstream rgws {
+ # List of all rgws (ips or resolvable names)
+ server rgw1:8000 max_fails=2 fail_timeout=5s;
+ server rgw2:8000 max_fails=2 fail_timeout=5s;
+ server rgw3:8000 max_fails=2 fail_timeout=5s;
+}
+server {
+ listen 80;
+ server_name cacher;
+ location /authentication {
+ internal;
+ client_max_body_size 0;
+ proxy_pass http://rgws$request_uri;
+ proxy_pass_request_body off;
+ proxy_set_header Host $host;
+ # Setting x-rgw-auth lets RGW authorize the request without fetching the object data
+ proxy_set_header x-rgw-auth "yes";
+ proxy_set_header Authorization $http_authorization;
+ proxy_http_version 1.1;
+ proxy_method $request_method;
+ # Do not convert HEAD requests into GET requests
+ proxy_cache_convert_head off;
+ error_page 404 = @outage;
+ proxy_intercept_errors on;
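+ # Each check below returns 200 so that the auth subrequest is skipped for
+ # requests that are never served from the cache; RGW authorizes those
+ # requests itself when they are proxied upstream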
+ if ($request_uri = "/") {
+ return 200;
+ }
+ # URIs containing a question mark are not cached
+ if ($request_uri ~* (\?)) {
+ return 200;
+ }
+ if ($request_method = "PUT") {
+ return 200;
+ }
+ if ($request_method = "POST") {
+ return 200;
+ }
+ if ($request_method = "HEAD") {
+ return 200;
+ }
+ if ($request_method = "COPY") {
+ return 200;
+ }
+ if ($request_method = "DELETE") {
+ return 200;
+ }
+ if ($http_if_match) {
+ return 200;
+ }
+ if ($http_authorization !~* "aws4_request") {
+ return 200;
+ }
+ }
+ location @outage {
+ return 403;
+ }
+ location / {
+ auth_request /authentication;
+ proxy_pass http://rgws;
+ set $authvar '';
+ # If $do_not_cache is not empty the request will not be cached; this is relevant e.g. for list operations
+ set $do_not_cache '';
+ # Build the X-Amz-Cache header value from the request's signed headers (see nginx-lua-file.lua)
+ rewrite_by_lua_file /etc/nginx/nginx-lua-file.lua;
+ #proxy_set_header Authorization $http_authorization;
+ # my cache configured at the top of the file
+ proxy_cache mycache;
+ proxy_cache_lock_timeout 0s;
+ proxy_cache_lock_age 1000s;
+ proxy_http_version 1.1;
+ set $date $aws_auth_date;
+ # Getting 403 if this header not set
+ proxy_set_header Host $host;
+ # Cache all 200 OK's for 1 day
+ proxy_cache_valid 200 206 1d;
+ # Use stale cache file in all errors from upstream if we can
+ proxy_cache_use_stale updating;
+ proxy_cache_background_update on;
+ # Revalidate with the ETag; if it has not changed, do not re-fetch the object from RGW
+ proxy_cache_revalidate on;
+ # Lock the cache so that only one request can populate it at a time
+ proxy_cache_lock on;
+ # prevent conversion of head requests to get requests
+ proxy_cache_convert_head off;
+ # Listing all buckets should not be cached
+ if ($request_uri = "/") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ # URIs containing a question mark are not cached, to prevent caching of bucket listings
+ if ($request_uri ~* (\?)) {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ # Only AWS v4 requests are cached, as the aws auth module generates only v4 signatures
+ if ($http_authorization !~* "aws4_request") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "PUT") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "POST") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "HEAD") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "COPY") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ if ($http_if_match) {
+ #set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ set $myrange $http_range;
+ }
+ if ($request_method = "DELETE") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ proxy_set_header if_match $http_if_match;
+ proxy_set_header Range $myrange;
+ # Use the original x-amz-date if the aws auth module didn't create one
+ proxy_set_header x-amz-date $date;
+ proxy_set_header X-Amz-Cache $authvar;
+ proxy_no_cache $do_not_cache;
+ proxy_set_header Authorization $awsauthfour;
+ # The variables nginx uses to build the cache key hash
+ proxy_cache_key "$request_uri$request_method$request_body$myrange";
+ client_max_body_size 0;
+ }
+}
diff --git a/examples/rgw/rgw-cache/nginx-lua-file.lua b/examples/rgw/rgw-cache/nginx-lua-file.lua
new file mode 100644
index 000000000..efaf42230
--- /dev/null
+++ b/examples/rgw/rgw-cache/nginx-lua-file.lua
@@ -0,0 +1,26 @@
+local ngx_re = require "ngx.re"
+local check = ngx.req.get_headers()["AUTHORIZATION"]
+local uri = ngx.var.request_uri
+local hdrs = ngx.req.get_headers()
+-- Only parse requests whose authorization header carries a SignedHeaders clause (AWS v4)
+if check ~= nil and check:find("SignedHeaders=") ~= nil then
+    -- Take all SignedHeaders names; they are used to build the X-Amz-Cache header,
+    -- which is needed to override the Range header so that an object can be read ahead
+    local res, err = ngx_re.split(check, "SignedHeaders=")
+    local res2, err2 = ngx_re.split(res[2], ",")
+    local res3, err3 = ngx_re.split(res2[1], ";")
+    local t = {}
+    local concathdrs = string.char(0x00)
+    for i = 1, #res3, 1 do
+        if hdrs[res3[i]] ~= nil then
+            -- 0xB1 is the separator between a header name and its value
+            t[i] = res3[i] .. string.char(0xB1) .. hdrs[res3[i]]
+            -- 0xB2 is the separator between headers
+            concathdrs = concathdrs .. string.char(0xB2) .. t[i]
+        end
+    end
+    local xamzcache = concathdrs:sub(2)
+    xamzcache = xamzcache .. string.char(0xB2) .. "Authorization" .. string.char(0xB1) .. check
+    if xamzcache:find("aws4_request") ~= nil and uri ~= "/" and uri:find("?") == nil and hdrs["if-match"] == nil then
+        ngx.var.authvar = xamzcache
+    end
+end
diff --git a/examples/rgw/rgw-cache/nginx-noprefetch.conf b/examples/rgw/rgw-cache/nginx-noprefetch.conf
new file mode 100644
index 000000000..02294415a
--- /dev/null
+++ b/examples/rgw/rgw-cache/nginx-noprefetch.conf
@@ -0,0 +1,101 @@
+# Configure the cache size and the path to the cache directory; make sure that the user running nginx has permission to access the cache directory
+# max_size means that nginx will not cache more than 20G; tune it to a larger number if /data/cache is bigger
+proxy_cache_path /data/cache levels=2:2:2 keys_zone=mycache:999m max_size=20G inactive=1d use_temp_path=off;
+upstream rgws {
+ # List of all rgws (ips or resolvable names)
+ server rgw1:8000 max_fails=2 fail_timeout=5s;
+ server rgw2:8000 max_fails=2 fail_timeout=5s;
+ server rgw3:8000 max_fails=2 fail_timeout=5s;
+}
+server {
+ listen 80;
+ server_name cacher;
+ location /authentication {
+ internal;
+ client_max_body_size 0;
+ proxy_pass http://rgws$request_uri;
+ proxy_pass_request_body off;
+ proxy_set_header Host $host;
+ # Setting x-rgw-auth lets RGW authorize the request without fetching the object data
+ proxy_set_header x-rgw-auth "yes";
+ proxy_set_header Authorization $http_authorization;
+ proxy_http_version 1.1;
+ proxy_method $request_method;
+ # Do not convert HEAD requests into GET requests
+ proxy_cache_convert_head off;
+ error_page 404 = @outage;
+ proxy_intercept_errors on;
+ if ($request_uri = "/") {
+ return 200;
+ }
+ # URIs containing a question mark are not cached
+ if ($request_uri ~* (\?)) {
+ return 200;
+ }
+ if ($request_method = "PUT") {
+ return 200;
+ }
+ if ($request_method = "POST") {
+ return 200;
+ }
+ if ($request_method = "HEAD") {
+ return 200;
+ }
+ if ($request_method = "COPY") {
+ return 200;
+ }
+ if ($request_method = "DELETE") {
+ return 200;
+ }
+ if ($http_if_match) {
+ return 200;
+ }
+ if ($http_authorization !~* "aws4_request") {
+ return 200;
+ }
+ }
+ location @outage {
+ return 403;
+ }
+ location / {
+ auth_request /authentication;
+ proxy_pass http://rgws;
+ # If $do_not_cache is not empty the request will not be cached; this is relevant e.g. for list operations
+ set $do_not_cache '';
+ #proxy_set_header Authorization $http_authorization;
+ # my cache configured at the top of the file
+ proxy_cache mycache;
+ proxy_cache_lock_timeout 0s;
+ proxy_cache_lock_age 1000s;
+ proxy_http_version 1.1;
+ # Getting 403 if this header not set
+ proxy_set_header Host $host;
+ # Cache all 200 OK's for 1 day
+ proxy_cache_valid 200 206 1d;
+ # Use stale cache file in all errors from upstream if we can
+ proxy_cache_use_stale updating;
+ proxy_cache_background_update on;
+ # Revalidate with the ETag; if it has not changed, do not re-fetch the object from RGW
+ proxy_cache_revalidate on;
+ # Lock the cache so that only one request can populate it at a time
+ proxy_cache_lock on;
+ # prevent conversion of head requests to get requests
+ proxy_cache_convert_head off;
+ # Listing all buckets should not be cached
+ if ($request_uri = "/") {
+ set $do_not_cache "no";
+ }
+ # URIs containing a question mark are not cached, to prevent caching of bucket listings
+ if ($request_uri ~* (\?)) {
+ set $do_not_cache "no";
+ }
+ # Skip caching when $do_not_cache is set, and pass the original Authorization and Range headers through to RGW
+ proxy_no_cache $do_not_cache;
+ proxy_set_header Authorization $http_authorization;
+ proxy_set_header Range $http_range;
+ # The variables nginx uses to build the cache key hash
+ proxy_cache_key "$request_uri$request_method$request_body$http_range";
+ client_max_body_size 0;
+ }
+}
diff --git a/examples/rgw/rgw-cache/nginx-slicing.conf b/examples/rgw/rgw-cache/nginx-slicing.conf
new file mode 100644
index 000000000..3de4c67fc
--- /dev/null
+++ b/examples/rgw/rgw-cache/nginx-slicing.conf
@@ -0,0 +1,137 @@
+# Configure the cache size and the path to the cache directory; make sure that the user running nginx has permission to access the cache directory
+# max_size means that nginx will not cache more than 20G; tune it to a larger number if /data/cache is bigger
+proxy_cache_path /data/cache levels=2:2:2 keys_zone=mycache:999m max_size=20G inactive=1d use_temp_path=off;
+upstream rgws {
+ # List of all rgws (ips or resolvable names)
+ server rgw1:8000 max_fails=2 fail_timeout=5s;
+ server rgw2:8000 max_fails=2 fail_timeout=5s;
+ server rgw3:8000 max_fails=2 fail_timeout=5s;
+}
+server {
+ listen 80;
+ server_name cacher;
+ location /authentication {
+ internal;
+ client_max_body_size 0;
+ proxy_pass http://rgws$request_uri;
+ proxy_pass_request_body off;
+ proxy_set_header Host $host;
+ # Setting x-rgw-auth lets RGW authorize the request without fetching the object data
+ proxy_set_header x-rgw-auth "yes";
+ proxy_set_header Authorization $http_authorization;
+ proxy_http_version 1.1;
+ proxy_method $request_method;
+ # Do not convert HEAD requests into GET requests
+ proxy_cache_convert_head off;
+ error_page 404 = @outage;
+ proxy_intercept_errors on;
+ if ($request_uri = "/") {
+ return 200;
+ }
+ # URIs containing a question mark are not cached
+ if ($request_uri ~* (\?)) {
+ return 200;
+ }
+ if ($request_method = "PUT") {
+ return 200;
+ }
+ if ($request_method = "POST") {
+ return 200;
+ }
+ if ($request_method = "HEAD") {
+ return 200;
+ }
+ if ($request_method = "COPY") {
+ return 200;
+ }
+ if ($request_method = "DELETE") {
+ return 200;
+ }
+ if ($http_if_match) {
+ return 200;
+ }
+ if ($http_authorization !~* "aws4_request") {
+ return 200;
+ }
+ }
+ location @outage {
+ return 403;
+ }
+ location / {
+ slice 1m;
+ auth_request /authentication;
+ proxy_set_header Range $slice_range;
+ proxy_pass http://rgws;
+ set $authvar '';
+ # If $do_not_cache is not empty the request will not be cached; this is relevant e.g. for list operations
+ set $do_not_cache '';
+ # Build the X-Amz-Cache header value from the request's signed headers (see nginx-lua-file.lua)
+ rewrite_by_lua_file /etc/nginx/nginx-lua-file.lua;
+ #proxy_set_header Authorization $http_authorization;
+ # my cache configured at the top of the file
+ proxy_cache mycache;
+ proxy_cache_lock_timeout 0s;
+ proxy_cache_lock_age 1000s;
+ proxy_http_version 1.1;
+ set $date $aws_auth_date;
+ # Getting 403 if this header not set
+ proxy_set_header Host $host;
+ # Cache all 200 OK's for 1 day
+ proxy_cache_valid 200 206 1d;
+ # Use stale cache file in all errors from upstream if we can
+ proxy_cache_use_stale updating;
+ proxy_cache_background_update on;
+ # Revalidate with the ETag; if it has not changed, do not re-fetch the object from RGW
+ proxy_cache_revalidate on;
+ # Lock the cache so that only one request can populate it at a time
+ proxy_cache_lock on;
+ # prevent conversion of head requests to get requests
+ proxy_cache_convert_head off;
+ # Listing all buckets should not be cached
+ if ($request_uri = "/") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ # URIs containing a question mark are not cached, to prevent caching of bucket listings
+ if ($request_uri ~* (\?)) {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ # Only AWS v4 requests are cached, as the aws auth module generates only v4 signatures
+ if ($http_authorization !~* "aws4_request") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "PUT") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "POST") {
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "HEAD") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ if ($request_method = "COPY") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ if ($http_if_match) {
+ #set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ set $myrange $slice_range;
+ }
+ if ($request_method = "DELETE") {
+ set $do_not_cache "no";
+ set $date $http_x_amz_date;
+ }
+ proxy_set_header if_match $http_if_match;
+ # Use the original x-amz-date if the aws auth module didn't create one
+ proxy_set_header x-amz-date $date;
+ proxy_set_header X-Amz-Cache $authvar;
+ proxy_no_cache $do_not_cache;
+ proxy_set_header Authorization $awsauthfour;
+ # The variables nginx uses to build the cache key hash
+ proxy_cache_key "$request_uri$request_method$request_body$slice_range";
+ client_max_body_size 0;
+ }
+}
diff --git a/examples/rgw/rgw-cache/nginx.conf b/examples/rgw/rgw-cache/nginx.conf
new file mode 100644
index 000000000..a478db1dc
--- /dev/null
+++ b/examples/rgw/rgw-cache/nginx.conf
@@ -0,0 +1,57 @@
+
+user nginx;
+#Process per core
+worker_processes auto;
+pid /var/run/nginx.pid;
+events {
+#Number of connections per worker
+ worker_connections 1024;
+}
+
+
+http {
+ types_hash_max_size 4096;
+ lua_package_path '/usr/local/openresty/lualib/?.lua;;';
+ aws_auth $aws_token {
+ # access key and secret key of the cache
+ # Please substitute with the access key and secret key of the amz-cache cap user
+ access_key cache;
+ secret_key cache;
+ service s3;
+ region us-east-1;
+ }
+ # This map chooses the original authorization header if the aws_auth module refuses to create one
+ map $aws_token $awsauth {
+ default $http_authorization;
+ ~. $aws_token; # regular expression matching any non-empty value
+ }
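+ # Bucket listings ("/") and requests with a query string keep the client's original Authorization header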
+ map $request_uri $awsauthtwo {
+ "/" $http_authorization;
+ "~\?" $http_authorization;
+ default $awsauth;
+ }
+ map $request_method $awsauththree {
+ default $awsauthtwo;
+ "PUT" $http_authorization;
+ "HEAD" $http_authorization;
+ "POST" $http_authorization;
+ "DELETE" $http_authorization;
+ "COPY" $http_authorization;
+ }
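+ # Requests carrying an If-Match header also keep the original Authorization header; $awsauthfour is the final value used by the cache configs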
+ map $http_if_match $awsauthfour {
+ ~. $http_authorization; # regular expression matching any non-empty value
+ default $awsauththree;
+ }
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for"';
+ error_log /var/log/nginx/error.log;
+ access_log /var/log/nginx/access.log main;
+
+ sendfile on;
+ tcp_nodelay on;
+ keepalive_timeout 65;
+ include /etc/nginx/conf.d/*.conf;
+}
diff --git a/examples/rgw/rgw_admin_curl.sh b/examples/rgw/rgw_admin_curl.sh
new file mode 100644
index 000000000..7bab8e5fa
--- /dev/null
+++ b/examples/rgw/rgw_admin_curl.sh
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+
+show_help()
+{
+ echo "Usage: `basename $0` -a <access-key> -s <secret-key>" \
+ "-e <rgw-endpoint> -r <http-request>" \
+ "-p <admin-resource> -q \"<http-query-string>\""
+ echo " -a Access key of rgw user"
+ echo " -s Secret key of rgw user"
+ echo " -e RGW endpoint in <ipaddr:port> format"
+ echo " -r HTTP request type GET/PUT/DELETE"
+ echo " -p RGW admin resource e.g user, bucket etc"
+ echo " -q HTTP query string"
+ echo " -j (Optional) Print output in pretty JSON format"
+ echo " Examples :"
+ echo " - To create rgw user"
+ echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
+ "-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
+ "-e 10.0.0.1:8080 -r PUT -p user" \
+ "-q \"uid=admin&display-name=Administrator\""
+ echo " - To get rgw user info"
+ echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
+ "-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
+ "-e 10.0.0.1:8080 -r GET -p user -q \"uid=admin\""
+ echo " - To list buckets"
+ echo " (List all buckets)"
+ echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
+ "-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
+ "-e 10.0.0.1:8080 -r GET -p bucket"
+ echo " (For specific rgw user)"
+ echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
+ "-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
+ "-e 10.0.0.1:8080 -r GET -p bucket -q \"uid=admin\""
+ echo " - To delete bucket"
+ echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
+ "-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
+ "-e 10.0.0.1:8080 -r DELETE -p bucket -q \"bucket=foo\""
+ echo " - To delete rgw user"
+ echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
+ "-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
+ "-e 10.0.0.1:8080 -r DELETE -p user -q \"uid=admin\""
+ exit 1
+}
+
+access_key=""
+secret_key=""
+rgw_endpoint=""
+http_request=""
+admin_resource=""
+http_query=""
+use_jq=false
+
+while getopts "a:s:e:r:p:q:j" opt; do
+ case "$opt" in
+ a)
+ access_key=${OPTARG}
+ ;;
+ s) secret_key=${OPTARG}
+ ;;
+ e) rgw_endpoint=${OPTARG}
+ ;;
+ r) http_request=${OPTARG}
+ ;;
+ p) admin_resource=${OPTARG}
+ ;;
+ q) http_query=${OPTARG}
+ ;;
+ j) use_jq=true
+ ;;
+ *)
+ show_help
+ exit 1
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+
+if [ -z "${access_key}" ] || [ -z "${secret_key}" ] || \
+ [ -z "${rgw_endpoint}" ] || [ -z "${http_request}" ] || \
+ [ -z "${admin_resource}" ] || [ -z "${http_query}" ]; then
+ if [ "${http_request}" = "GET" ] && [ "${admin_resource}" = "bucket" ] && \
+ [ -z "${http_query}" ]; then
+ :
+ else
+ show_help
+ fi
+fi
+
+resource="/admin/${admin_resource}"
+contentType="application/x-compressed-tar"
+dateTime=`date -R -u`
+
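+# AWS signature v2 string-to-sign: the HTTP verb, Content-MD5 (empty here),
+# Content-Type, Date, and the canonicalized resource, each on its own line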
+headerToSign="${http_request}
+
+${contentType}
+${dateTime}
+${resource}"
+
+signature=`echo -en "$headerToSign" | \
+ openssl sha1 -hmac ${secret_key} -binary | base64`
+
+if "$use_jq";
+then
+ curl -X ${http_request} -H "Content-Type: ${contentType}" -H "Date: ${dateTime}" \
+ -H "Authorization: AWS ${access_key}:${signature}" -H "Host: ${rgw_endpoint}" \
+ "http://${rgw_endpoint}${resource}?${http_query}" 2> /dev/null|jq "."
+else
+ curl -X ${http_request} -H "Content-Type: ${contentType}" -H "Date: ${dateTime}" \
+ -H "Authorization: AWS ${access_key}:${signature}" -H "Host: ${rgw_endpoint}" \
+ "http://${rgw_endpoint}${resource}?${http_query}"
+fi
+echo ""