summaryrefslogtreecommitdiffstats
path: root/src/pybind/mgr/diskprediction_cloud
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
commit483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
treee5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/pybind/mgr/diskprediction_cloud
parentInitial commit. (diff)
downloadceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.tar.xz
ceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.zip
Adding upstream version 14.2.21.upstream/14.2.21upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/pybind/mgr/diskprediction_cloud')
-rw-r--r--src/pybind/mgr/diskprediction_cloud/__init__.py1
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/__init__.py38
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py61
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py145
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py222
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py57
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py703
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py70
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py35
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py175
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py182
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py105
-rw-r--r--src/pybind/mgr/diskprediction_cloud/agent/predictor.py48
-rw-r--r--src/pybind/mgr/diskprediction_cloud/common/__init__.py61
-rw-r--r--src/pybind/mgr/diskprediction_cloud/common/client_pb2.py1775
-rw-r--r--src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py395
-rw-r--r--src/pybind/mgr/diskprediction_cloud/common/clusterdata.py464
-rw-r--r--src/pybind/mgr/diskprediction_cloud/common/cypher.py71
-rw-r--r--src/pybind/mgr/diskprediction_cloud/common/grpcclient.py242
-rw-r--r--src/pybind/mgr/diskprediction_cloud/common/server.crt17
-rw-r--r--src/pybind/mgr/diskprediction_cloud/module.py454
-rw-r--r--src/pybind/mgr/diskprediction_cloud/requirements.txt12
-rw-r--r--src/pybind/mgr/diskprediction_cloud/task.py181
23 files changed, 5514 insertions, 0 deletions
diff --git a/src/pybind/mgr/diskprediction_cloud/__init__.py b/src/pybind/mgr/diskprediction_cloud/__init__.py
new file mode 100644
index 00000000..8f210ac9
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/__init__.py
@@ -0,0 +1 @@
+from .module import Module
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/__init__.py b/src/pybind/mgr/diskprediction_cloud/agent/__init__.py
new file mode 100644
index 00000000..c7702e52
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/__init__.py
@@ -0,0 +1,38 @@
+from __future__ import absolute_import
+
+from ..common import timeout, TimeoutError
+
+
class BaseAgent(object):
    """ Common base for diskprediction agents.

    An agent collects data from the mgr module into ``self.data`` and then
    pushes it through the sender object; both steps run under the ``timeout``
    decorator imported from ``..common``.
    """

    # measurement name, overridden by concrete agents
    measurement = ''

    def __init__(self, mgr_module, obj_sender, timeout=30):
        """
        :param mgr_module: mgr module instance (provides ``log``)
        :param obj_sender: client used to send the collected data
        :param timeout: per-task timeout in seconds
        """
        self.data = []
        # fix: original assigned self._client = None and immediately
        # overwrote it with obj_sender -- the dead store is removed
        self._client = obj_sender
        self._logger = mgr_module.log
        self._module_inst = mgr_module
        self._timeout = timeout

    def run(self):
        """ Collect then push data; a timeout is logged, not propagated. """
        try:
            self._collect_data()
            self._run()
        except TimeoutError:
            self._logger.error('{} failed to execute {} task'.format(
                __name__, self.measurement))

    def __nonzero__(self):
        # truthiness mirrors whether a mgr module instance is attached
        if not self._module_inst:
            return False
        else:
            return True

    # Python 3 spells the truth-value hook __bool__; keep both so the
    # agent is truthy/falsy consistently on either interpreter
    __bool__ = __nonzero__

    @timeout()
    def _run(self):
        pass

    @timeout()
    def _collect_data(self):
        pass
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py
new file mode 100644
index 00000000..9e7e5b0b
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py
@@ -0,0 +1,61 @@
+from __future__ import absolute_import
+
+from .. import BaseAgent
+from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING, DP_MGR_STAT_OK
+
+AGENT_VERSION = '1.0.0'
+
+
class MetricsField(object):
    """ One metric point: tag dict, field dict and an optional timestamp. """

    def __init__(self):
        # fresh containers per instance; timestamp stays unset until filled
        self.tags = dict()
        self.fields = dict()
        self.timestamp = None

    def __str__(self):
        snapshot = {
            'tags': self.tags,
            'fields': self.fields,
            'timestamp': self.timestamp,
        }
        return str(snapshot)
+
+
class MetricsAgent(BaseAgent):
    """ Base for metric-sending agents: pushes collected points through the
    client and reflects the send result in the module status. """

    def log_summary(self, status_info):
        """ Log the success/failure counts returned by the sender.

        Best-effort: any exception is logged and swallowed so a malformed
        status dict cannot break the send path.
        """
        try:
            if status_info:
                measurement = status_info['measurement']
                success_count = status_info['success_count']
                failure_count = status_info['failure_count']
                total_count = success_count + failure_count
                display_string = \
                    '%s agent stats in total count: %s, success count: %s, failure count: %s.'
                self._logger.info(
                    display_string % (measurement, total_count, success_count, failure_count)
                )
        except Exception as e:
            self._logger.error(str(e))

    def _run(self):
        """ Send ``self.data`` (if any) and update the module status. """
        collect_data = self.data
        result = {}
        if collect_data and self._client:
            status_info = self._client.send_info(collect_data, self.measurement)
            # show summary info
            self.log_summary(status_info)
            # fix: log_summary tolerates a falsy status_info but the code
            # below indexed it unconditionally and raised TypeError when
            # send_info returned None -- guard before reading the counts
            if not status_info:
                return result
            # write sub_agent buffer
            total_count = status_info['success_count'] + status_info['failure_count']
            if total_count:
                if status_info['success_count'] == 0:
                    self._module_inst.status = \
                        {'status': DP_MGR_STAT_FAILED,
                         'reason': 'failed to send metrics data to the server'}
                elif status_info['failure_count'] == 0:
                    self._module_inst.status = \
                        {'status': DP_MGR_STAT_OK}
                else:
                    self._module_inst.status = \
                        {'status': DP_MGR_STAT_WARNING,
                         'reason': 'failed to send partial metrics data to the server'}
        return result
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py
new file mode 100644
index 00000000..2491644a
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py
@@ -0,0 +1,145 @@
+from __future__ import absolute_import
+
+import socket
+
+from . import MetricsAgent, MetricsField
+from ...common.clusterdata import ClusterAPI
+
+
class CephCluster(MetricsField):
    """ Ceph cluster structure """
    measurement = 'ceph_cluster'

    # numeric summary fields, initialised to None in this exact order
    _NUMERIC_FIELDS = (
        'num_mon', 'num_mon_quorum', 'num_osd', 'num_osd_up', 'num_osd_in',
        'osd_epoch', 'osd_bytes', 'osd_bytes_used', 'osd_bytes_avail',
        'num_pool', 'num_pg', 'num_pg_active_clean', 'num_pg_active',
        'num_pg_peering', 'num_object', 'num_object_degraded',
        'num_object_misplaced', 'num_object_unfound', 'num_bytes',
        'num_mds_up', 'num_mds_in', 'num_mds_failed', 'mds_epoch',
    )

    def __init__(self):
        super(CephCluster, self).__init__()
        self.tags['cluster_id'] = None
        self.fields['agenthost'] = None
        self.tags['agenthost_domain_id'] = None
        self.fields['cluster_health'] = ''
        for field_name in self._NUMERIC_FIELDS:
            self.fields[field_name] = None
+
+
class CephClusterAgent(MetricsAgent):
    """ Collects one CephCluster point summarising cluster-wide state. """
    measurement = 'ceph_cluster'

    def _collect_data(self):
        # process data and save to 'self.data'
        obj_api = ClusterAPI(self._module_inst)
        cluster_id = obj_api.get_cluster_id()

        c_data = CephCluster()
        cluster_state = obj_api.get_health_status()
        c_data.tags['cluster_id'] = cluster_id
        c_data.fields['cluster_health'] = str(cluster_state)
        c_data.fields['agenthost'] = socket.gethostname()
        c_data.tags['agenthost_domain_id'] = cluster_id
        c_data.fields['osd_epoch'] = obj_api.get_osd_epoch()
        c_data.fields['num_mon'] = len(obj_api.get_mons())
        c_data.fields['num_mon_quorum'] = \
            len(obj_api.get_mon_status().get('quorum', []))

        osds = obj_api.get_osds()
        num_osd_up = 0
        num_osd_in = 0
        for osd_data in osds:
            if osd_data.get('up'):
                num_osd_up = num_osd_up + 1
            if osd_data.get('in'):
                num_osd_in = num_osd_in + 1
        # get_osds() may return a falsy value, so guard before len()
        if osds:
            c_data.fields['num_osd'] = len(osds)
        else:
            c_data.fields['num_osd'] = 0
        c_data.fields['num_osd_up'] = num_osd_up
        c_data.fields['num_osd_in'] = num_osd_in
        c_data.fields['num_pool'] = len(obj_api.get_osd_pools())

        df_stats = obj_api.module.get('df').get('stats', {})
        total_bytes = df_stats.get('total_bytes', 0)
        total_used_bytes = df_stats.get('total_used_bytes', 0)
        total_avail_bytes = df_stats.get('total_avail_bytes', 0)
        c_data.fields['osd_bytes'] = total_bytes
        c_data.fields['osd_bytes_used'] = total_used_bytes
        c_data.fields['osd_bytes_avail'] = total_avail_bytes
        if total_bytes and total_avail_bytes:
            c_data.fields['osd_bytes_used_percentage'] = \
                round((float(total_used_bytes) / float(total_bytes)) * 100, 4)
        else:
            c_data.fields['osd_bytes_used_percentage'] = 0.0000

        pg_stats = obj_api.module.get('pg_stats').get('pg_stats', [])
        num_bytes = 0
        num_object = 0
        num_object_degraded = 0
        num_object_misplaced = 0
        num_object_unfound = 0
        num_pg_active = 0
        num_pg_active_clean = 0
        num_pg_peering = 0
        for pg_data in pg_stats:
            # NOTE(review): this accumulates the acting-set size of every PG,
            # not a count of PGs in active state -- kept as-is to preserve
            # the reported metric; confirm intent upstream
            num_pg_active = num_pg_active + len(pg_data.get('acting'))
            if 'active+clean' in pg_data.get('state'):
                num_pg_active_clean = num_pg_active_clean + 1
            if 'peering' in pg_data.get('state'):
                num_pg_peering = num_pg_peering + 1

            stat_sum = pg_data.get('stat_sum', {})
            num_object = num_object + stat_sum.get('num_objects', 0)
            num_object_degraded = \
                num_object_degraded + stat_sum.get('num_objects_degraded', 0)
            num_object_misplaced = \
                num_object_misplaced + stat_sum.get('num_objects_misplaced', 0)
            num_object_unfound = \
                num_object_unfound + stat_sum.get('num_objects_unfound', 0)
            num_bytes = num_bytes + stat_sum.get('num_bytes', 0)

        c_data.fields['num_pg'] = len(pg_stats)
        c_data.fields['num_object'] = num_object
        c_data.fields['num_object_degraded'] = num_object_degraded
        c_data.fields['num_object_misplaced'] = num_object_misplaced
        c_data.fields['num_object_unfound'] = num_object_unfound
        c_data.fields['num_bytes'] = num_bytes
        c_data.fields['num_pg_active'] = num_pg_active
        c_data.fields['num_pg_active_clean'] = num_pg_active_clean
        # bug fix: this field was assigned num_pg_active_clean, discarding
        # the peering count accumulated above
        c_data.fields['num_pg_peering'] = num_pg_peering

        filesystems = obj_api.get_file_systems()
        num_mds_in = 0
        num_mds_up = 0
        num_mds_failed = 0
        mds_epoch = 0
        for fs_data in filesystems:
            num_mds_in = \
                num_mds_in + len(fs_data.get('mdsmap', {}).get('in', []))
            num_mds_up = \
                num_mds_up + len(fs_data.get('mdsmap', {}).get('up', {}))
            num_mds_failed = \
                num_mds_failed + len(fs_data.get('mdsmap', {}).get('failed', []))
            mds_epoch = mds_epoch + fs_data.get('mdsmap', {}).get('epoch', 0)
        c_data.fields['num_mds_in'] = num_mds_in
        c_data.fields['num_mds_up'] = num_mds_up
        c_data.fields['num_mds_failed'] = num_mds_failed
        c_data.fields['mds_epoch'] = mds_epoch
        self.data.append(c_data)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py
new file mode 100644
index 00000000..4b4d8fa8
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py
@@ -0,0 +1,222 @@
+from __future__ import absolute_import
+
+import socket
+
+from . import MetricsAgent, MetricsField
+from ...common.clusterdata import ClusterAPI
+
+
class CephMON(MetricsField):
    """ Ceph monitor structure """
    measurement = 'ceph_mon'

    # per-mon perf counters mirrored into fields (all default to None)
    _COUNTER_FIELDS = (
        'num_sessions', 'session_add', 'session_rm', 'session_trim',
        'num_elections', 'election_call', 'election_win', 'election_lose',
    )

    def __init__(self):
        super(CephMON, self).__init__()
        self.tags['cluster_id'] = None
        self.tags['mon_id'] = None
        self.fields['agenthost'] = None
        self.tags['agenthost_domain_id'] = None
        for counter in self._COUNTER_FIELDS:
            self.fields[counter] = None
+
+
class CephErasureProfile(MetricsField):
    """ Ceph osd erasure profile """
    measurement = 'ceph_erasure_profile'

    def __init__(self):
        super(CephErasureProfile, self).__init__()
        # identifying tags first; the profile name is the only fixed field
        for tag in ('cluster_id', 'agenthost_domain_id', 'host_domain_id'):
            self.tags[tag] = None
        self.fields['agenthost'] = None
        self.fields['name'] = None
+
+
class CephOsdTree(MetricsField):
    """ Ceph osd tree map """
    measurement = 'ceph_osd_tree'

    def __init__(self):
        super(CephOsdTree, self).__init__()
        # identifying tags first; the node name is the only fixed field
        for tag in ('cluster_id', 'agenthost_domain_id', 'host_domain_id'):
            self.tags[tag] = None
        self.fields['agenthost'] = None
        self.fields['name'] = None
+
+
class CephOSD(MetricsField):
    """ Ceph osd structure """
    measurement = 'ceph_osd'

    # osd op perf counters mirrored into fields (all default to None)
    _OP_FIELDS = (
        'op_w', 'op_in_bytes', 'op_r', 'op_out_bytes', 'op_wip',
        'op_latency', 'op_process_latency', 'op_r_latency',
        'op_r_process_latency', 'op_w_in_bytes', 'op_w_latency',
        'op_w_process_latency', 'op_w_prepare_latency', 'op_rw',
        'op_rw_in_bytes', 'op_rw_out_bytes', 'op_rw_latency',
        'op_rw_process_latency', 'op_rw_prepare_latency',
        'op_before_queue_op_lat', 'op_before_dequeue_op_lat',
    )

    def __init__(self):
        super(CephOSD, self).__init__()
        for tag in ('cluster_id', 'osd_id', 'agenthost_domain_id',
                    'host_domain_id'):
            self.tags[tag] = None
        self.fields['agenthost'] = None
        for counter in self._OP_FIELDS:
            self.fields[counter] = None
+
+
class CephMonOsdAgent(MetricsAgent):
    """ Collects per-mon and per-osd perf counters, plus the erasure
    profiles and the crush tree layout. """
    measurement = 'ceph_mon_osd'

    # counter types (bit flags from the mgr perf-counter schema)
    PERFCOUNTER_LONGRUNAVG = 4
    PERFCOUNTER_COUNTER = 8
    PERFCOUNTER_HISTOGRAM = 0x10
    PERFCOUNTER_TYPE_MASK = ~3

    def _stattype_to_str(self, stattype):
        """ Map a perf-counter type bitmask to 'gauge'/'counter'/'histogram'. """
        typeonly = stattype & self.PERFCOUNTER_TYPE_MASK
        if typeonly == 0:
            return 'gauge'
        if typeonly == self.PERFCOUNTER_LONGRUNAVG:
            # this lie matches the DaemonState decoding: only val, no counts
            return 'counter'
        if typeonly == self.PERFCOUNTER_COUNTER:
            return 'counter'
        if typeonly == self.PERFCOUNTER_HISTOGRAM:
            return 'histogram'
        return ''

    def _generate_osd_erasure_profile(self, cluster_id):
        """ Emit one CephErasureProfile point per profile in the osd map.

        (Renamed from the misspelled '_generage_osd_erasure_profile'; the
        only caller, _collect_data below, is updated to match.)
        """
        obj_api = ClusterAPI(self._module_inst)
        osd_map = obj_api.module.get('osd_map')
        if osd_map:
            for n, n_value in osd_map.get('erasure_code_profiles', {}).items():
                e_osd = CephErasureProfile()
                e_osd.fields['name'] = n
                e_osd.tags['cluster_id'] = cluster_id
                e_osd.fields['agenthost'] = socket.gethostname()
                e_osd.tags['agenthost_domain_id'] = cluster_id
                e_osd.tags['host_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname())
                # profile settings are exported as stringified fields
                for k in n_value.keys():
                    e_osd.fields[k] = str(n_value[k])
                self.data.append(e_osd)

    def _generate_osd_tree(self, cluster_id):
        """ Emit one CephOsdTree point per node of the crush tree. """
        obj_api = ClusterAPI(self._module_inst)
        osd_tree = obj_api.module.get('osd_map_tree')
        if osd_tree:
            for node in osd_tree.get('nodes', []):
                n_node = CephOsdTree()
                n_node.tags['cluster_id'] = cluster_id
                n_node.fields['agenthost'] = socket.gethostname()
                n_node.tags['agenthost_domain_id'] = cluster_id
                n_node.tags['host_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname())
                n_node.fields['children'] = ','.join(str(x) for x in node.get('children', []))
                n_node.fields['type_id'] = str(node.get('type_id', ''))
                n_node.fields['id'] = str(node.get('id', ''))
                n_node.fields['name'] = str(node.get('name', ''))
                n_node.fields['type'] = str(node.get('type', ''))
                n_node.fields['reweight'] = float(node.get('reweight', 0.0))
                n_node.fields['crush_weight'] = float(node.get('crush_weight', 0.0))
                n_node.fields['primary_affinity'] = float(node.get('primary_affinity', 0.0))
                n_node.fields['device_class'] = str(node.get('device_class', ''))
                self.data.append(n_node)

    def _generate_osd(self, cluster_id, service_name, perf_counts):
        """ Emit one CephOSD point for daemon 'osd.N' from its perf counters. """
        obj_api = ClusterAPI(self._module_inst)
        service_id = service_name[4:]   # strip the 'osd.' daemon prefix
        d_osd = CephOSD()
        stat_bytes = 0
        stat_bytes_used = 0
        d_osd.tags['cluster_id'] = cluster_id
        d_osd.tags['osd_id'] = service_name[4:]
        d_osd.fields['agenthost'] = socket.gethostname()
        d_osd.tags['agenthost_domain_id'] = cluster_id
        d_osd.tags['host_domain_id'] = \
            '%s_%s' % (cluster_id,
                       obj_api.get_osd_hostname(d_osd.tags['osd_id']))

        for i_key, i_val in perf_counts.items():
            if i_key[:4] == 'osd.':
                key_name = i_key[4:]
            else:
                key_name = i_key
            # counters are reported as rates, everything else as latest value
            if self._stattype_to_str(i_val['type']) == 'counter':
                value = obj_api.get_rate('osd', service_id, i_key)
            else:
                value = obj_api.get_latest('osd', service_id, i_key)
            if key_name == 'stat_bytes':
                stat_bytes = value
            elif key_name == 'stat_bytes_used':
                stat_bytes_used = value
            else:
                d_osd.fields[key_name] = float(value)

        if stat_bytes and stat_bytes_used:
            d_osd.fields['stat_bytes_used_percentage'] = \
                round((float(stat_bytes_used) / float(stat_bytes)) * 100, 4)
        else:
            d_osd.fields['stat_bytes_used_percentage'] = 0.0000
        self.data.append(d_osd)

    def _generate_mon(self, cluster_id, service_name, perf_counts):
        """ Emit one CephMON point for daemon 'mon.X' from its perf counters. """
        d_mon = CephMON()
        d_mon.tags['cluster_id'] = cluster_id
        d_mon.tags['mon_id'] = service_name[4:]
        d_mon.fields['agenthost'] = socket.gethostname()
        d_mon.tags['agenthost_domain_id'] = cluster_id
        d_mon.fields['num_sessions'] = \
            perf_counts.get('mon.num_sessions', {}).get('value', 0)
        d_mon.fields['session_add'] = \
            perf_counts.get('mon.session_add', {}).get('value', 0)
        d_mon.fields['session_rm'] = \
            perf_counts.get('mon.session_rm', {}).get('value', 0)
        d_mon.fields['session_trim'] = \
            perf_counts.get('mon.session_trim', {}).get('value', 0)
        d_mon.fields['num_elections'] = \
            perf_counts.get('mon.num_elections', {}).get('value', 0)
        d_mon.fields['election_call'] = \
            perf_counts.get('mon.election_call', {}).get('value', 0)
        d_mon.fields['election_win'] = \
            perf_counts.get('mon.election_win', {}).get('value', 0)
        # bug fix: key was 'election_lose' without the 'mon.' prefix every
        # sibling lookup uses, so the field could only ever be 0
        d_mon.fields['election_lose'] = \
            perf_counts.get('mon.election_lose', {}).get('value', 0)
        self.data.append(d_mon)

    def _collect_data(self):
        # process data and save to 'self.data'
        obj_api = ClusterAPI(self._module_inst)
        perf_data = obj_api.module.get_all_perf_counters(services=('mon', 'osd'))
        # bug fix: was 'not perf_data and not isinstance(...)', which let a
        # truthy non-dict slip through and crash on .items() below
        if not perf_data or not isinstance(perf_data, dict):
            self._logger.error('unable to get all perf counters')
            return
        cluster_id = obj_api.get_cluster_id()
        for n_name, i_perf in perf_data.items():
            if n_name[0:3].lower() == 'mon':
                self._generate_mon(cluster_id, n_name, i_perf)
            elif n_name[0:3].lower() == 'osd':
                self._generate_osd(cluster_id, n_name, i_perf)
        self._generate_osd_erasure_profile(cluster_id)
        self._generate_osd_tree(cluster_id)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py
new file mode 100644
index 00000000..e8b39566
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py
@@ -0,0 +1,57 @@
+from __future__ import absolute_import
+
+import socket
+
+from . import MetricsAgent, MetricsField
+from ...common.clusterdata import ClusterAPI
+
+
class CephPool(MetricsField):
    """ Ceph pool structure """
    measurement = 'ceph_pool'

    # per-pool stat fields copied from the 'df' report (default None)
    _STAT_FIELDS = ('bytes_used', 'max_avail', 'objects', 'wr_bytes',
                    'dirty', 'rd_bytes', 'stored_raw')

    def __init__(self):
        super(CephPool, self).__init__()
        self.tags['cluster_id'] = None
        self.tags['pool_id'] = None
        self.fields['agenthost'] = None
        self.tags['agenthost_domain_id'] = None
        for stat_name in self._STAT_FIELDS:
            self.fields[stat_name] = None
+
+
class CephPoolAgent(MetricsAgent):
    """ Emits one CephPool point per pool listed in the 'df' report. """
    measurement = 'ceph_pool'

    # stat keys copied verbatim from each pool's 'stats' dict
    _STAT_KEYS = ('bytes_used', 'max_avail', 'objects', 'wr_bytes',
                  'dirty', 'rd_bytes', 'stored_raw')

    def _collect_data(self):
        # process data and save to 'self.data'
        obj_api = ClusterAPI(self._module_inst)
        df_data = obj_api.module.get('df')
        cluster_id = obj_api.get_cluster_id()
        for pool in df_data.get('pools', []):
            d_pool = CephPool()
            d_pool.tags['cluster_id'] = cluster_id
            d_pool.tags['pool_id'] = pool.get('id')
            d_pool.fields['agenthost'] = socket.gethostname()
            d_pool.tags['agenthost_domain_id'] = cluster_id
            stats = pool.get('stats', {})
            for key in self._STAT_KEYS:
                d_pool.fields[key] = stats.get(key, 0)
            self.data.append(d_pool)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py
new file mode 100644
index 00000000..2f5d60db
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py
@@ -0,0 +1,703 @@
+from __future__ import absolute_import
+
+import re
+import socket
+
+from . import MetricsAgent, MetricsField
+from ...common.clusterdata import ClusterAPI
+from ...common.cypher import CypherOP, NodeInfo
+
+
class BaseDP(object):
    """ basic diskprediction structure

    Subclasses declare ``_fields``; the constructor binds positional
    arguments to the leading names and requires the rest as keywords.
    """
    _fields = []

    def __init__(self, *args, **kwargs):
        names = self._fields
        if len(args) > len(names):
            raise TypeError('Expected {} arguments'.format(len(names)))

        # positional values bind to the leading field names
        for name, value in zip(names, args):
            setattr(self, name, value)

        # remaining fields must arrive as keywords (KeyError if missing)
        for name in names[len(args):]:
            setattr(self, name, kwargs.pop(name))

        # anything left over was not a declared field
        if kwargs:
            raise TypeError('Invalid argument(s): {}'.format(','.join(kwargs)))
+
+
class MGRDpCeph(BaseDP):
    """ Cluster-level summary payload (health plus df-style sizes). """
    _fields = [
        'fsid', 'health', 'max_osd', 'size',
        'avail_size', 'raw_used', 'raw_used_percent'
    ]
+
+
class MGRDpHost(BaseDP):
    """ Host identity payload: cluster fsid, host name and IP address. """
    _fields = ['fsid', 'host', 'ipaddr']
+
+
class MGRDpMon(BaseDP):
    """ Monitor daemon payload: cluster fsid, host name and IP address. """
    _fields = ['fsid', 'host', 'ipaddr']
+
+
class MGRDpOsd(BaseDP):
    """ OSD daemon payload.

    '_id' and '_in' carry the osd map's 'id'/'in' values, renamed to avoid
    the builtin 'id' and the keyword 'in'.
    """
    _fields = [
        'fsid', 'host', '_id', 'uuid', 'up', '_in', 'weight', 'public_addr',
        'cluster_addr', 'state', 'ceph_release', 'osd_devices', 'rotational'
    ]
+
+
class MGRDpMds(BaseDP):
    """ MDS daemon payload: cluster fsid, host name and IP address. """
    _fields = ['fsid', 'host', 'ipaddr']
+
+
class MGRDpPool(BaseDP):
    """ Pool payload: osd-map pool attributes plus the PG/OSD id lists
    (comma-separated strings) computed by the relay agent. """
    _fields = [
        'fsid', 'size', 'pool_name', 'pool_id', 'type', 'min_size',
        'pg_num', 'pgp_num', 'created_time', 'pgids', 'osd_ids', 'tiers', 'cache_mode',
        'erasure_code_profile', 'tier_of'
    ]
+
+
class MGRDpRBD(BaseDP):
    """ RBD image payload ('_id' avoids shadowing the builtin 'id'). """
    _fields = ['fsid', '_id', 'name', 'pool_name', 'pool_id']
+
+
class MGRDpFS(BaseDP):
    """ CephFS payload: pools backing the fs and its MDS nodes. """
    _fields = ['fsid', '_id', 'name', 'metadata_pool', 'data_pools', 'mds_nodes']
+
+
class MGRDpPG(BaseDP):
    """ Placement-group payload: up/acting sets, state and object counts. """
    _fields = [
        'fsid', 'pgid', 'up_osds', 'acting_osds', 'state',
        'objects', 'degraded', 'misplaced', 'unfound'
    ]
+
+
class MGRDpDisk(BaseDP):
    """ Physical-disk payload: which osds use the device for data,
    filestore journal, bluestore db or wal (comma-separated id lists). """
    _fields = ['host_domain_id', 'host', 'fs_journal_osd', 'bs_db_osd', 'bs_wal_osd', 'data_osd', 'osd_ids']
+
+
class DBRelay(MetricsField):
    """ DB Relay structure """
    measurement = 'db_relay'

    def __init__(self):
        super(DBRelay, self).__init__()
        self.fields['agenthost'] = None
        # data-centre tag defaults to 'na' (not available)
        self.tags.update({
            'agenthost_domain_id': None,
            'dc_tag': 'na',
            'host': None,
        })
        self.fields['cmd'] = None
+
+
+class DBRelayAgent(MetricsAgent):
+ measurement = 'db_relay'
+
    def __init__(self, *args, **kwargs):
        """ Snapshot the cluster maps and pre-build every graph node.

        Construction is eager: all node dictionaries are fully populated
        here so _collect_data only has to serialise them.
        """
        super(DBRelayAgent, self).__init__(*args, **kwargs)
        self._cluster_node = None
        self._cluster_id = None
        self._ceph = ClusterAPI(self._module_inst)
        # raw mgr map snapshots used by all the _init_* helpers below
        self._osd_maps = self._ceph.module.get('osd_map')
        self._mon_maps = self._ceph.module.get('mon_map')
        self._fs_maps = self._ceph.module.get('fs_map')
        self._osd_metadata = self._ceph.module.get('osd_metadata')
        # node caches, keyed by host name / daemon id / domain id
        self._host_nodes = dict()
        self._osd_nodes = dict()
        self._mon_nodes = dict()
        self._mds_nodes = dict()
        self._dev_nodes = dict()
        self._pool_nodes = dict()
        self._rbd_nodes = dict()
        self._fs_nodes = dict()
        # initial ceph all node states -- order matters: cluster id first,
        # then hosts, then the daemon/device/pool nodes that reference them
        self._init_cluster_node()
        self._init_hosts()
        self._init_mons()
        self._init_mds()
        self._init_osds()
        self._init_devices()
        self._init_pools()
        self._init_rbds()
        self._init_fs()
+
+ def _init_hosts(self):
+ hosts = set()
+ # Add host from osd
+ osd_data = self._osd_maps.get('osds', [])
+ for _data in osd_data:
+ osd_id = _data['osd']
+ if not _data.get('in'):
+ continue
+ osd_addr = _data['public_addr'].split(':')[0]
+ osd_metadata = self._ceph.get_osd_metadata(osd_id)
+ if osd_metadata:
+ osd_host = osd_metadata['hostname']
+ hosts.add((osd_host, osd_addr))
+
+ # Add host from mon
+ mons = self._mon_maps.get('mons', [])
+ for _data in mons:
+ mon_host = _data['name']
+ mon_addr = _data['public_addr'].split(':')[0]
+ if mon_host:
+ hosts.add((mon_host, mon_addr))
+
+ # Add host from mds
+ file_systems = self._fs_maps.get('filesystems', [])
+ for _data in file_systems:
+ mds_info = _data.get('mdsmap').get('info')
+ for _gid in mds_info:
+ mds_data = mds_info[_gid]
+ mds_addr = mds_data.get('addr').split(':')[0]
+ mds_host = mds_data.get('name')
+ if mds_host:
+ hosts.add((mds_host, mds_addr))
+ for tp in hosts:
+ host = tp[0]
+ self._host_nodes[host] = None
+
+ host_node = NodeInfo(
+ label='VMHost',
+ domain_id='{}_{}'.format(self._cluster_id, host),
+ name=host,
+ meta={}
+ )
+ self._host_nodes[host] = host_node
+
+ def _init_mons(self):
+ cluster_id = self._cluster_id
+ mons = self._mon_maps.get('mons')
+ for mon in mons:
+ mon_name = mon.get('name', '')
+ mon_addr = mon.get('addr', '').split(':')[0]
+ if mon_name not in self._host_nodes.keys():
+ continue
+
+ dp_mon = MGRDpMon(
+ fsid=cluster_id,
+ host=mon_name,
+ ipaddr=mon_addr
+ )
+
+ # create mon node
+ mon_node = NodeInfo(
+ label='CephMon',
+ domain_id='{}.mon.{}'.format(cluster_id, mon_name),
+ name=mon_name,
+ meta=dp_mon.__dict__
+ )
+ self._mon_nodes[mon_name] = mon_node
+
+ def _init_mds(self):
+ cluster_id = self._cluster_id
+ file_systems = self._fs_maps.get('filesystems', [])
+ for _data in file_systems:
+ mds_info = _data.get('mdsmap').get('info')
+ for _gid in mds_info:
+ mds_data = mds_info[_gid]
+ mds_addr = mds_data.get('addr').split(':')[0]
+ mds_host = mds_data.get('name')
+ mds_gid = mds_data.get('gid')
+
+ if mds_host not in self._host_nodes:
+ continue
+
+ dp_mds = MGRDpMds(
+ fsid=cluster_id,
+ host=mds_host,
+ ipaddr=mds_addr
+ )
+
+ # create osd node
+ mds_node = NodeInfo(
+ label='CephMds',
+ domain_id='{}.mds.{}'.format(cluster_id, mds_gid),
+ name='MDS.{}'.format(mds_gid),
+ meta=dp_mds.__dict__
+ )
+ self._mds_nodes[mds_host] = mds_node
+
+ def _init_osds(self):
+ for osd in self._osd_maps.get('osds', []):
+ osd_id = osd.get('osd', -1)
+ meta = self._osd_metadata.get(str(osd_id), {})
+ osd_host = meta['hostname']
+ osd_ceph_version = meta['ceph_version']
+ osd_rotational = meta['rotational']
+ osd_devices = meta['devices'].split(',')
+
+ # filter 'dm' device.
+ devices = []
+ for devname in osd_devices:
+ if 'dm' in devname:
+ continue
+ devices.append(devname)
+
+ if osd_host not in self._host_nodes.keys():
+ continue
+ self._osd_nodes[str(osd_id)] = None
+ public_addr = []
+ cluster_addr = []
+ for addr in osd.get('public_addrs', {}).get('addrvec', []):
+ public_addr.append(addr.get('addr'))
+ for addr in osd.get('cluster_addrs', {}).get('addrvec', []):
+ cluster_addr.append(addr.get('addr'))
+ dp_osd = MGRDpOsd(
+ fsid=self._cluster_id,
+ host=osd_host,
+ _id=osd_id,
+ uuid=osd.get('uuid'),
+ up=osd.get('up'),
+ _in=osd.get('in'),
+ weight=osd.get('weight'),
+ public_addr=','.join(public_addr),
+ cluster_addr=','.join(cluster_addr),
+ state=','.join(osd.get('state', [])),
+ ceph_release=osd_ceph_version,
+ osd_devices=','.join(devices),
+ rotational=osd_rotational)
+ for k, v in meta.items():
+ setattr(dp_osd, k, v)
+
+ # create osd node
+ osd_node = NodeInfo(
+ label='CephOsd',
+ domain_id='{}.osd.{}'.format(self._cluster_id, osd_id),
+ name='OSD.{}'.format(osd_id),
+ meta=dp_osd.__dict__
+ )
+ self._osd_nodes[str(osd_id)] = osd_node
+
    def _init_devices(self):
        """ Build VMDisk nodes from osd metadata and record which osds use
        each device for data, filestore journal, bluestore db or wal. """
        # NOTE(review): '[^/dev]\D+' is a character class -- "one char not in
        # {'/','d','e','v'} then non-digits" -- not a '/dev/' prefix match.
        # It happens to reduce common device paths to a bare name, but
        # confirm against real device strings before changing it.
        r = re.compile('[^/dev]\D+')
        for osdid, o_val in self._osd_nodes.items():
            # 'device_ids' entries look like 'sda=<device-id>'
            o_devs = o_val.meta.get('device_ids', '').split(',')
            # fs_store
            journal_devs = o_val.meta.get('backend_filestore_journal_dev_node', '').split(',')
            # bs_store
            bs_db_devs = o_val.meta.get('bluefs_db_dev_node', '').split(',')
            bs_wal_devs = o_val.meta.get('bluefs_wal_dev_node', '').split(',')

            for dev in o_devs:
                # per-device role lists for this osd
                fs_journal = []
                bs_db = []
                bs_wal = []
                data = []
                if len(dev.split('=')) != 2:
                    continue
                dev_name = dev.split('=')[0]
                dev_id = dev.split('=')[1]
                if not dev_id:
                    continue

                # classify the device by matching its name against the
                # journal/db/wal device nodes reported by the osd
                for j_dev in journal_devs:
                    if dev_name == ''.join(r.findall(j_dev)):
                        fs_journal.append(osdid)
                for db_dev in bs_db_devs:
                    if dev_name == ''.join(r.findall(db_dev)):
                        bs_db.append(osdid)
                for wal_dev in bs_wal_devs:
                    if dev_name == ''.join(r.findall(wal_dev)):
                        bs_wal.append(osdid)

                # a device serving none of the special roles holds data
                if not fs_journal and not bs_db and not bs_wal:
                    data.append(osdid)

                disk_domain_id = dev_id
                if disk_domain_id not in self._dev_nodes.keys():
                    # first sighting of this device: create its node
                    dp_disk = MGRDpDisk(
                        host_domain_id='{}_{}'.format(self._cluster_id, o_val.meta.get('host')),
                        host=o_val.meta.get('host'),
                        osd_ids=osdid,
                        fs_journal_osd=','.join(str(x) for x in fs_journal) if fs_journal else '',
                        bs_db_osd=','.join(str(x) for x in bs_db) if bs_db else '',
                        bs_wal_osd=','.join(str(x) for x in bs_wal) if bs_wal else '',
                        data_osd=','.join(str(x) for x in data) if data else ''
                    )
                    # create disk node
                    disk_node = NodeInfo(
                        label='VMDisk',
                        domain_id=disk_domain_id,
                        name=dev_name,
                        meta=dp_disk.__dict__
                    )
                    self._dev_nodes[disk_domain_id] = disk_node
                else:
                    # device shared by several osds: merge this osd's roles
                    # into the existing node's comma-separated lists
                    dev_node = self._dev_nodes[disk_domain_id]
                    osd_ids = dev_node.meta.get('osd_ids', '')
                    if osdid not in osd_ids.split(','):
                        arr_value = osd_ids.split(',')
                        arr_value.append(str(osdid))
                        dev_node.meta['osd_ids'] = ','.join(arr_value)
                    if fs_journal:
                        arr_value = None
                        for t in fs_journal:
                            value = dev_node.meta.get('fs_journal_osd', '')
                            if value:
                                arr_value = value.split(',')
                            else:
                                arr_value = []
                            if t not in arr_value:
                                arr_value.append(t)
                        if arr_value:
                            dev_node.meta['fs_journal_osd'] = ','.join(str(x) for x in arr_value)
                    if bs_db:
                        arr_value = None
                        for t in bs_db:
                            value = dev_node.meta.get('bs_db_osd', '')
                            if value:
                                arr_value = value.split(',')
                            else:
                                arr_value = []
                            if t not in arr_value:
                                arr_value.append(t)
                        if arr_value:
                            dev_node.meta['bs_db_osd'] = ','.join(str(x) for x in arr_value)
                    if bs_wal:
                        arr_value = None
                        for t in bs_wal:
                            value = dev_node.meta.get('bs_wal_osd', '')
                            if value:
                                arr_value = value.split(',')
                            else:
                                arr_value = []
                            if t not in arr_value:
                                arr_value.append(t)
                        if arr_value:
                            dev_node.meta['bs_wal_osd'] = ','.join(str(x) for x in arr_value)
                    if data:
                        arr_value = None
                        for t in data:
                            value = dev_node.meta.get('data_osd', '')
                            if value:
                                arr_value = value.split(',')
                            else:
                                arr_value = []
                            if t not in arr_value:
                                arr_value.append(t)
                        if arr_value:
                            dev_node.meta['data_osd'] = ','.join(str(x) for x in arr_value)
+
+    def _init_cluster_node(self):
+        """Build the CephCluster graph node from the fsid and 'ceph df' stats.
+
+        Stores the result on self._cluster_id / self._cluster_node for the
+        relationship builders below.
+        """
+        cluster_id = self._ceph.get_cluster_id()
+        ceph_df_stat = self._ceph.get_ceph_df_state()
+        dp_cluster = MGRDpCeph(
+            fsid=cluster_id,
+            health=self._ceph.get_health_status(),
+            max_osd=len(self._ceph.get_osds()),
+            size=ceph_df_stat.get('total_size'),
+            avail_size=ceph_df_stat.get('avail_size'),
+            raw_used=ceph_df_stat.get('raw_used_size'),
+            raw_used_percent=ceph_df_stat.get('used_percent')
+        )
+        # Display name only carries the last 12 characters of the fsid.
+        cluster_name = cluster_id[-12:]
+        cluster_node = NodeInfo(
+            label='CephCluster',
+            domain_id=cluster_id,
+            name='cluster-{}'.format(cluster_name),
+            meta=dp_cluster.__dict__
+        )
+        self._cluster_id = cluster_id
+        self._cluster_node = cluster_node
+
+ def _init_pools(self):
+ pools = self._osd_maps.get('pools', [])
+ cluster_id = self._cluster_id
+ for pool in pools:
+ osds = []
+ pgs = self._ceph.get_pgs_up_by_poolid(int(pool.get('pool', -1)))
+ for pg_id, osd_id in pgs.items():
+ for o_id in osd_id:
+ if o_id not in osds:
+ osds.append(str(o_id))
+ dp_pool = MGRDpPool(
+ fsid=cluster_id,
+ size=pool.get('size'),
+ pool_name=pool.get('pool_name'),
+ pool_id=pool.get('pool'),
+ type=pool.get('type'),
+ min_size=pool.get('min_szie'),
+ pg_num=pool.get('pg_num'),
+ pgp_num=pool.get('pg_placement_num'),
+ created_time=pool.get('create_time'),
+ pgids=','.join(pgs.keys()),
+ osd_ids=','.join(osds),
+ tiers=','.join(str(x) for x in pool.get('tiers', [])),
+ cache_mode=pool.get('cache_mode', ''),
+ erasure_code_profile=str(pool.get('erasure_code_profile', '')),
+ tier_of=str(pool.get('tier_of', -1)))
+ # create pool node
+ pool_node = NodeInfo(
+ label='CephPool',
+ domain_id='{}_pool_{}'.format(cluster_id, pool.get('pool')),
+ name=pool.get('pool_name'),
+ meta=dp_pool.__dict__
+ )
+ self._pool_nodes[str(pool.get('pool'))] = pool_node
+
+    def _init_rbds(self):
+        """Build CephRBD graph nodes, grouped per pool id in self._rbd_nodes."""
+        cluster_id = self._cluster_id
+        for p_id, p_node in self._pool_nodes.items():
+            rbds = self._ceph.get_rbd_list(p_node.name)
+            self._rbd_nodes[str(p_id)] = []
+            for rbd in rbds:
+                dp_rbd = MGRDpRBD(
+                    fsid=cluster_id,
+                    _id=rbd['id'],
+                    name=rbd['name'],
+                    pool_name=rbd['pool_name'],
+                    pool_id=p_id,
+                )
+                # create rbd node
+                rbd_node = NodeInfo(
+                    label='CephRBD',
+                    domain_id='{}_rbd_{}'.format(cluster_id, rbd['id']),
+                    name=rbd['name'],
+                    meta=dp_rbd.__dict__,
+                )
+                self._rbd_nodes[str(p_id)].append(rbd_node)
+
+    def _init_fs(self):
+        """Build a CephFS graph node for every filesystem in the fs map."""
+        # _fields = ['fsid', '_id', 'name', 'metadata_pool', 'data_pool', 'mds_nodes']
+        cluster_id = self._cluster_id
+        file_systems = self._fs_maps.get('filesystems', [])
+        for fs in file_systems:
+            mdsmap = fs.get('mdsmap', {})
+            # Collect the distinct MDS daemon names serving this filesystem.
+            mds_hostnames = []
+            for m, md in mdsmap.get('info', {}).items():
+                if md.get('name') not in mds_hostnames:
+                    mds_hostnames.append(md.get('name'))
+            dp_fs = MGRDpFS(
+                fsid=cluster_id,
+                _id=fs.get('id'),
+                name=mdsmap.get('fs_name'),
+                metadata_pool=str(mdsmap.get('metadata_pool', -1)),
+                data_pools=','.join(str(i) for i in mdsmap.get('data_pools', [])),
+                mds_nodes=','.join(mds_hostnames),
+            )
+            fs_node = NodeInfo(
+                label='CephFS',
+                domain_id='{}_fs_{}'.format(cluster_id, fs.get('id')),
+                name=mdsmap.get('fs_name'),
+                meta=dp_fs.__dict__,
+            )
+            self._fs_nodes[str(fs.get('id'))] = fs_node
+
+    def _cluster_contains_host(self):
+        """Emit one 'CephClusterContainsHost' link per known host node."""
+        cluster_id = self._cluster_id
+        cluster_node = self._cluster_node
+
+        # create node relation
+        for h_id, h_node in self._host_nodes.items():
+            data = DBRelay()
+            # add host node relationship
+            cypher_cmd = CypherOP.add_link(
+                cluster_node,
+                h_node,
+                'CephClusterContainsHost'
+            )
+            cluster_host = socket.gethostname()
+            data.fields['agenthost'] = cluster_host
+            data.tags['agenthost_domain_id'] = cluster_id
+            data.tags['host'] = cluster_host
+            data.fields['cmd'] = str(cypher_cmd)
+            self.data.append(data)
+
+    def _host_contains_mon(self):
+        """Emit one 'HostContainsMon' link per mon whose host node is known."""
+        for m_name, m_node in self._mon_nodes.items():
+            host_node = self._host_nodes.get(m_name)
+            if not host_node:
+                continue
+            data = DBRelay()
+            # add mon node relationship
+            cypher_cmd = CypherOP.add_link(
+                host_node,
+                m_node,
+                'HostContainsMon'
+            )
+            cluster_host = socket.gethostname()
+            data.fields['agenthost'] = cluster_host
+            data.tags['agenthost_domain_id'] = self._cluster_id
+            data.tags['host'] = cluster_host
+            data.fields['cmd'] = str(cypher_cmd)
+            self.data.append(data)
+
+ def _host_contains_osd(self):
+ cluster_id = self._cluster_id
+ for o_id, o_node in self._osd_nodes.items():
+ host_node = self._host_nodes.get(o_node.meta.get('host'))
+ if not host_node:
+ continue
+ data = DBRelay()
+ # add osd node relationship
+ cypher_cmd = CypherOP.add_link(
+ host_node,
+ o_node,
+ 'HostContainsOsd'
+ )
+ cluster_host = socket.gethostname()
+ data.fields['agenthost'] = cluster_host
+ data.tags['agenthost_domain_id'] = cluster_id, data
+ data.tags['host'] = cluster_host
+ data.fields['cmd'] = str(cypher_cmd)
+ self.data.append(data)
+
+    def _host_contains_mds(self):
+        """Emit one 'HostContainsMds' link per MDS whose host node is known."""
+        cluster_id = self._cluster_id
+        for m_name, mds_node in self._mds_nodes.items():
+            data = DBRelay()
+            host_node = self._host_nodes.get(mds_node.meta.get('host'))
+            if not host_node:
+                continue
+            # add mds node relationship
+            cypher_cmd = CypherOP.add_link(
+                host_node,
+                mds_node,
+                'HostContainsMds'
+            )
+            cluster_host = socket.gethostname()
+            data.fields['agenthost'] = cluster_host
+            data.tags['agenthost_domain_id'] = cluster_id
+            data.tags['host'] = cluster_host
+            data.fields['cmd'] = str(cypher_cmd)
+            self.data.append(data)
+
+    def _osd_contains_disk(self):
+        """Link each disk node to the OSDs that use it and to its host.
+
+        Per disk, emits one '<usage>DiskOfOSD' link for every osd id listed in
+        the comma-separated meta keys below, then one 'VmHostContainsVmDisk'
+        link to the disk's host node.
+        """
+        cluster_id = self._cluster_id
+        cluster_host = socket.gethostname()
+        for d_name, d_node in self._dev_nodes.items():
+            # meta key -> relationship label; keys are filled by the device
+            # update code earlier in this file.
+            keys = {'data_osd': 'DataDiskOfOSD',
+                    'fs_journal_osd': 'FsJournalDiskOfOSD',
+                    'bs_db_osd': 'BsDBDiskOfOSD',
+                    'bs_wal_osd': 'BsWalDiskOfOSD'}
+            for k, v in keys.items():
+                if not d_node.meta.get(k):
+                    continue
+                for osdid in d_node.meta.get(k, '').split(','):
+                    data = DBRelay()
+                    osd_node = self._osd_nodes.get(str(osdid))
+                    if not osd_node:
+                        continue
+                    # add disk node relationship
+                    cypher_cmd = CypherOP.add_link(
+                        osd_node,
+                        d_node,
+                        v)
+                    data.fields['agenthost'] = cluster_host
+                    data.tags['agenthost_domain_id'] = cluster_id
+                    data.tags['host'] = cluster_host
+                    data.fields['cmd'] = str(cypher_cmd)
+                    self.data.append(data)
+
+            hostname = d_node.meta.get('host', '')
+            if not hostname:
+                continue
+            host_node = self._host_nodes.get(hostname)
+            if not host_node:
+                continue
+            # add host/disk node relationship
+            data = DBRelay()
+            cypher_cmd = CypherOP.add_link(
+                host_node,
+                d_node,
+                'VmHostContainsVmDisk'
+            )
+            data.fields['agenthost'] = cluster_host
+            data.tags['agenthost_domain_id'] = cluster_id
+            data.tags['host'] = cluster_host
+            data.fields['cmd'] = str(cypher_cmd)
+            self.data.append(data)
+
+    def _pool_contains_osd(self):
+        """Emit one 'OsdContainsPool' link per OSD listed in each pool's meta."""
+        cluster_id = self._cluster_id
+        cluster_host = socket.gethostname()
+        for p_id, p_node in self._pool_nodes.items():
+            # osd_ids is the comma-separated list built in _init_pools().
+            for o_id in p_node.meta.get('osd_ids', '').split(','):
+                osd_node = self._osd_nodes.get(str(o_id))
+                if not osd_node:
+                    continue
+                data = DBRelay()
+                cypher_cmd = CypherOP.add_link(
+                    osd_node,
+                    p_node,
+                    'OsdContainsPool'
+                )
+                data.fields['agenthost'] = cluster_host
+                data.tags['agenthost_domain_id'] = cluster_id
+                data.tags['host'] = cluster_host
+                data.fields['cmd'] = str(cypher_cmd)
+                self.data.append(data)
+
+    def _pool_contains_rbd(self):
+        """Emit one 'PoolContainsRBD' link per RBD image of each pool."""
+        cluster_id = self._cluster_id
+        cluster_host = socket.gethostname()
+        for p_id, p_node in self._pool_nodes.items():
+            for rbd_node in self._rbd_nodes.get(str(p_id), []):
+                if not rbd_node:
+                    continue
+                data = DBRelay()
+                cypher_cmd = CypherOP.add_link(
+                    p_node,
+                    rbd_node,
+                    'PoolContainsRBD'
+                )
+                data.fields['agenthost'] = cluster_host
+                data.tags['agenthost_domain_id'] = cluster_id
+                data.tags['host'] = cluster_host
+                data.fields['cmd'] = str(cypher_cmd)
+                self.data.append(data)
+
+    def _pool_contains_fs(self):
+        """Link each CephFS node to its metadata/data pools and its MDSes."""
+        cluster_id = self._cluster_id
+        cluster_host = socket.gethostname()
+        for fs_id, fs_node in self._fs_nodes.items():
+            # Pool ids come from the comma-separated meta built in _init_fs().
+            pool_attrs = ['metadata_pool', 'data_pools']
+            for p_attr in pool_attrs:
+                pools_id = fs_node.meta.get(p_attr).split(',')
+                for p_id in pools_id:
+                    p_node = self._pool_nodes.get(str(p_id))
+                    if p_node:
+                        data = DBRelay()
+                        cypher_cmd = CypherOP.add_link(
+                            p_node,
+                            fs_node,
+                            'MetadataPoolContainsFS' if p_attr == 'metadata_pool' else 'DataPoolContainsFS'
+                        )
+                        data.fields['agenthost'] = cluster_host
+                        data.tags['agenthost_domain_id'] = cluster_id
+                        data.tags['host'] = cluster_host
+                        data.fields['cmd'] = str(cypher_cmd)
+                        self.data.append(data)
+            # One 'MDSContainsFS' link per serving MDS daemon.
+            for mds_name in fs_node.meta.get('mds_nodes', '').split(','):
+                mds_node = self._mds_nodes.get(mds_name)
+                if not mds_node:
+                    continue
+                data = DBRelay()
+                cypher_cmd = CypherOP.add_link(
+                    mds_node,
+                    fs_node,
+                    'MDSContainsFS'
+                )
+                data.fields['agenthost'] = cluster_host
+                data.tags['agenthost_domain_id'] = cluster_id
+                data.tags['host'] = cluster_host
+                data.fields['cmd'] = str(cypher_cmd)
+                self.data.append(data)
+
+ def _collect_data(self):
+ if not self._module_inst:
+ return
+ job_name = ['cluster_contains_host', 'host_contains_mon', 'host_contains_mds', 'host_contains_osd', 'osd_contains_disk',
+ 'pool_contains_osd', 'pool_contains_rbd', 'pool_contains_fs']
+ for job in job_name:
+ fn = getattr(self, '_%s' % job)
+ if not fn:
+ continue
+ try:
+ fn()
+ except Exception as e:
+ self._module_inst.log.error('dbrelay - execute function {} fail, due to {}'.format(job, str(e)))
+ continue
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py
new file mode 100644
index 00000000..81fbdf96
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py
@@ -0,0 +1,70 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+from __future__ import absolute_import
+
+import socket
+import time
+
+from . import AGENT_VERSION, MetricsAgent, MetricsField
+from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING
+from ...common.clusterdata import ClusterAPI
+
+
+class SAIAgentFields(MetricsField):
+    """ SAI agent heartbeat structure """
+    measurement = 'sai_agent'
+
+    def __init__(self):
+        super(SAIAgentFields, self).__init__()
+        self.tags['agenthost_domain_id'] = None
+        self.fields['agent_type'] = str('ceph')
+        self.fields['agent_version'] = str(AGENT_VERSION)
+        self.fields['agenthost'] = ''
+        self.fields['cluster_domain_id'] = ''
+        self.fields['heartbeat_interval'] = ''
+        self.fields['host_ip'] = ''
+        self.fields['host_name'] = ''
+        # is_error: plugin-level failure; is_ceph_error: cluster health failure
+        self.fields['is_error'] = False
+        self.fields['is_ceph_error'] = False
+        self.fields['needs_warning'] = False
+        # send: epoch milliseconds timestamp, set when the record is emitted
+        self.fields['send'] = None
+
+
+class SAIAgent(MetricsAgent):
+    measurement = 'sai_agent'
+
+    def _collect_data(self):
+        """Emit one heartbeat record describing the local mgr daemon(s).
+
+        Raises Exception when the local host's service info is unavailable.
+        """
+        mgr_id = []
+        c_data = SAIAgentFields()
+        obj_api = ClusterAPI(self._module_inst)
+        svc_data = obj_api.get_server(socket.gethostname())
+        cluster_state = obj_api.get_health_status()
+        if not svc_data:
+            raise Exception('unable to get %s service info' % socket.gethostname())
+        # Filter mgr id
+        for s in svc_data.get('services', []):
+            if s.get('type', '') == 'mgr':
+                mgr_id.append(s.get('id'))
+
+        # NOTE(review): c_data is a single record shared by this loop, so with
+        # several local mgr daemons each iteration overwrites the previous
+        # one's fields and only one record is appended below.
+        for _id in mgr_id:
+            mgr_meta = obj_api.get_mgr_metadata(_id)
+            cluster_id = obj_api.get_cluster_id()
+            c_data.fields['cluster_domain_id'] = str(cluster_id)
+            c_data.fields['agenthost'] = str(socket.gethostname())
+            c_data.tags['agenthost_domain_id'] = cluster_id
+            c_data.fields['heartbeat_interval'] = \
+                int(obj_api.get_configuration('diskprediction_upload_metrics_interval'))
+            c_data.fields['host_ip'] = str(mgr_meta.get('addr', '127.0.0.1'))
+            c_data.fields['host_name'] = str(socket.gethostname())
+            # Plugin status drives is_error; cluster health drives is_ceph_error.
+            if obj_api.module.status.get('status', '') in [DP_MGR_STAT_WARNING, DP_MGR_STAT_FAILED]:
+                c_data.fields['is_error'] = bool(True)
+            else:
+                c_data.fields['is_error'] = bool(False)
+            if cluster_state in ['HEALTH_ERR', 'HEALTH_WARN']:
+                c_data.fields['is_ceph_error'] = bool(True)
+                c_data.fields['needs_warning'] = bool(True)
+                c_data.fields['is_error'] = bool(True)
+                c_data.fields['problems'] = str(obj_api.get_health_checks())
+            else:
+                c_data.fields['is_ceph_error'] = bool(False)
+            c_data.fields['send'] = int(time.time() * 1000)
+            self.data.append(c_data)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py
new file mode 100644
index 00000000..d444f9a2
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py
@@ -0,0 +1,35 @@
+from __future__ import absolute_import
+
+import socket
+
+from . import AGENT_VERSION, MetricsAgent, MetricsField
+from ...common.clusterdata import ClusterAPI
+
+
+class SAIClusterFields(MetricsField):
+    """ SAI cluster structure """
+    measurement = 'sai_cluster'
+
+    def __init__(self):
+        super(SAIClusterFields, self).__init__()
+        self.tags['domain_id'] = None
+        self.fields['agenthost'] = None
+        self.fields['agenthost_domain_id'] = None
+        self.fields['name'] = None
+        self.fields['agent_version'] = str(AGENT_VERSION)
+
+
+class SAICluserAgent(MetricsAgent):
+    """Emit one 'sai_cluster' identity record for this Ceph cluster.
+
+    NOTE(review): class name misspells 'Cluster'; kept as-is since callers
+    reference it by this name.
+    """
+    measurement = 'sai_cluster'
+
+    def _collect_data(self):
+        c_data = SAIClusterFields()
+        obj_api = ClusterAPI(self._module_inst)
+        cluster_id = obj_api.get_cluster_id()
+
+        c_data.tags['domain_id'] = str(cluster_id)
+        c_data.tags['host_domain_id'] = '%s_%s' % (str(cluster_id), str(socket.gethostname()))
+        c_data.fields['agenthost'] = str(socket.gethostname())
+        c_data.tags['agenthost_domain_id'] = cluster_id
+        c_data.fields['name'] = 'Ceph mgr plugin'
+        self.data.append(c_data)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py
new file mode 100644
index 00000000..3b177e69
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py
@@ -0,0 +1,175 @@
+from __future__ import absolute_import
+
+import socket
+
+from . import AGENT_VERSION, MetricsAgent, MetricsField
+from ...common import get_human_readable
+from ...common.clusterdata import ClusterAPI
+
+
+class SAIDiskFields(MetricsField):
+    """ SAI Disk structure """
+    measurement = 'sai_disk'
+
+    def __init__(self):
+        super(SAIDiskFields, self).__init__()
+        self.fields['agenthost'] = None
+        self.tags['agenthost_domain_id'] = None
+        self.tags['disk_domain_id'] = None
+        self.tags['disk_name'] = None
+        self.tags['disk_wwn'] = None
+        # primary_key: concatenation of cluster, host and disk domain ids
+        self.tags['primary_key'] = None
+        self.fields['cluster_domain_id'] = None
+        self.fields['host_domain_id'] = None
+        self.fields['model'] = None
+        self.fields['serial_number'] = None
+        self.fields['size'] = None
+        self.fields['vendor'] = None
+        self.fields['agent_version'] = str(AGENT_VERSION)
+
+        """disk_status
+        0: unknown 1: good 2: failure
+        """
+        self.fields['disk_status'] = 0
+
+        """disk_type
+        0: unknown 1: HDD 2: SSD 3: SSD NVME
+        4: SSD SAS 5: SSD SATA 6: HDD SAS 7: HDD SATA
+        """
+        self.fields['disk_type'] = 0
+
+
+class SAIDiskAgent(MetricsAgent):
+    measurement = 'sai_disk'
+
+    @staticmethod
+    def _convert_disk_type(is_ssd, sata_version, protocol):
+        """ return type:
+        0: "Unknown', 1: 'HDD',
+        2: 'SSD", 3: "SSD NVME",
+        4: "SSD SAS", 5: "SSD SATA",
+        6: "HDD SAS", 7: "HDD SATA"
+        """
+        if is_ssd:
+            # SATA version reported but no protocol string -> SATA device.
+            if sata_version and not protocol:
+                disk_type = 5
+            elif 'SCSI'.lower() in protocol.lower():
+                disk_type = 4
+            elif 'NVMe'.lower() in protocol.lower():
+                disk_type = 3
+            else:
+                disk_type = 2
+        else:
+            if sata_version and not protocol:
+                disk_type = 7
+            elif 'SCSI'.lower() in protocol.lower():
+                disk_type = 6
+            else:
+                disk_type = 1
+        return disk_type
+
+    def _collect_data(self):
+        """Emit one 'sai_disk' inventory record per SMART-reporting device
+        of every in-cluster OSD; results accumulate in self.data.
+        """
+        obj_api = ClusterAPI(self._module_inst)
+        cluster_id = obj_api.get_cluster_id()
+        osds = obj_api.get_osds()
+        for osd in osds:
+            if osd.get('osd') is None:
+                continue
+            # Only report devices of OSDs that are 'in' the cluster.
+            if not osd.get('in'):
+                continue
+            osds_meta = obj_api.get_osd_metadata(osd.get('osd'))
+            if not osds_meta:
+                continue
+            osds_smart = obj_api.get_osd_smart(osd.get('osd'))
+            if not osds_smart:
+                continue
+            for dev_name, s_val in osds_smart.items():
+                d_data = SAIDiskFields()
+                d_data.tags['disk_name'] = str(dev_name)
+                d_data.fields['cluster_domain_id'] = str(cluster_id)
+                d_data.tags['host_domain_id'] = \
+                    str('%s_%s'
+                        % (cluster_id, osds_meta.get('hostname', 'None')))
+                d_data.fields['agenthost'] = str(socket.gethostname())
+                d_data.tags['agenthost_domain_id'] = cluster_id
+                serial_number = s_val.get('serial_number')
+                wwn = s_val.get('wwn', {})
+                wwpn = ''
+                # Build a WWN string from smartctl's oui/id pair, preferring
+                # the naa/t10/eui/iqn form when one is present.
+                if wwn:
+                    wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0))
+                    for k in wwn.keys():
+                        if k in ['naa', 't10', 'eui', 'iqn']:
+                            wwpn = ('%X%s' % (wwn[k], wwpn)).lower()
+                            break
+
+                # Fall back through wwn -> serial number -> device name when
+                # choosing the disk identifiers.
+                if wwpn:
+                    d_data.tags['disk_domain_id'] = str(dev_name)
+                    d_data.tags['disk_wwn'] = str(wwpn)
+                    if serial_number:
+                        d_data.fields['serial_number'] = str(serial_number)
+                    else:
+                        d_data.fields['serial_number'] = str(wwpn)
+                elif serial_number:
+                    d_data.tags['disk_domain_id'] = str(dev_name)
+                    d_data.fields['serial_number'] = str(serial_number)
+                    if wwpn:
+                        d_data.tags['disk_wwn'] = str(wwpn)
+                    else:
+                        d_data.tags['disk_wwn'] = str(serial_number)
+                else:
+                    d_data.tags['disk_domain_id'] = str(dev_name)
+                    d_data.tags['disk_wwn'] = str(dev_name)
+                    d_data.fields['serial_number'] = str(dev_name)
+                d_data.tags['primary_key'] = \
+                    str('%s%s%s'
+                        % (cluster_id, d_data.tags['host_domain_id'],
+                           d_data.tags['disk_domain_id']))
+                d_data.fields['disk_status'] = int(1)
+                # smartctl reports rotation_rate == 0 for solid-state devices.
+                is_ssd = True if s_val.get('rotation_rate') == 0 else False
+                vendor = s_val.get('vendor', None)
+                model = s_val.get('model_name', None)
+                if s_val.get('sata_version', {}).get('string'):
+                    sata_version = s_val['sata_version']['string']
+                else:
+                    sata_version = ''
+                if s_val.get('device', {}).get('protocol'):
+                    protocol = s_val['device']['protocol']
+                else:
+                    protocol = ''
+                d_data.fields['disk_type'] = \
+                    self._convert_disk_type(is_ssd, sata_version, protocol)
+                d_data.fields['firmware_version'] = \
+                    str(s_val.get('firmware_version'))
+                if model:
+                    d_data.fields['model'] = str(model)
+                if vendor:
+                    d_data.fields['vendor'] = str(vendor)
+                if sata_version:
+                    d_data.fields['sata_version'] = str(sata_version)
+                if s_val.get('logical_block_size'):
+                    d_data.fields['sector_size'] = \
+                        str(str(s_val['logical_block_size']))
+                d_data.fields['transport_protocol'] = str('')
+                # NOTE(review): this overwrites any 'vendor' value set above
+                # with the model_family string — confirm this is intentional.
+                d_data.fields['vendor'] = \
+                    str(s_val.get('model_family', '')).replace('\"', '\'')
+                try:
+                    # user_capacity may be a plain number or a nested dict
+                    # ({'bytes': n} or {'bytes': {'n': n}}) depending on the
+                    # smartctl JSON version.
+                    if isinstance(s_val.get('user_capacity'), dict):
+                        if isinstance(s_val['user_capacity'].get('bytes'), dict):
+                            user_capacity = \
+                                s_val['user_capacity'].get('bytes', {}).get('n', 0)
+                        else:
+                            user_capacity = s_val['user_capacity'].get('bytes')
+                    else:
+                        user_capacity = s_val.get('user_capacity', 0)
+                except ValueError:
+                    user_capacity = 0
+                if str(user_capacity).isdigit():
+                    d_data.fields['size'] = get_human_readable(int(user_capacity), 0)
+                else:
+                    d_data.fields['size'] = str(user_capacity)
+                if s_val.get('smart_status', {}).get('passed'):
+                    d_data.fields['smart_health_status'] = 'PASSED'
+                else:
+                    d_data.fields['smart_health_status'] = 'FAILED'
+                self.data.append(d_data)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py
new file mode 100644
index 00000000..1ad34784
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py
@@ -0,0 +1,182 @@
+from __future__ import absolute_import
+
+import datetime
+import json
+import _strptime
+import socket
+import time
+
+from . import AGENT_VERSION, MetricsAgent, MetricsField
+from ...common.clusterdata import ClusterAPI
+
+
+class SAIDiskSmartFields(MetricsField):
+    """ SAI DiskSmart structure """
+    measurement = 'sai_disk_smart'
+
+    def __init__(self):
+        super(SAIDiskSmartFields, self).__init__()
+        self.fields['agenthost'] = None
+        self.tags['agenthost_domain_id'] = None
+        self.tags['disk_domain_id'] = None
+        self.tags['disk_name'] = None
+        self.tags['disk_wwn'] = None
+        # primary_key: concatenation of cluster, host and disk domain ids
+        self.tags['primary_key'] = None
+        self.fields['cluster_domain_id'] = None
+        self.fields['host_domain_id'] = None
+        self.fields['agent_version'] = str(AGENT_VERSION)
+
+
+class SAIDiskSmartAgent(MetricsAgent):
+ measurement = 'sai_disk_smart'
+
+ def _collect_data(self):
+ # process data and save to 'self.data'
+ obj_api = ClusterAPI(self._module_inst)
+ cluster_id = obj_api.get_cluster_id()
+ osds = obj_api.get_osds()
+ for osd in osds:
+ if osd.get('osd') is None:
+ continue
+ if not osd.get('in'):
+ continue
+ osds_meta = obj_api.get_osd_metadata(osd.get('osd'))
+ if not osds_meta:
+ continue
+ devs_info = obj_api.get_osd_device_id(osd.get('osd'))
+ if devs_info:
+ for dev_name, dev_info in devs_info.items():
+ osds_smart = obj_api.get_device_health(dev_info['dev_id'])
+ if not osds_smart:
+ continue
+ # Always pass through last smart data record
+ o_key = sorted(osds_smart.keys(), reverse=True)[0]
+ if o_key:
+ s_date = o_key
+ s_val = osds_smart[s_date]
+ smart_data = SAIDiskSmartFields()
+ smart_data.tags['disk_name'] = str(dev_name)
+ smart_data.fields['cluster_domain_id'] = str(cluster_id)
+ smart_data.tags['host_domain_id'] = \
+ str('%s_%s'
+ % (cluster_id, osds_meta.get('hostname', 'None')))
+ smart_data.fields['agenthost'] = str(socket.gethostname())
+ smart_data.tags['agenthost_domain_id'] = cluster_id
+ # parse attributes
+ protocol = s_val.get('device', {}).get('protocol', '')
+ if str(protocol).lower() == 'nvme':
+ nvme_info = s_val.get('nvme_smart_health_information_log', {})
+ smart_data['CriticalWarniing_raw'] = int(nvme_info.get('critical_warning', 0))
+ smart_data['CurrentDriveTemperature_raw'] = int(nvme_info.get('temperature', 0))
+ smart_data['AvailableSpare_raw'] = int(nvme_info.get('available_spare', 0))
+ smart_data['AvailableSpareThreshold_raw'] = int(nvme_info.get('available_spare_threshold', 0))
+ smart_data['PercentageUsed_raw'] = int(nvme_info.get('percentage_used', 0))
+ smart_data['DataUnitsRead_raw'] = int(nvme_info.get('data_units_read', 0))
+ smart_data['DataUnitsRead'] = int(nvme_info.get('data_units_written', 0))
+ smart_data['HostReadCommands_raw'] = int(nvme_info.get('host_reads', 0))
+ smart_data['HostWriteCommands_raw'] = int(nvme_info.get('host_writes', 0))
+ smart_data['ControllerBusyTime_raw'] = int(nvme_info.get('controller_busy_time', 0))
+ smart_data['PowerCycles_raw'] = int(nvme_info.get('power_cycles', 0))
+ smart_data['PowerOnHours_raw'] = int(nvme_info.get('power_on_hours', 0))
+ smart_data['UnsafeShutdowns_raw'] = int(nvme_info.get('unsafe_shutdowns', 0))
+ smart_data['MediaandDataIntegrityErrors_raw'] = int(nvme_info.get('media_errors', 0))
+ smart_data['ErrorInformationLogEntries'] = int(nvme_info.get('num_err_log_entries'))
+ nvme_addition = s_val.get('nvme_smart_health_information_add_log', {})
+ for k, v in nvme_addition.get("Device stats", {}).items():
+ if v.get('raw') is None:
+ continue
+ if isinstance(v.get('raw'), int):
+ smart_data[k] = int(v['raw'])
+ else:
+ smart_data[k] = str(v.get('raw'))
+ else:
+ ata_smart = s_val.get('ata_smart_attributes', {})
+ for attr in ata_smart.get('table', []):
+ if attr.get('raw', {}).get('string'):
+ if str(attr.get('raw', {}).get('string', '0')).isdigit():
+ smart_data.fields['%s_raw' % attr.get('id')] = \
+ int(attr.get('raw', {}).get('string', '0'))
+ else:
+ if str(attr.get('raw', {}).get('string', '0')).split(' ')[0].isdigit():
+ smart_data.fields['%s_raw' % attr.get('id')] = \
+ int(attr.get('raw', {}).get('string', '0').split(' ')[0])
+ else:
+ smart_data.fields['%s_raw' % attr.get('id')] = \
+ attr.get('raw', {}).get('value', 0)
+ smart_data.fields['raw_data'] = str(json.dumps(osds_smart[s_date]).replace("\"", "\'"))
+ if s_val.get('temperature', {}).get('current') is not None:
+ smart_data.fields['CurrentDriveTemperature_raw'] = \
+ int(s_val['temperature']['current'])
+ if s_val.get('temperature', {}).get('drive_trip') is not None:
+ smart_data.fields['DriveTripTemperature_raw'] = \
+ int(s_val['temperature']['drive_trip'])
+ if s_val.get('elements_grown_list') is not None:
+ smart_data.fields['ElementsInGrownDefectList_raw'] = int(s_val['elements_grown_list'])
+ if s_val.get('power_on_time', {}).get('hours') is not None:
+ smart_data.fields['9_raw'] = int(s_val['power_on_time']['hours'])
+ if s_val.get('scsi_percentage_used_endurance_indicator') is not None:
+ smart_data.fields['PercentageUsedEnduranceIndicator_raw'] = \
+ int(s_val['scsi_percentage_used_endurance_indicator'])
+ if s_val.get('scsi_error_counter_log') is not None:
+ s_err_counter = s_val['scsi_error_counter_log']
+ for s_key in s_err_counter.keys():
+ if s_key.lower() in ['read', 'write']:
+ for s1_key in s_err_counter[s_key].keys():
+ if s1_key.lower() == 'errors_corrected_by_eccfast':
+ smart_data.fields['ErrorsCorrectedbyECCFast%s_raw' % s_key.capitalize()] = \
+ int(s_err_counter[s_key]['errors_corrected_by_eccfast'])
+ elif s1_key.lower() == 'errors_corrected_by_eccdelayed':
+ smart_data.fields['ErrorsCorrectedbyECCDelayed%s_raw' % s_key.capitalize()] = \
+ int(s_err_counter[s_key]['errors_corrected_by_eccdelayed'])
+ elif s1_key.lower() == 'errors_corrected_by_rereads_rewrites':
+ smart_data.fields['ErrorCorrectedByRereadsRewrites%s_raw' % s_key.capitalize()] = \
+ int(s_err_counter[s_key]['errors_corrected_by_rereads_rewrites'])
+ elif s1_key.lower() == 'total_errors_corrected':
+ smart_data.fields['TotalErrorsCorrected%s_raw' % s_key.capitalize()] = \
+ int(s_err_counter[s_key]['total_errors_corrected'])
+ elif s1_key.lower() == 'correction_algorithm_invocations':
+ smart_data.fields['CorrectionAlgorithmInvocations%s_raw' % s_key.capitalize()] = \
+ int(s_err_counter[s_key]['correction_algorithm_invocations'])
+ elif s1_key.lower() == 'gigabytes_processed':
+ smart_data.fields['GigaBytesProcessed%s_raw' % s_key.capitalize()] = \
+ float(s_err_counter[s_key]['gigabytes_processed'])
+ elif s1_key.lower() == 'total_uncorrected_errors':
+ smart_data.fields['TotalUncorrectedErrors%s_raw' % s_key.capitalize()] = \
+ int(s_err_counter[s_key]['total_uncorrected_errors'])
+
+ serial_number = s_val.get('serial_number')
+ wwn = s_val.get('wwn', {})
+ wwpn = ''
+ if wwn:
+ wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0))
+ for k in wwn.keys():
+ if k in ['naa', 't10', 'eui', 'iqn']:
+ wwpn = ('%X%s' % (wwn[k], wwpn)).lower()
+ break
+ if wwpn:
+ smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])
+ smart_data.tags['disk_wwn'] = str(wwpn)
+ if serial_number:
+ smart_data.fields['serial_number'] = str(serial_number)
+ else:
+ smart_data.fields['serial_number'] = str(wwpn)
+ elif serial_number:
+ smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])
+ smart_data.fields['serial_number'] = str(serial_number)
+ if wwpn:
+ smart_data.tags['disk_wwn'] = str(wwpn)
+ else:
+ smart_data.tags['disk_wwn'] = str(serial_number)
+ else:
+ smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])
+ smart_data.tags['disk_wwn'] = str(dev_name)
+ smart_data.fields['serial_number'] = str(dev_name)
+ smart_data.tags['primary_key'] = \
+ str('%s%s%s'
+ % (cluster_id,
+ smart_data.tags['host_domain_id'],
+ smart_data.tags['disk_domain_id']))
+ smart_data.timestamp = \
+ time.mktime(datetime.datetime.strptime(
+ s_date, '%Y%m%d-%H%M%S').timetuple())
+ self.data.append(smart_data)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py
new file mode 100644
index 00000000..0f3698ad
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py
@@ -0,0 +1,105 @@
+from __future__ import absolute_import
+
+import socket
+
+from . import AGENT_VERSION, MetricsAgent, MetricsField
+from ...common.clusterdata import ClusterAPI
+
+
+class SAIHostFields(MetricsField):
+    """ SAI Host structure """
+    measurement = 'sai_host'
+
+    def __init__(self):
+        super(SAIHostFields, self).__init__()
+        self.tags['domain_id'] = None
+        self.fields['agenthost'] = None
+        self.tags['agenthost_domain_id'] = None
+        self.fields['cluster_domain_id'] = None
+        self.fields['name'] = None
+        self.fields['host_ip'] = None
+        self.fields['host_ipv6'] = None
+        self.fields['host_uuid'] = None
+        self.fields['os_type'] = str('ceph')
+        self.fields['os_name'] = None
+        self.fields['os_version'] = None
+        self.fields['agent_version'] = str(AGENT_VERSION)
+
+
+class SAIHostAgent(MetricsAgent):
+ measurement = 'sai_host'
+
+ def _collect_data(self):
+ db = ClusterAPI(self._module_inst)
+ cluster_id = db.get_cluster_id()
+
+ hosts = set()
+
+ # Parse osd's host
+ osd_data = db.get_osds()
+ for _data in osd_data:
+ osd_id = _data['osd']
+ if not _data.get('in'):
+ continue
+ osd_addr = _data['public_addr'].split(':')[0]
+ osd_metadata = db.get_osd_metadata(osd_id)
+ if osd_metadata:
+ osd_host = osd_metadata.get('hostname', 'None')
+ if osd_host not in hosts:
+ data = SAIHostFields()
+ data.fields['agenthost'] = str(socket.gethostname())
+ data.tags['agenthost_domain_id'] = cluster_id
+ data.tags['domain_id'] = \
+ str('%s_%s' % (cluster_id, osd_host))
+ data.fields['cluster_domain_id'] = str(cluster_id)
+ data.fields['host_ip'] = osd_addr
+ data.fields['host_uuid'] = \
+ str('%s_%s' % (cluster_id, osd_host))
+ data.fields['os_name'] = \
+ osd_metadata.get('ceph_release', '')
+ data.fields['os_version'] = \
+ osd_metadata.get('ceph_version_short', '')
+ data.fields['name'] = 'osd_{}'.format(osd_host)
+ hosts.add(osd_host)
+ self.data.append(data)
+
+ # Parse mon node host
+ mons = db.get_mons()
+ for _data in mons:
+ mon_host = _data['name']
+ mon_addr = _data['public_addr'].split(':')[0]
+ if mon_host not in hosts:
+ data = SAIHostFields()
+ data.fields['agenthost'] = str(socket.gethostname())
+ data.tags['agenthost_domain_id'] = cluster_id
+ data.tags['domain_id'] = \
+ str('%s_%s' % (cluster_id, mon_host))
+ data.fields['cluster_domain_id'] = str(cluster_id)
+ data.fields['host_ip'] = mon_addr
+ data.fields['host_uuid'] = \
+ str('%s_%s' % (cluster_id, mon_host))
+ data.fields['name'] = 'mon_{}'.format(mon_host)
+ hosts.add((mon_host, mon_addr))
+ self.data.append(data)
+
+ # Parse fs host
+ file_systems = db.get_file_systems()
+ for _data in file_systems:
+ mds_info = _data.get('mdsmap').get('info')
+ for _gid in mds_info:
+ mds_data = mds_info[_gid]
+ mds_addr = mds_data.get('addr').split(':')[0]
+ mds_host = mds_data.get('name')
+ if mds_host not in hosts:
+ data = SAIHostFields()
+ data.fields['agenthost'] = str(socket.gethostname())
+ data.tags['agenthost_domain_id'] = cluster_id
+ data.tags['domain_id'] = \
+ str('%s_%s' % (cluster_id, mds_host))
+ data.fields['cluster_domain_id'] = str(cluster_id)
+ data.fields['host_ip'] = mds_addr
+ data.fields['host_uuid'] = \
+ str('%s_%s' % (cluster_id, mds_host))
+ data.fields['name'] = 'mds_{}'.format(mds_host)
+ hosts.add((mds_host, mds_addr))
+ self.data.append(data)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/predictor.py b/src/pybind/mgr/diskprediction_cloud/agent/predictor.py
new file mode 100644
index 00000000..1fdea46d
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/agent/predictor.py
@@ -0,0 +1,48 @@
+from __future__ import absolute_import
+
+
+class PredictAgent(object):
+
+ measurement = 'predictor'
+
+ def __init__(self, mgr_module, obj_sender, timeout=30):
+ self.data = []
+ self._client = None
+ self._client = obj_sender
+ self._logger = mgr_module.log
+ self._module_inst = mgr_module
+ self._timeout = timeout
+
+ def __nonzero__(self):
+ if not self._module_inst:
+ return False
+ else:
+ return True
+
+ def run(self):
+ result = self._module_inst.get('devices')
+ cluster_id = self._module_inst.get('mon_map').get('fsid')
+ if not result:
+ return -1, '', 'unable to get all devices for prediction'
+ for dev in result.get('devices', []):
+ for location in dev.get('location', []):
+ host = location.get('host')
+ host_domain_id = '{}_{}'.format(cluster_id, host)
+ prediction_data = self._get_cloud_prediction_result(host_domain_id, dev.get('devid'))
+ if prediction_data:
+ self._module_inst.prediction_result[dev.get('devid')] = prediction_data
+
+ def _get_cloud_prediction_result(self, host_domain_id, disk_domain_id):
+ result = {}
+ try:
+ query_info = self._client.query_info(host_domain_id, disk_domain_id, 'sai_disk_prediction')
+ status_code = query_info.status_code
+ if status_code == 200:
+ result = query_info.json()
+ else:
+ resp = query_info.json()
+ if resp.get('error'):
+ self._logger.error(str(resp['error']))
+ except Exception as e:
+ self._logger.error('failed to get %s prediction result %s' % (disk_domain_id, str(e)))
+ return result
diff --git a/src/pybind/mgr/diskprediction_cloud/common/__init__.py b/src/pybind/mgr/diskprediction_cloud/common/__init__.py
new file mode 100644
index 00000000..ce5131b8
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/common/__init__.py
@@ -0,0 +1,61 @@
+from __future__ import absolute_import
+import errno
+from functools import wraps
+import os
+import signal
+
+
+DP_MGR_STAT_OK = 'OK'
+DP_MGR_STAT_WARNING = 'WARNING'
+DP_MGR_STAT_FAILED = 'FAILED'
+DP_MGR_STAT_DISABLED = 'DISABLED'
+DP_MGR_STAT_ENABLED = 'ENABLED'
+
+
class DummyResonse:
    """Stand-in HTTP response used when no real response is available.

    Mimics the small subset of the requests-style response interface the
    callers rely on: ``json()``, ``content`` and ``status_code``.
    """

    def __init__(self):
        self.resp_json = {}
        self.content = 'DummyResponse'
        self.status_code = 404

    def json(self):
        """Return the JSON payload (empty dict by default)."""
        return self.resp_json

    def __str__(self):
        summary = {
            'resp': self.resp_json,
            'content': self.content,
            'status_code': self.status_code,
        }
        return '{}'.format(summary)
+
+
class TimeoutError(Exception):
    """Raised when a watched operation exceeds its time budget."""
+
+
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
    """Decorator that aborts the wrapped call with TimeoutError once a
    deadline expires.

    The deadline defaults to *seconds* but is overridden by the first
    positional argument's ``_timeout`` attribute when present (the common
    case: a bound method whose instance carries ``_timeout``).

    Uses SIGALRM, so it only works on Unix and in the main thread.
    """
    def decorator(func):
        def _handle_timeout(signum, frame):
            raise TimeoutError(error_message)

        def wrapper(*args, **kwargs):
            # Bug fix: the original tested `hasattr(...) is not None`,
            # which is always True (hasattr returns a bool), and then read
            # args[0]._timeout unconditionally — raising AttributeError for
            # any callable whose first argument lacks the attribute. Fall
            # back to the decorator default instead.
            if args and hasattr(args[0], '_timeout'):
                secs = args[0]._timeout
            else:
                secs = seconds
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(secs)
            try:
                result = func(*args, **kwargs)
            finally:
                # Always cancel the pending alarm, even on exception.
                signal.alarm(0)
            return result

        return wraps(func)(wrapper)

    return decorator
+
+
def get_human_readable(size, precision=2):
    """Convert a byte count into a human-readable decimal-unit string.

    :param size: number of bytes
    :param precision: number of digits after the decimal point
    :return: e.g. ``'1.02 KB'`` for ``size=1024``
    """
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
    suffix_index = 0
    # Scale down by decimal units until the value fits (TB is the cap).
    while size > 1000 and suffix_index < 4:
        suffix_index += 1
        size = size / 1000.0
    # Bug fix: the original used '%.*d', which formats the value as a
    # zero-padded integer (e.g. '01 KB'), discarding the fractional part
    # the precision argument was meant to control.
    return '%.*f %s' % (precision, size, suffixes[suffix_index])
diff --git a/src/pybind/mgr/diskprediction_cloud/common/client_pb2.py b/src/pybind/mgr/diskprediction_cloud/common/client_pb2.py
new file mode 100644
index 00000000..9f65c731
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/common/client_pb2.py
@@ -0,0 +1,1775 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: mainServer.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='mainServer.proto',
+ package='proto',
+ syntax='proto3',
+ serialized_pb=_b('\n\x10mainServer.proto\x12\x05proto\x1a\x1cgoogle/api/annotations.proto\"\x07\n\x05\x45mpty\"#\n\x10GeneralMsgOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\")\n\x16GeneralHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x1d\n\nPingOutout\x12\x0f\n\x07message\x18\x01 \x01(\t\"*\n\tTestInput\x12\x1d\n\x06people\x18\x01 \x03(\x0b\x32\r.proto.Person\"\xbe\x01\n\nTestOutput\x12\x10\n\x08strArray\x18\x01 \x03(\t\x12\x31\n\x08mapValue\x18\x02 \x03(\x0b\x32\x1f.proto.TestOutput.MapValueEntry\x12\x19\n\x02pn\x18\x04 \x01(\x0b\x32\r.proto.Person\x12\x1f\n\x07profile\x18\x03 \x03(\x0b\x32\x0e.proto.Profile\x1a/\n\rMapValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcf\x01\n\x06Person\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x03\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12)\n\x06phones\x18\x04 \x03(\x0b\x32\x19.proto.Person.PhoneNumber\x1a\x44\n\x0bPhoneNumber\x12\x0e\n\x06number\x18\x01 \x01(\t\x12%\n\x04type\x18\x02 \x01(\x0e\x32\x17.proto.Person.PhoneType\"+\n\tPhoneType\x12\n\n\x06MOBILE\x10\x00\x12\x08\n\x04HOME\x10\x01\x12\x08\n\x04WORK\x10\x02\"\xa9\x01\n\x07Profile\x12%\n\x08\x66ileInfo\x18\x01 \x01(\x0b\x32\x13.proto.Profile.File\x1aw\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\ttypeInt32\x18\x02 \x01(\x05\x12\x11\n\ttypeInt64\x18\x03 \x01(\x03\x12\x11\n\ttypeFloat\x18\x04 \x01(\x02\x12\x12\n\ntypeDouble\x18\x05 \x01(\x01\x12\x14\n\x0c\x62ooleanValue\x18\x06 \x01(\x08\"4\n\x15GetUsersByStatusInput\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\":\n\x16GetUsersByStatusOutput\x12 \n\x05users\x18\x01 \x03(\x0b\x32\x11.proto.UserOutput\")\n\x16\x41\x63\x63ountHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\nLoginInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\"\xf2\x01\n\nUserOutput\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05phone\x18\x04 
\x01(\t\x12\x11\n\tfirstName\x18\x05 \x01(\t\x12\x10\n\x08lastName\x18\x06 \x01(\t\x12\x13\n\x0b\x63reatedTime\x18\x07 \x01(\t\x12\x11\n\tnamespace\x18\x08 \x01(\t\x12\x12\n\ndomainName\x18\t \x01(\t\x12\x0f\n\x07\x63ompany\x18\n \x01(\t\x12\x0b\n\x03url\x18\x0b \x01(\t\x12\x14\n\x0c\x61gentAccount\x18\x0c \x01(\t\x12\x15\n\ragentPassword\x18\r \x01(\t\"s\n\x0bSingupInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\r\n\x05phone\x18\x02 \x01(\t\x12\x11\n\tfirstName\x18\x03 \x01(\t\x12\x10\n\x08lastName\x18\x04 \x01(\t\x12\x10\n\x08password\x18\x05 \x01(\t\x12\x0f\n\x07\x63ompany\x18\x06 \x01(\t\"\x1f\n\x0cSingupOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\x0f\x44\x65leteUserInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"C\n\x15UpdateUserStatusInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\"\'\n\x16ResendConfirmCodeInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\"+\n\x0c\x43onfirmInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\"$\n\x11\x44PHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"n\n\x17\x44PGetPhysicalDisksInput\x12\x0f\n\x07hostIds\x18\x01 \x01(\t\x12\x0b\n\x03ids\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"{\n\x19\x44PGetDisksPredictionInput\x12\x17\n\x0fphysicalDiskIds\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"\x1e\n\x0e\x44PBinaryOutput\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\",\n\x19\x43ollectionHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\"\n\x10PostMetricsInput\x12\x0e\n\x06points\x18\x01 \x03(\t\" \n\x10PostDBRelayInput\x12\x0c\n\x04\x63mds\x18\x01 \x03(\t\":\n\x17\x43ollectionMessageOutput\x12\x0e\n\x06status\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 
\x01(\t2\x85\x02\n\x07General\x12\x63\n\x10GeneralHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.GeneralHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/general/heartbeat\x12\x46\n\x04Ping\x12\x0c.proto.Empty\x1a\x11.proto.PingOutout\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/general/ping\x12M\n\x04Test\x12\x10.proto.TestInput\x1a\x11.proto.TestOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/general/test:\x01*2\xa4\x06\n\x07\x41\x63\x63ount\x12\x63\n\x10\x41\x63\x63ountHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.AccountHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/account/heartbeat\x12N\n\x05Login\x12\x11.proto.LoginInput\x1a\x11.proto.UserOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\"\x14/apis/v2/users/login:\x01*\x12S\n\x06Signup\x12\x12.proto.SingupInput\x1a\x13.proto.SingupOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/users/signup:\x01*\x12r\n\x11ResendConfirmCode\x12\x1d.proto.ResendConfirmCodeInput\x1a\x17.proto.GeneralMsgOutput\"%\x82\xd3\xe4\x93\x02\x1f\"\x1a/apis/v2/users/confirmcode:\x01*\x12_\n\x07\x43onfirm\x12\x13.proto.ConfirmInput\x1a\x17.proto.GeneralMsgOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/users/confirmation:\x01*\x12g\n\x10GetUsersByStatus\x12\x1c.proto.GetUsersByStatusInput\x1a\x1d.proto.GetUsersByStatusOutput\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/apis/v2/users\x12\x63\n\nDeleteUser\x12\x16.proto.DeleteUserInput\x1a\x17.proto.GeneralMsgOutput\"$\x82\xd3\xe4\x93\x02\x1e*\x1c/apis/v2/users/{email}/{key}\x12l\n\x10UpdateUserStatus\x12\x1c.proto.UpdateUserStatusInput\x1a\x17.proto.GeneralMsgOutput\"!\x82\xd3\xe4\x93\x02\x1b\x1a\x16/apis/v2/users/{email}:\x01*2\xcf\x02\n\x0b\x44iskprophet\x12T\n\x0b\x44PHeartbeat\x12\x0c.proto.Empty\x1a\x18.proto.DPHeartbeatOutput\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/dp/heartbeat\x12l\n\x12\x44PGetPhysicalDisks\x12\x1e.proto.DPGetPhysicalDisksInput\x1a\x15.proto.DPBinaryOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\x12\x17/apis/v2/physical-disks\x12|\n\x14\x44PGetDisksPrediction\x12 
.proto.DPGetDisksPredictionInput\x1a\x15.proto.DPBinaryOutput\"+\x82\xd3\xe4\x93\x02%\x12#/apis/v2/physical-disks/predictions2\xdb\x02\n\nCollection\x12l\n\x13\x43ollectionHeartbeat\x12\x0c.proto.Empty\x1a .proto.CollectionHeartbeatOutput\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/apis/v2/collection/heartbeat\x12o\n\x0bPostDBRelay\x12\x17.proto.PostDBRelayInput\x1a\x1e.proto.CollectionMessageOutput\"\'\x82\xd3\xe4\x93\x02!\"\x1c/apis/v2/collection/relation:\x01*\x12n\n\x0bPostMetrics\x12\x17.proto.PostMetricsInput\x1a\x1e.proto.CollectionMessageOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/collection/metrics:\x01*b\x06proto3')
+ ,
+ dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
+
+
+
+_PERSON_PHONETYPE = _descriptor.EnumDescriptor(
+ name='PhoneType',
+ full_name='proto.Person.PhoneType',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='MOBILE', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='HOME', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='WORK', index=2, number=2,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=579,
+ serialized_end=622,
+)
+_sym_db.RegisterEnumDescriptor(_PERSON_PHONETYPE)
+
+
+_EMPTY = _descriptor.Descriptor(
+ name='Empty',
+ full_name='proto.Empty',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=57,
+ serialized_end=64,
+)
+
+
+_GENERALMSGOUTPUT = _descriptor.Descriptor(
+ name='GeneralMsgOutput',
+ full_name='proto.GeneralMsgOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='message', full_name='proto.GeneralMsgOutput.message', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=66,
+ serialized_end=101,
+)
+
+
+_GENERALHEARTBEATOUTPUT = _descriptor.Descriptor(
+ name='GeneralHeartbeatOutput',
+ full_name='proto.GeneralHeartbeatOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='message', full_name='proto.GeneralHeartbeatOutput.message', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=103,
+ serialized_end=144,
+)
+
+
+_PINGOUTOUT = _descriptor.Descriptor(
+ name='PingOutout',
+ full_name='proto.PingOutout',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='message', full_name='proto.PingOutout.message', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=146,
+ serialized_end=175,
+)
+
+
+_TESTINPUT = _descriptor.Descriptor(
+ name='TestInput',
+ full_name='proto.TestInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='people', full_name='proto.TestInput.people', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=177,
+ serialized_end=219,
+)
+
+
+_TESTOUTPUT_MAPVALUEENTRY = _descriptor.Descriptor(
+ name='MapValueEntry',
+ full_name='proto.TestOutput.MapValueEntry',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='key', full_name='proto.TestOutput.MapValueEntry.key', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='proto.TestOutput.MapValueEntry.value', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=365,
+ serialized_end=412,
+)
+
+_TESTOUTPUT = _descriptor.Descriptor(
+ name='TestOutput',
+ full_name='proto.TestOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='strArray', full_name='proto.TestOutput.strArray', index=0,
+ number=1, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='mapValue', full_name='proto.TestOutput.mapValue', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='pn', full_name='proto.TestOutput.pn', index=2,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='profile', full_name='proto.TestOutput.profile', index=3,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_TESTOUTPUT_MAPVALUEENTRY, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=222,
+ serialized_end=412,
+)
+
+
+_PERSON_PHONENUMBER = _descriptor.Descriptor(
+ name='PhoneNumber',
+ full_name='proto.Person.PhoneNumber',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='number', full_name='proto.Person.PhoneNumber.number', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='type', full_name='proto.Person.PhoneNumber.type', index=1,
+ number=2, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=509,
+ serialized_end=577,
+)
+
+_PERSON = _descriptor.Descriptor(
+ name='Person',
+ full_name='proto.Person',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='proto.Person.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='id', full_name='proto.Person.id', index=1,
+ number=2, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='email', full_name='proto.Person.email', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='phones', full_name='proto.Person.phones', index=3,
+ number=4, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_PERSON_PHONENUMBER, ],
+ enum_types=[
+ _PERSON_PHONETYPE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=415,
+ serialized_end=622,
+)
+
+
+_PROFILE_FILE = _descriptor.Descriptor(
+ name='File',
+ full_name='proto.Profile.File',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='proto.Profile.File.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='typeInt32', full_name='proto.Profile.File.typeInt32', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='typeInt64', full_name='proto.Profile.File.typeInt64', index=2,
+ number=3, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='typeFloat', full_name='proto.Profile.File.typeFloat', index=3,
+ number=4, type=2, cpp_type=6, label=1,
+ has_default_value=False, default_value=float(0),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='typeDouble', full_name='proto.Profile.File.typeDouble', index=4,
+ number=5, type=1, cpp_type=5, label=1,
+ has_default_value=False, default_value=float(0),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='booleanValue', full_name='proto.Profile.File.booleanValue', index=5,
+ number=6, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=675,
+ serialized_end=794,
+)
+
+_PROFILE = _descriptor.Descriptor(
+ name='Profile',
+ full_name='proto.Profile',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='fileInfo', full_name='proto.Profile.fileInfo', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_PROFILE_FILE, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=625,
+ serialized_end=794,
+)
+
+
+_GETUSERSBYSTATUSINPUT = _descriptor.Descriptor(
+ name='GetUsersByStatusInput',
+ full_name='proto.GetUsersByStatusInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='status', full_name='proto.GetUsersByStatusInput.status', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='key', full_name='proto.GetUsersByStatusInput.key', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=796,
+ serialized_end=848,
+)
+
+
+_GETUSERSBYSTATUSOUTPUT = _descriptor.Descriptor(
+ name='GetUsersByStatusOutput',
+ full_name='proto.GetUsersByStatusOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='users', full_name='proto.GetUsersByStatusOutput.users', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=850,
+ serialized_end=908,
+)
+
+
+_ACCOUNTHEARTBEATOUTPUT = _descriptor.Descriptor(
+ name='AccountHeartbeatOutput',
+ full_name='proto.AccountHeartbeatOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='message', full_name='proto.AccountHeartbeatOutput.message', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=910,
+ serialized_end=951,
+)
+
+
+_LOGININPUT = _descriptor.Descriptor(
+ name='LoginInput',
+ full_name='proto.LoginInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='email', full_name='proto.LoginInput.email', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='password', full_name='proto.LoginInput.password', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=953,
+ serialized_end=998,
+)
+
+
+_USEROUTPUT = _descriptor.Descriptor(
+ name='UserOutput',
+ full_name='proto.UserOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='proto.UserOutput.id', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='email', full_name='proto.UserOutput.email', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='status', full_name='proto.UserOutput.status', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='phone', full_name='proto.UserOutput.phone', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='firstName', full_name='proto.UserOutput.firstName', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='lastName', full_name='proto.UserOutput.lastName', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='createdTime', full_name='proto.UserOutput.createdTime', index=6,
+ number=7, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='namespace', full_name='proto.UserOutput.namespace', index=7,
+ number=8, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='domainName', full_name='proto.UserOutput.domainName', index=8,
+ number=9, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='company', full_name='proto.UserOutput.company', index=9,
+ number=10, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='url', full_name='proto.UserOutput.url', index=10,
+ number=11, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='agentAccount', full_name='proto.UserOutput.agentAccount', index=11,
+ number=12, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='agentPassword', full_name='proto.UserOutput.agentPassword', index=12,
+ number=13, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1001,
+ serialized_end=1243,
+)
+
+
+_SINGUPINPUT = _descriptor.Descriptor(
+ name='SingupInput',
+ full_name='proto.SingupInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='email', full_name='proto.SingupInput.email', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='phone', full_name='proto.SingupInput.phone', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='firstName', full_name='proto.SingupInput.firstName', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='lastName', full_name='proto.SingupInput.lastName', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='password', full_name='proto.SingupInput.password', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='company', full_name='proto.SingupInput.company', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1245,
+ serialized_end=1360,
+)
+
+
+_SINGUPOUTPUT = _descriptor.Descriptor(
+ name='SingupOutput',
+ full_name='proto.SingupOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='message', full_name='proto.SingupOutput.message', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1362,
+ serialized_end=1393,
+)
+
+
+_DELETEUSERINPUT = _descriptor.Descriptor(
+ name='DeleteUserInput',
+ full_name='proto.DeleteUserInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='email', full_name='proto.DeleteUserInput.email', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='key', full_name='proto.DeleteUserInput.key', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1395,
+ serialized_end=1440,
+)
+
+
+_UPDATEUSERSTATUSINPUT = _descriptor.Descriptor(
+ name='UpdateUserStatusInput',
+ full_name='proto.UpdateUserStatusInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='email', full_name='proto.UpdateUserStatusInput.email', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='key', full_name='proto.UpdateUserStatusInput.key', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='status', full_name='proto.UpdateUserStatusInput.status', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1442,
+ serialized_end=1509,
+)
+
+
+_RESENDCONFIRMCODEINPUT = _descriptor.Descriptor(
+ name='ResendConfirmCodeInput',
+ full_name='proto.ResendConfirmCodeInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='email', full_name='proto.ResendConfirmCodeInput.email', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1511,
+ serialized_end=1550,
+)
+
+
+_CONFIRMINPUT = _descriptor.Descriptor(
+ name='ConfirmInput',
+ full_name='proto.ConfirmInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='email', full_name='proto.ConfirmInput.email', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='code', full_name='proto.ConfirmInput.code', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1552,
+ serialized_end=1595,
+)
+
+
+_DPHEARTBEATOUTPUT = _descriptor.Descriptor(
+ name='DPHeartbeatOutput',
+ full_name='proto.DPHeartbeatOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='message', full_name='proto.DPHeartbeatOutput.message', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1597,
+ serialized_end=1633,
+)
+
+
+_DPGETPHYSICALDISKSINPUT = _descriptor.Descriptor(
+ name='DPGetPhysicalDisksInput',
+ full_name='proto.DPGetPhysicalDisksInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='hostIds', full_name='proto.DPGetPhysicalDisksInput.hostIds', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='ids', full_name='proto.DPGetPhysicalDisksInput.ids', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='limit', full_name='proto.DPGetPhysicalDisksInput.limit', index=2,
+ number=3, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page', full_name='proto.DPGetPhysicalDisksInput.page', index=3,
+ number=4, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='from', full_name='proto.DPGetPhysicalDisksInput.from', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='to', full_name='proto.DPGetPhysicalDisksInput.to', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1635,
+ serialized_end=1745,
+)
+
+
+_DPGETDISKSPREDICTIONINPUT = _descriptor.Descriptor(
+ name='DPGetDisksPredictionInput',
+ full_name='proto.DPGetDisksPredictionInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='physicalDiskIds', full_name='proto.DPGetDisksPredictionInput.physicalDiskIds', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='status', full_name='proto.DPGetDisksPredictionInput.status', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='limit', full_name='proto.DPGetDisksPredictionInput.limit', index=2,
+ number=3, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page', full_name='proto.DPGetDisksPredictionInput.page', index=3,
+ number=4, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='from', full_name='proto.DPGetDisksPredictionInput.from', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='to', full_name='proto.DPGetDisksPredictionInput.to', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1747,
+ serialized_end=1870,
+)
+
+
+_DPBINARYOUTPUT = _descriptor.Descriptor(
+ name='DPBinaryOutput',
+ full_name='proto.DPBinaryOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='data', full_name='proto.DPBinaryOutput.data', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1872,
+ serialized_end=1902,
+)
+
+
+_COLLECTIONHEARTBEATOUTPUT = _descriptor.Descriptor(
+ name='CollectionHeartbeatOutput',
+ full_name='proto.CollectionHeartbeatOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='message', full_name='proto.CollectionHeartbeatOutput.message', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1904,
+ serialized_end=1948,
+)
+
+
+_POSTMETRICSINPUT = _descriptor.Descriptor(
+ name='PostMetricsInput',
+ full_name='proto.PostMetricsInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='points', full_name='proto.PostMetricsInput.points', index=0,
+ number=1, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1950,
+ serialized_end=1984,
+)
+
+
+_POSTDBRELAYINPUT = _descriptor.Descriptor(
+ name='PostDBRelayInput',
+ full_name='proto.PostDBRelayInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='cmds', full_name='proto.PostDBRelayInput.cmds', index=0,
+ number=1, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1986,
+ serialized_end=2018,
+)
+
+
+_COLLECTIONMESSAGEOUTPUT = _descriptor.Descriptor(
+ name='CollectionMessageOutput',
+ full_name='proto.CollectionMessageOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='status', full_name='proto.CollectionMessageOutput.status', index=0,
+ number=1, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='message', full_name='proto.CollectionMessageOutput.message', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2020,
+ serialized_end=2078,
+)
+
+_TESTINPUT.fields_by_name['people'].message_type = _PERSON
+_TESTOUTPUT_MAPVALUEENTRY.containing_type = _TESTOUTPUT
+_TESTOUTPUT.fields_by_name['mapValue'].message_type = _TESTOUTPUT_MAPVALUEENTRY
+_TESTOUTPUT.fields_by_name['pn'].message_type = _PERSON
+_TESTOUTPUT.fields_by_name['profile'].message_type = _PROFILE
+_PERSON_PHONENUMBER.fields_by_name['type'].enum_type = _PERSON_PHONETYPE
+_PERSON_PHONENUMBER.containing_type = _PERSON
+_PERSON.fields_by_name['phones'].message_type = _PERSON_PHONENUMBER
+_PERSON_PHONETYPE.containing_type = _PERSON
+_PROFILE_FILE.containing_type = _PROFILE
+_PROFILE.fields_by_name['fileInfo'].message_type = _PROFILE_FILE
+_GETUSERSBYSTATUSOUTPUT.fields_by_name['users'].message_type = _USEROUTPUT
+DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
+DESCRIPTOR.message_types_by_name['GeneralMsgOutput'] = _GENERALMSGOUTPUT
+DESCRIPTOR.message_types_by_name['GeneralHeartbeatOutput'] = _GENERALHEARTBEATOUTPUT
+DESCRIPTOR.message_types_by_name['PingOutout'] = _PINGOUTOUT
+DESCRIPTOR.message_types_by_name['TestInput'] = _TESTINPUT
+DESCRIPTOR.message_types_by_name['TestOutput'] = _TESTOUTPUT
+DESCRIPTOR.message_types_by_name['Person'] = _PERSON
+DESCRIPTOR.message_types_by_name['Profile'] = _PROFILE
+DESCRIPTOR.message_types_by_name['GetUsersByStatusInput'] = _GETUSERSBYSTATUSINPUT
+DESCRIPTOR.message_types_by_name['GetUsersByStatusOutput'] = _GETUSERSBYSTATUSOUTPUT
+DESCRIPTOR.message_types_by_name['AccountHeartbeatOutput'] = _ACCOUNTHEARTBEATOUTPUT
+DESCRIPTOR.message_types_by_name['LoginInput'] = _LOGININPUT
+DESCRIPTOR.message_types_by_name['UserOutput'] = _USEROUTPUT
+DESCRIPTOR.message_types_by_name['SingupInput'] = _SINGUPINPUT
+DESCRIPTOR.message_types_by_name['SingupOutput'] = _SINGUPOUTPUT
+DESCRIPTOR.message_types_by_name['DeleteUserInput'] = _DELETEUSERINPUT
+DESCRIPTOR.message_types_by_name['UpdateUserStatusInput'] = _UPDATEUSERSTATUSINPUT
+DESCRIPTOR.message_types_by_name['ResendConfirmCodeInput'] = _RESENDCONFIRMCODEINPUT
+DESCRIPTOR.message_types_by_name['ConfirmInput'] = _CONFIRMINPUT
+DESCRIPTOR.message_types_by_name['DPHeartbeatOutput'] = _DPHEARTBEATOUTPUT
+DESCRIPTOR.message_types_by_name['DPGetPhysicalDisksInput'] = _DPGETPHYSICALDISKSINPUT
+DESCRIPTOR.message_types_by_name['DPGetDisksPredictionInput'] = _DPGETDISKSPREDICTIONINPUT
+DESCRIPTOR.message_types_by_name['DPBinaryOutput'] = _DPBINARYOUTPUT
+DESCRIPTOR.message_types_by_name['CollectionHeartbeatOutput'] = _COLLECTIONHEARTBEATOUTPUT
+DESCRIPTOR.message_types_by_name['PostMetricsInput'] = _POSTMETRICSINPUT
+DESCRIPTOR.message_types_by_name['PostDBRelayInput'] = _POSTDBRELAYINPUT
+DESCRIPTOR.message_types_by_name['CollectionMessageOutput'] = _COLLECTIONMESSAGEOUTPUT
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict(
+ DESCRIPTOR = _EMPTY,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.Empty)
+ ))
+_sym_db.RegisterMessage(Empty)
+
+GeneralMsgOutput = _reflection.GeneratedProtocolMessageType('GeneralMsgOutput', (_message.Message,), dict(
+ DESCRIPTOR = _GENERALMSGOUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.GeneralMsgOutput)
+ ))
+_sym_db.RegisterMessage(GeneralMsgOutput)
+
+GeneralHeartbeatOutput = _reflection.GeneratedProtocolMessageType('GeneralHeartbeatOutput', (_message.Message,), dict(
+ DESCRIPTOR = _GENERALHEARTBEATOUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.GeneralHeartbeatOutput)
+ ))
+_sym_db.RegisterMessage(GeneralHeartbeatOutput)
+
+PingOutout = _reflection.GeneratedProtocolMessageType('PingOutout', (_message.Message,), dict(
+ DESCRIPTOR = _PINGOUTOUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.PingOutout)
+ ))
+_sym_db.RegisterMessage(PingOutout)
+
+TestInput = _reflection.GeneratedProtocolMessageType('TestInput', (_message.Message,), dict(
+ DESCRIPTOR = _TESTINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.TestInput)
+ ))
+_sym_db.RegisterMessage(TestInput)
+
+TestOutput = _reflection.GeneratedProtocolMessageType('TestOutput', (_message.Message,), dict(
+
+ MapValueEntry = _reflection.GeneratedProtocolMessageType('MapValueEntry', (_message.Message,), dict(
+ DESCRIPTOR = _TESTOUTPUT_MAPVALUEENTRY,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.TestOutput.MapValueEntry)
+ ))
+ ,
+ DESCRIPTOR = _TESTOUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.TestOutput)
+ ))
+_sym_db.RegisterMessage(TestOutput)
+_sym_db.RegisterMessage(TestOutput.MapValueEntry)
+
+Person = _reflection.GeneratedProtocolMessageType('Person', (_message.Message,), dict(
+
+ PhoneNumber = _reflection.GeneratedProtocolMessageType('PhoneNumber', (_message.Message,), dict(
+ DESCRIPTOR = _PERSON_PHONENUMBER,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.Person.PhoneNumber)
+ ))
+ ,
+ DESCRIPTOR = _PERSON,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.Person)
+ ))
+_sym_db.RegisterMessage(Person)
+_sym_db.RegisterMessage(Person.PhoneNumber)
+
+Profile = _reflection.GeneratedProtocolMessageType('Profile', (_message.Message,), dict(
+
+ File = _reflection.GeneratedProtocolMessageType('File', (_message.Message,), dict(
+ DESCRIPTOR = _PROFILE_FILE,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.Profile.File)
+ ))
+ ,
+ DESCRIPTOR = _PROFILE,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.Profile)
+ ))
+_sym_db.RegisterMessage(Profile)
+_sym_db.RegisterMessage(Profile.File)
+
+GetUsersByStatusInput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusInput', (_message.Message,), dict(
+ DESCRIPTOR = _GETUSERSBYSTATUSINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusInput)
+ ))
+_sym_db.RegisterMessage(GetUsersByStatusInput)
+
+GetUsersByStatusOutput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusOutput', (_message.Message,), dict(
+ DESCRIPTOR = _GETUSERSBYSTATUSOUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusOutput)
+ ))
+_sym_db.RegisterMessage(GetUsersByStatusOutput)
+
+AccountHeartbeatOutput = _reflection.GeneratedProtocolMessageType('AccountHeartbeatOutput', (_message.Message,), dict(
+ DESCRIPTOR = _ACCOUNTHEARTBEATOUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.AccountHeartbeatOutput)
+ ))
+_sym_db.RegisterMessage(AccountHeartbeatOutput)
+
+LoginInput = _reflection.GeneratedProtocolMessageType('LoginInput', (_message.Message,), dict(
+ DESCRIPTOR = _LOGININPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.LoginInput)
+ ))
+_sym_db.RegisterMessage(LoginInput)
+
+UserOutput = _reflection.GeneratedProtocolMessageType('UserOutput', (_message.Message,), dict(
+ DESCRIPTOR = _USEROUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.UserOutput)
+ ))
+_sym_db.RegisterMessage(UserOutput)
+
+SingupInput = _reflection.GeneratedProtocolMessageType('SingupInput', (_message.Message,), dict(
+ DESCRIPTOR = _SINGUPINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.SingupInput)
+ ))
+_sym_db.RegisterMessage(SingupInput)
+
+SingupOutput = _reflection.GeneratedProtocolMessageType('SingupOutput', (_message.Message,), dict(
+ DESCRIPTOR = _SINGUPOUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.SingupOutput)
+ ))
+_sym_db.RegisterMessage(SingupOutput)
+
+DeleteUserInput = _reflection.GeneratedProtocolMessageType('DeleteUserInput', (_message.Message,), dict(
+ DESCRIPTOR = _DELETEUSERINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.DeleteUserInput)
+ ))
+_sym_db.RegisterMessage(DeleteUserInput)
+
+UpdateUserStatusInput = _reflection.GeneratedProtocolMessageType('UpdateUserStatusInput', (_message.Message,), dict(
+ DESCRIPTOR = _UPDATEUSERSTATUSINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.UpdateUserStatusInput)
+ ))
+_sym_db.RegisterMessage(UpdateUserStatusInput)
+
+ResendConfirmCodeInput = _reflection.GeneratedProtocolMessageType('ResendConfirmCodeInput', (_message.Message,), dict(
+ DESCRIPTOR = _RESENDCONFIRMCODEINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.ResendConfirmCodeInput)
+ ))
+_sym_db.RegisterMessage(ResendConfirmCodeInput)
+
+ConfirmInput = _reflection.GeneratedProtocolMessageType('ConfirmInput', (_message.Message,), dict(
+ DESCRIPTOR = _CONFIRMINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.ConfirmInput)
+ ))
+_sym_db.RegisterMessage(ConfirmInput)
+
+DPHeartbeatOutput = _reflection.GeneratedProtocolMessageType('DPHeartbeatOutput', (_message.Message,), dict(
+ DESCRIPTOR = _DPHEARTBEATOUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.DPHeartbeatOutput)
+ ))
+_sym_db.RegisterMessage(DPHeartbeatOutput)
+
+DPGetPhysicalDisksInput = _reflection.GeneratedProtocolMessageType('DPGetPhysicalDisksInput', (_message.Message,), dict(
+ DESCRIPTOR = _DPGETPHYSICALDISKSINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.DPGetPhysicalDisksInput)
+ ))
+_sym_db.RegisterMessage(DPGetPhysicalDisksInput)
+
+DPGetDisksPredictionInput = _reflection.GeneratedProtocolMessageType('DPGetDisksPredictionInput', (_message.Message,), dict(
+ DESCRIPTOR = _DPGETDISKSPREDICTIONINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.DPGetDisksPredictionInput)
+ ))
+_sym_db.RegisterMessage(DPGetDisksPredictionInput)
+
+DPBinaryOutput = _reflection.GeneratedProtocolMessageType('DPBinaryOutput', (_message.Message,), dict(
+ DESCRIPTOR = _DPBINARYOUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.DPBinaryOutput)
+ ))
+_sym_db.RegisterMessage(DPBinaryOutput)
+
+CollectionHeartbeatOutput = _reflection.GeneratedProtocolMessageType('CollectionHeartbeatOutput', (_message.Message,), dict(
+ DESCRIPTOR = _COLLECTIONHEARTBEATOUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.CollectionHeartbeatOutput)
+ ))
+_sym_db.RegisterMessage(CollectionHeartbeatOutput)
+
+PostMetricsInput = _reflection.GeneratedProtocolMessageType('PostMetricsInput', (_message.Message,), dict(
+ DESCRIPTOR = _POSTMETRICSINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.PostMetricsInput)
+ ))
+_sym_db.RegisterMessage(PostMetricsInput)
+
+PostDBRelayInput = _reflection.GeneratedProtocolMessageType('PostDBRelayInput', (_message.Message,), dict(
+ DESCRIPTOR = _POSTDBRELAYINPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.PostDBRelayInput)
+ ))
+_sym_db.RegisterMessage(PostDBRelayInput)
+
+CollectionMessageOutput = _reflection.GeneratedProtocolMessageType('CollectionMessageOutput', (_message.Message,), dict(
+ DESCRIPTOR = _COLLECTIONMESSAGEOUTPUT,
+ __module__ = 'mainServer_pb2'
+ # @@protoc_insertion_point(class_scope:proto.CollectionMessageOutput)
+ ))
+_sym_db.RegisterMessage(CollectionMessageOutput)
+
+
+_TESTOUTPUT_MAPVALUEENTRY.has_options = True
+_TESTOUTPUT_MAPVALUEENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
+
+_GENERAL = _descriptor.ServiceDescriptor(
+ name='General',
+ full_name='proto.General',
+ file=DESCRIPTOR,
+ index=0,
+ options=None,
+ serialized_start=2081,
+ serialized_end=2342,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='GeneralHeartbeat',
+ full_name='proto.General.GeneralHeartbeat',
+ index=0,
+ containing_service=None,
+ input_type=_EMPTY,
+ output_type=_GENERALHEARTBEATOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/general/heartbeat')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='Ping',
+ full_name='proto.General.Ping',
+ index=1,
+ containing_service=None,
+ input_type=_EMPTY,
+ output_type=_PINGOUTOUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/general/ping')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='Test',
+ full_name='proto.General.Test',
+ index=2,
+ containing_service=None,
+ input_type=_TESTINPUT,
+ output_type=_TESTOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/general/test:\001*')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_GENERAL)
+
+DESCRIPTOR.services_by_name['General'] = _GENERAL
+
+
+_ACCOUNT = _descriptor.ServiceDescriptor(
+ name='Account',
+ full_name='proto.Account',
+ file=DESCRIPTOR,
+ index=1,
+ options=None,
+ serialized_start=2345,
+ serialized_end=3149,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='AccountHeartbeat',
+ full_name='proto.Account.AccountHeartbeat',
+ index=0,
+ containing_service=None,
+ input_type=_EMPTY,
+ output_type=_ACCOUNTHEARTBEATOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/account/heartbeat')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='Login',
+ full_name='proto.Account.Login',
+ index=1,
+ containing_service=None,
+ input_type=_LOGININPUT,
+ output_type=_USEROUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\"\024/apis/v2/users/login:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='Signup',
+ full_name='proto.Account.Signup',
+ index=2,
+ containing_service=None,
+ input_type=_SINGUPINPUT,
+ output_type=_SINGUPOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/users/signup:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='ResendConfirmCode',
+ full_name='proto.Account.ResendConfirmCode',
+ index=3,
+ containing_service=None,
+ input_type=_RESENDCONFIRMCODEINPUT,
+ output_type=_GENERALMSGOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\"\032/apis/v2/users/confirmcode:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='Confirm',
+ full_name='proto.Account.Confirm',
+ index=4,
+ containing_service=None,
+ input_type=_CONFIRMINPUT,
+ output_type=_GENERALMSGOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/users/confirmation:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='GetUsersByStatus',
+ full_name='proto.Account.GetUsersByStatus',
+ index=5,
+ containing_service=None,
+ input_type=_GETUSERSBYSTATUSINPUT,
+ output_type=_GETUSERSBYSTATUSOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\020\022\016/apis/v2/users')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='DeleteUser',
+ full_name='proto.Account.DeleteUser',
+ index=6,
+ containing_service=None,
+ input_type=_DELETEUSERINPUT,
+ output_type=_GENERALMSGOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\036*\034/apis/v2/users/{email}/{key}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='UpdateUserStatus',
+ full_name='proto.Account.UpdateUserStatus',
+ index=7,
+ containing_service=None,
+ input_type=_UPDATEUSERSTATUSINPUT,
+ output_type=_GENERALMSGOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\033\032\026/apis/v2/users/{email}:\001*')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_ACCOUNT)
+
+DESCRIPTOR.services_by_name['Account'] = _ACCOUNT
+
+
+_DISKPROPHET = _descriptor.ServiceDescriptor(
+ name='Diskprophet',
+ full_name='proto.Diskprophet',
+ file=DESCRIPTOR,
+ index=2,
+ options=None,
+ serialized_start=3152,
+ serialized_end=3487,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='DPHeartbeat',
+ full_name='proto.Diskprophet.DPHeartbeat',
+ index=0,
+ containing_service=None,
+ input_type=_EMPTY,
+ output_type=_DPHEARTBEATOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/dp/heartbeat')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='DPGetPhysicalDisks',
+ full_name='proto.Diskprophet.DPGetPhysicalDisks',
+ index=1,
+ containing_service=None,
+ input_type=_DPGETPHYSICALDISKSINPUT,
+ output_type=_DPBINARYOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\022\027/apis/v2/physical-disks')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='DPGetDisksPrediction',
+ full_name='proto.Diskprophet.DPGetDisksPrediction',
+ index=2,
+ containing_service=None,
+ input_type=_DPGETDISKSPREDICTIONINPUT,
+ output_type=_DPBINARYOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002%\022#/apis/v2/physical-disks/predictions')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_DISKPROPHET)
+
+DESCRIPTOR.services_by_name['Diskprophet'] = _DISKPROPHET
+
+
+_COLLECTION = _descriptor.ServiceDescriptor(
+ name='Collection',
+ full_name='proto.Collection',
+ file=DESCRIPTOR,
+ index=3,
+ options=None,
+ serialized_start=3490,
+ serialized_end=3837,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='CollectionHeartbeat',
+ full_name='proto.Collection.CollectionHeartbeat',
+ index=0,
+ containing_service=None,
+ input_type=_EMPTY,
+ output_type=_COLLECTIONHEARTBEATOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\022\035/apis/v2/collection/heartbeat')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='PostDBRelay',
+ full_name='proto.Collection.PostDBRelay',
+ index=1,
+ containing_service=None,
+ input_type=_POSTDBRELAYINPUT,
+ output_type=_COLLECTIONMESSAGEOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002!\"\034/apis/v2/collection/relation:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='PostMetrics',
+ full_name='proto.Collection.PostMetrics',
+ index=2,
+ containing_service=None,
+ input_type=_POSTMETRICSINPUT,
+ output_type=_COLLECTIONMESSAGEOUTPUT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/collection/metrics:\001*')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_COLLECTION)
+
+DESCRIPTOR.services_by_name['Collection'] = _COLLECTION
+
+# @@protoc_insertion_point(module_scope)
diff --git a/src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py b/src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py
new file mode 100644
index 00000000..c1c32178
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py
@@ -0,0 +1,395 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+import client_pb2 as mainServer__pb2
+
+
# NOTE: generated by the gRPC Python protocol compiler plugin (see file
# header) -- do not edit by hand; regenerate from the .proto instead.
class GeneralStub(object):
  """-------------------------- General -------------------------------------

  Client-side stub: exposes each proto.General RPC as a callable attribute.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    self.GeneralHeartbeat = channel.unary_unary(
        '/proto.General/GeneralHeartbeat',
        request_serializer=mainServer__pb2.Empty.SerializeToString,
        response_deserializer=mainServer__pb2.GeneralHeartbeatOutput.FromString,
        )
    self.Ping = channel.unary_unary(
        '/proto.General/Ping',
        request_serializer=mainServer__pb2.Empty.SerializeToString,
        response_deserializer=mainServer__pb2.PingOutout.FromString,
        )
    self.Test = channel.unary_unary(
        '/proto.General/Test',
        request_serializer=mainServer__pb2.TestInput.SerializeToString,
        response_deserializer=mainServer__pb2.TestOutput.FromString,
        )


class GeneralServicer(object):
  """-------------------------- General -------------------------------------

  Server-side skeleton: subclass and override the RPC methods below.
  Every method defaults to failing the call with UNIMPLEMENTED.
  """

  def GeneralHeartbeat(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Ping(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Test(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')


def add_GeneralServicer_to_server(servicer, server):
  # Registers *servicer*'s handlers for the proto.General service on *server*.
  rpc_method_handlers = {
      'GeneralHeartbeat': grpc.unary_unary_rpc_method_handler(
          servicer.GeneralHeartbeat,
          request_deserializer=mainServer__pb2.Empty.FromString,
          response_serializer=mainServer__pb2.GeneralHeartbeatOutput.SerializeToString,
      ),
      'Ping': grpc.unary_unary_rpc_method_handler(
          servicer.Ping,
          request_deserializer=mainServer__pb2.Empty.FromString,
          response_serializer=mainServer__pb2.PingOutout.SerializeToString,
      ),
      'Test': grpc.unary_unary_rpc_method_handler(
          servicer.Test,
          request_deserializer=mainServer__pb2.TestInput.FromString,
          response_serializer=mainServer__pb2.TestOutput.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'proto.General', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
+
+
# NOTE: generated by the gRPC Python protocol compiler plugin (see file
# header) -- do not edit by hand; regenerate from the .proto instead.
# ('Singup' / 'PingOutout' spellings mirror the .proto message names.)
class AccountStub(object):
  """-------------------------- SERVER ACCOUNT ------------------------------

  Client-side stub: exposes each proto.Account RPC as a callable attribute.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    self.AccountHeartbeat = channel.unary_unary(
        '/proto.Account/AccountHeartbeat',
        request_serializer=mainServer__pb2.Empty.SerializeToString,
        response_deserializer=mainServer__pb2.AccountHeartbeatOutput.FromString,
        )
    self.Login = channel.unary_unary(
        '/proto.Account/Login',
        request_serializer=mainServer__pb2.LoginInput.SerializeToString,
        response_deserializer=mainServer__pb2.UserOutput.FromString,
        )
    self.Signup = channel.unary_unary(
        '/proto.Account/Signup',
        request_serializer=mainServer__pb2.SingupInput.SerializeToString,
        response_deserializer=mainServer__pb2.SingupOutput.FromString,
        )
    self.ResendConfirmCode = channel.unary_unary(
        '/proto.Account/ResendConfirmCode',
        request_serializer=mainServer__pb2.ResendConfirmCodeInput.SerializeToString,
        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
        )
    self.Confirm = channel.unary_unary(
        '/proto.Account/Confirm',
        request_serializer=mainServer__pb2.ConfirmInput.SerializeToString,
        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
        )
    self.GetUsersByStatus = channel.unary_unary(
        '/proto.Account/GetUsersByStatus',
        request_serializer=mainServer__pb2.GetUsersByStatusInput.SerializeToString,
        response_deserializer=mainServer__pb2.GetUsersByStatusOutput.FromString,
        )
    self.DeleteUser = channel.unary_unary(
        '/proto.Account/DeleteUser',
        request_serializer=mainServer__pb2.DeleteUserInput.SerializeToString,
        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
        )
    self.UpdateUserStatus = channel.unary_unary(
        '/proto.Account/UpdateUserStatus',
        request_serializer=mainServer__pb2.UpdateUserStatusInput.SerializeToString,
        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
        )


class AccountServicer(object):
  """-------------------------- SERVER ACCOUNT ------------------------------

  Server-side skeleton: subclass and override the RPC methods below.
  Every method defaults to failing the call with UNIMPLEMENTED.
  """

  def AccountHeartbeat(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Login(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Signup(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ResendConfirmCode(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Confirm(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetUsersByStatus(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def DeleteUser(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def UpdateUserStatus(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')


def add_AccountServicer_to_server(servicer, server):
  # Registers *servicer*'s handlers for the proto.Account service on *server*.
  rpc_method_handlers = {
      'AccountHeartbeat': grpc.unary_unary_rpc_method_handler(
          servicer.AccountHeartbeat,
          request_deserializer=mainServer__pb2.Empty.FromString,
          response_serializer=mainServer__pb2.AccountHeartbeatOutput.SerializeToString,
      ),
      'Login': grpc.unary_unary_rpc_method_handler(
          servicer.Login,
          request_deserializer=mainServer__pb2.LoginInput.FromString,
          response_serializer=mainServer__pb2.UserOutput.SerializeToString,
      ),
      'Signup': grpc.unary_unary_rpc_method_handler(
          servicer.Signup,
          request_deserializer=mainServer__pb2.SingupInput.FromString,
          response_serializer=mainServer__pb2.SingupOutput.SerializeToString,
      ),
      'ResendConfirmCode': grpc.unary_unary_rpc_method_handler(
          servicer.ResendConfirmCode,
          request_deserializer=mainServer__pb2.ResendConfirmCodeInput.FromString,
          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
      ),
      'Confirm': grpc.unary_unary_rpc_method_handler(
          servicer.Confirm,
          request_deserializer=mainServer__pb2.ConfirmInput.FromString,
          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
      ),
      'GetUsersByStatus': grpc.unary_unary_rpc_method_handler(
          servicer.GetUsersByStatus,
          request_deserializer=mainServer__pb2.GetUsersByStatusInput.FromString,
          response_serializer=mainServer__pb2.GetUsersByStatusOutput.SerializeToString,
      ),
      'DeleteUser': grpc.unary_unary_rpc_method_handler(
          servicer.DeleteUser,
          request_deserializer=mainServer__pb2.DeleteUserInput.FromString,
          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
      ),
      'UpdateUserStatus': grpc.unary_unary_rpc_method_handler(
          servicer.UpdateUserStatus,
          request_deserializer=mainServer__pb2.UpdateUserStatusInput.FromString,
          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'proto.Account', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
+
+
# NOTE: generated by the gRPC Python protocol compiler plugin (see file
# header) -- do not edit by hand; regenerate from the .proto instead.
class DiskprophetStub(object):
  """------------------------ SERVER DISKPROPHET ---------------------------

  Client-side stub: exposes each proto.Diskprophet RPC as a callable.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    self.DPHeartbeat = channel.unary_unary(
        '/proto.Diskprophet/DPHeartbeat',
        request_serializer=mainServer__pb2.Empty.SerializeToString,
        response_deserializer=mainServer__pb2.DPHeartbeatOutput.FromString,
        )
    self.DPGetPhysicalDisks = channel.unary_unary(
        '/proto.Diskprophet/DPGetPhysicalDisks',
        request_serializer=mainServer__pb2.DPGetPhysicalDisksInput.SerializeToString,
        response_deserializer=mainServer__pb2.DPBinaryOutput.FromString,
        )
    self.DPGetDisksPrediction = channel.unary_unary(
        '/proto.Diskprophet/DPGetDisksPrediction',
        request_serializer=mainServer__pb2.DPGetDisksPredictionInput.SerializeToString,
        response_deserializer=mainServer__pb2.DPBinaryOutput.FromString,
        )


class DiskprophetServicer(object):
  """------------------------ SERVER DISKPROPHET ---------------------------

  Server-side skeleton: subclass and override the RPC methods below.
  Every method defaults to failing the call with UNIMPLEMENTED.
  """

  def DPHeartbeat(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def DPGetPhysicalDisks(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def DPGetDisksPrediction(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')


def add_DiskprophetServicer_to_server(servicer, server):
  # Registers *servicer*'s handlers for the proto.Diskprophet service.
  rpc_method_handlers = {
      'DPHeartbeat': grpc.unary_unary_rpc_method_handler(
          servicer.DPHeartbeat,
          request_deserializer=mainServer__pb2.Empty.FromString,
          response_serializer=mainServer__pb2.DPHeartbeatOutput.SerializeToString,
      ),
      'DPGetPhysicalDisks': grpc.unary_unary_rpc_method_handler(
          servicer.DPGetPhysicalDisks,
          request_deserializer=mainServer__pb2.DPGetPhysicalDisksInput.FromString,
          response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString,
      ),
      'DPGetDisksPrediction': grpc.unary_unary_rpc_method_handler(
          servicer.DPGetDisksPrediction,
          request_deserializer=mainServer__pb2.DPGetDisksPredictionInput.FromString,
          response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'proto.Diskprophet', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
+
+
# NOTE: generated by the gRPC Python protocol compiler plugin (see file
# header) -- do not edit by hand; regenerate from the .proto instead.
class CollectionStub(object):
  """------------------------ SERVER Collection ---------------------------

  Client-side stub: exposes each proto.Collection RPC as a callable.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    self.CollectionHeartbeat = channel.unary_unary(
        '/proto.Collection/CollectionHeartbeat',
        request_serializer=mainServer__pb2.Empty.SerializeToString,
        response_deserializer=mainServer__pb2.CollectionHeartbeatOutput.FromString,
        )
    self.PostDBRelay = channel.unary_unary(
        '/proto.Collection/PostDBRelay',
        request_serializer=mainServer__pb2.PostDBRelayInput.SerializeToString,
        response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString,
        )
    self.PostMetrics = channel.unary_unary(
        '/proto.Collection/PostMetrics',
        request_serializer=mainServer__pb2.PostMetricsInput.SerializeToString,
        response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString,
        )


class CollectionServicer(object):
  """------------------------ SERVER Collection ---------------------------

  Server-side skeleton: subclass and override the RPC methods below.
  Every method defaults to failing the call with UNIMPLEMENTED.
  """

  def CollectionHeartbeat(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def PostDBRelay(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def PostMetrics(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')


def add_CollectionServicer_to_server(servicer, server):
  # Registers *servicer*'s handlers for the proto.Collection service.
  rpc_method_handlers = {
      'CollectionHeartbeat': grpc.unary_unary_rpc_method_handler(
          servicer.CollectionHeartbeat,
          request_deserializer=mainServer__pb2.Empty.FromString,
          response_serializer=mainServer__pb2.CollectionHeartbeatOutput.SerializeToString,
      ),
      'PostDBRelay': grpc.unary_unary_rpc_method_handler(
          servicer.PostDBRelay,
          request_deserializer=mainServer__pb2.PostDBRelayInput.FromString,
          response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString,
      ),
      'PostMetrics': grpc.unary_unary_rpc_method_handler(
          servicer.PostMetrics,
          request_deserializer=mainServer__pb2.PostMetricsInput.FromString,
          response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'proto.Collection', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
diff --git a/src/pybind/mgr/diskprediction_cloud/common/clusterdata.py b/src/pybind/mgr/diskprediction_cloud/common/clusterdata.py
new file mode 100644
index 00000000..45add693
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/common/clusterdata.py
@@ -0,0 +1,464 @@
+"""
+Ceph database API
+
+"""
+from __future__ import absolute_import
+
+import json
+import rbd
+from mgr_module import CommandResult
+
# One gibibyte; used to convert the byte counters from 'ceph df' to GB.
GB = 1024 * 1024 * 1024


# Maps each librbd feature bit to its human-readable feature name,
# used by ClusterAPI.format_bitmask to decode an image's feature mask.
RBD_FEATURES_NAME_MAPPING = {
    rbd.RBD_FEATURE_LAYERING: 'layering',
    rbd.RBD_FEATURE_STRIPINGV2: 'striping',
    rbd.RBD_FEATURE_EXCLUSIVE_LOCK: 'exclusive-lock',
    rbd.RBD_FEATURE_OBJECT_MAP: 'object-map',
    rbd.RBD_FEATURE_FAST_DIFF: 'fast-diff',
    rbd.RBD_FEATURE_DEEP_FLATTEN: 'deep-flatten',
    rbd.RBD_FEATURE_JOURNALING: 'journaling',
    rbd.RBD_FEATURE_DATA_POOL: 'data-pool',
    rbd.RBD_FEATURE_OPERATIONS: 'operations',
}
+
+
def differentiate(data1, data2):
    """Return the rate of change between two (time, value) samples.

    >>> differentiate((0, 100), (2, 101))
    0.5
    """
    delta_t = float(data2[0] - data1[0])
    delta_v = data2[1] - data1[1]
    return delta_v / delta_t
+
+
class ClusterAPI(object):
    """Convenience wrapper around the ceph-mgr module interface.

    Gathers the cluster queries used by the diskprediction agents
    (df, osd/mon maps, device health, rbd image info) in one place so
    the agents never talk to ``mgr_module`` directly.
    """

    def __init__(self, module_obj):
        # The MgrModule instance; every query below goes through it.
        self.module = module_obj

    @staticmethod
    def format_bitmask(features):
        """Return the sorted RBD feature names encoded in *features*.

        >>> ClusterAPI.format_bitmask(45)
        ['deep-flatten', 'exclusive-lock', 'layering', 'object-map']
        """
        names = [val for key, val in RBD_FEATURES_NAME_MAPPING.items()
                 if key & features == key]
        return sorted(names)

    def _open_connection(self, pool_name='device_health_metrics'):
        """Return an ioctx for *pool_name*, creating the pool (and enabling
        the mgr_devicehealth application on it) if it does not exist yet."""
        pools = self.module.rados.list_pools()
        if pool_name not in pools:
            self.module.log.debug('create %s pool' % pool_name)
            # create pool
            result = CommandResult('')
            self.module.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd pool create',
                'format': 'json',
                'pool': pool_name,
                'pg_num': 1,
            }), '')
            r, outb, outs = result.wait()
            assert r == 0

            # set pool application so health checks do not warn about it
            result = CommandResult('')
            self.module.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd pool application enable',
                'format': 'json',
                'pool': pool_name,
                'app': 'mgr_devicehealth',
            }), '')
            r, outb, outs = result.wait()
            assert r == 0

        return self.module.rados.open_ioctx(pool_name)

    @classmethod
    def _rbd_disk_usage(cls, image, snaps, whole_object=True):
        """Return (total_used_bytes, {snap_name: used_bytes}) for *image*.

        *snaps* is a list of (id, size, name) tuples, oldest first; each
        snapshot's usage is the diff from the previous snapshot.
        """
        class DUCallback(object):
            # Accumulates the length of every allocated extent that
            # diff_iterate reports.
            def __init__(self):
                self.used_size = 0

            def __call__(self, offset, length, exists):
                if exists:
                    self.used_size += length

        snap_map = {}
        prev_snap = None
        total_used_size = 0
        for _, size, name in snaps:
            image.set_snap(name)
            du_callb = DUCallback()
            image.diff_iterate(0, size, prev_snap, du_callb,
                               whole_object=whole_object)
            snap_map[name] = du_callb.used_size
            total_used_size += du_callb.used_size
            prev_snap = name
        return total_used_size, snap_map

    def _rbd_image(self, ioctx, pool_name, image_name):
        """Return a stat dict for one rbd image: core stat plus features,
        parent, snapshots and (when fast-diff is enabled) disk usage."""
        with rbd.Image(ioctx, image_name) as img:
            stat = img.stat()
            stat['name'] = image_name
            stat['id'] = img.id()
            stat['pool_name'] = pool_name
            features = img.features()
            stat['features'] = features
            stat['features_name'] = self.format_bitmask(features)

            # the following keys are deprecated
            del stat['parent_pool']
            del stat['parent_name']
            stat['timestamp'] = '{}Z'.format(img.create_timestamp()
                                             .isoformat())
            stat['stripe_count'] = img.stripe_count()
            stat['stripe_unit'] = img.stripe_unit()
            stat['data_pool'] = None
            try:
                parent_info = img.parent_info()
                stat['parent'] = {
                    'pool_name': parent_info[0],
                    'image_name': parent_info[1],
                    'snap_name': parent_info[2]
                }
            except rbd.ImageNotFound:
                # no parent image
                stat['parent'] = None
            # snapshots
            stat['snapshots'] = []
            for snap in img.list_snaps():
                snap['timestamp'] = '{}Z'.format(
                    img.get_snap_timestamp(snap['id']).isoformat())
                snap['is_protected'] = img.is_protected_snap(snap['name'])
                snap['used_bytes'] = None
                snap['children'] = []
                img.set_snap(snap['name'])
                for child_pool_name, child_image_name in img.list_children():
                    snap['children'].append({
                        'pool_name': child_pool_name,
                        'image_name': child_image_name
                    })
                stat['snapshots'].append(snap)
            # disk usage -- only computable when fast-diff is enabled
            if 'fast-diff' in stat['features_name']:
                snaps = [(s['id'], s['size'], s['name'])
                         for s in stat['snapshots']]
                snaps.sort(key=lambda s: s[0])
                # sentinel entry (snap name None) for the image head
                snaps += [(snaps[-1][0] + 1 if snaps else 0,
                           stat['size'], None)]
                total_prov_bytes, snaps_prov_bytes = self._rbd_disk_usage(
                    img, snaps, True)
                stat['total_disk_usage'] = total_prov_bytes
                for snap, prov_bytes in snaps_prov_bytes.items():
                    if snap is None:
                        stat['disk_usage'] = prov_bytes
                        continue
                    for ss in stat['snapshots']:
                        if ss['name'] == snap:
                            ss['disk_usage'] = prov_bytes
                            break
            else:
                stat['total_disk_usage'] = None
                stat['disk_usage'] = None
        return stat

    def get_rbd_list(self, pool_name=None):
        """Return stat dicts for every rbd image in *pool_name*
        (or in all pools when *pool_name* is None)."""
        if pool_name:
            pools = [pool_name]
        else:
            pools = [data['pool_name'] for data in self.get_osd_pools()]
        result = []
        for pool in pools:
            rbd_inst = rbd.RBD()
            with self._open_connection(str(pool)) as ioctx:
                names = rbd_inst.list(ioctx)
                for name in names:
                    try:
                        # bugfix: stat the pool currently being iterated --
                        # the original passed the (possibly None) pool_name
                        # argument here, mislabelling every image when
                        # listing all pools.
                        stat = self._rbd_image(ioctx, pool, name)
                    except rbd.ImageNotFound:
                        continue
                    result.append(stat)
        return result

    def get_object_pg_info(self, pool_name, object_name):
        """Return the parsed 'osd map' mon command output (pg placement)
        for one object, or {} on failure."""
        result = CommandResult('')
        pg_info = {}  # renamed from the original 'data_jaon' typo
        self.module.send_command(
            result, 'mon', '', json.dumps({
                'prefix': 'osd map',
                'format': 'json',
                'pool': pool_name,
                'object': object_name,
            }), '')
        ret, outb, outs = result.wait()
        try:
            if outb:
                pg_info = json.loads(outb)
            else:
                self.module.log.error('unable to get %s pg info' % pool_name)
        except Exception as e:
            self.module.log.error(
                'unable to get %s pg, error: %s' % (pool_name, str(e)))
        return pg_info

    @staticmethod
    def _list_objects(ioctx, image_id):
        """List object names in *ioctx*; when *image_id* is given, keep only
        objects whose name's second dot-field equals that id (rbd data
        objects are named '<prefix>.<image_id>.<offset>')."""
        objects = []
        object_iterator = ioctx.list_objects()
        while True:
            try:
                rados_object = object_iterator.next()
                if image_id is None:
                    objects.append(str(rados_object.key))
                else:
                    v = str(rados_object.key).split('.')
                    if len(v) >= 2 and v[1] == image_id:
                        objects.append(str(rados_object.key))
            except StopIteration:
                break
        return objects

    def get_rbd_info(self, pool_name, image_name):
        """Return one image's stat dict augmented with its rados objects
        and their pg placement; {} when the image does not exist."""
        with self._open_connection(pool_name) as ioctx:
            try:
                stat = self._rbd_image(ioctx, pool_name, image_name)
                if stat.get('id'):
                    objects = self._list_objects(ioctx, stat.get('id'))
                    if objects:
                        stat['objects'] = objects
                        stat['pgs'] = list()
                    for obj_name in objects:
                        pgs_data = self.get_object_pg_info(
                            pool_name, obj_name)
                        stat['pgs'].extend([pgs_data])
            except rbd.ImageNotFound:
                stat = {}
        return stat

    def get_pool_objects(self, pool_name, image_id=None):
        """Best-effort list of object names in *pool_name*; [] on error."""
        try:
            with self._open_connection(pool_name) as ioctx:
                objects = self._list_objects(ioctx, image_id)
        except Exception:
            # narrowed from a bare 'except:'; an unreachable or missing
            # pool is deliberately treated as empty
            objects = []
        return objects

    def get_ceph_df_state(self):
        """Return cluster capacity in GB: total_size, avail_size,
        raw_used_size and used_percent."""
        ceph_stats = self.module.get('df').get('stats', {})
        if not ceph_stats:
            # bugfix: use the same 'used_percent' key as the populated
            # path below (the original returned 'raw_used_percent' here,
            # giving callers an inconsistent dict shape)
            return {'total_size': 0, 'avail_size': 0,
                    'raw_used_size': 0, 'used_percent': 0}
        total_size = round(float(ceph_stats.get('total_bytes', 0)) / GB)
        avail_size = round(
            float(ceph_stats.get('total_avail_bytes', 0)) / GB, 2)
        raw_used_size = round(
            float(ceph_stats.get('total_used_bytes', 0)) / GB, 2)
        if total_size != 0:
            raw_used_percent = round(
                float(raw_used_size) / float(total_size) * 100, 2)
        else:
            raw_used_percent = 0
        return {'total_size': total_size, 'avail_size': avail_size,
                'raw_used_size': raw_used_size,
                'used_percent': raw_used_percent}

    def get_osd_metadata(self, osd_id=None):
        """Metadata of one osd (when *osd_id* is given) or of all osds."""
        if osd_id is not None:
            return self.module.get('osd_metadata')[str(osd_id)]
        return self.module.get('osd_metadata')

    def get_mgr_metadata(self, mgr_id):
        """Metadata of one mgr daemon."""
        return self.module.get_metadata('mgr', mgr_id)

    def get_osd_epoch(self):
        """Current osdmap epoch (0 when unknown)."""
        return self.module.get('osd_map').get('epoch', 0)

    def get_osds(self):
        """List of osd entries from the osdmap."""
        return self.module.get('osd_map').get('osds', [])

    def get_max_osd(self):
        """max_osd value from the osdmap ('' when unknown)."""
        return self.module.get('osd_map').get('max_osd', '')

    def get_osd_pools(self):
        """List of pool entries from the osdmap."""
        return self.module.get('osd_map').get('pools', [])

    def get_pool_bytes_used(self, pool_id):
        """bytes_used of *pool_id* from 'df', or None when not found."""
        bytes_used = None
        pools = self.module.get('df').get('pools', [])
        for pool in pools:
            if pool_id == pool['id']:
                bytes_used = pool['stats']['bytes_used']
        return bytes_used

    def get_cluster_id(self):
        """Cluster fsid from the monmap."""
        return self.module.get('mon_map').get('fsid')

    def get_health_status(self):
        """Overall health status string (e.g. 'HEALTH_OK')."""
        health = json.loads(self.module.get('health')['json'])
        return health.get('status')

    def get_health_checks(self):
        """';'-joined summary messages of the active health checks,
        or '' when there are none."""
        health = json.loads(self.module.get('health')['json'])
        if health.get('checks'):
            message = ''
            checks = health['checks']
            for key in checks.keys():
                if message:
                    message += ';'
                if checks[key].get('summary', {}).get('message', ''):
                    message += checks[key]['summary']['message']
            return message
        else:
            return ''

    def get_mons(self):
        """List of monitor entries from the monmap."""
        return self.module.get('mon_map').get('mons', [])

    def get_mon_status(self):
        """Parsed mon_status json."""
        mon_status = json.loads(self.module.get('mon_status')['json'])
        return mon_status

    def get_osd_smart(self, osd_id, device_id=None):
        """Return {devid: latest smart sample} for the devices backing
        *osd_id*, optionally restricted to *device_id*."""
        osd_devices = []
        osd_smart = {}
        devices = self.module.get('devices')
        for dev in devices.get('devices', []):
            osd = ''
            daemons = dev.get('daemons', [])
            for daemon in daemons:
                # daemon names look like 'osd.NNN'; match on the number
                if daemon[4:] != str(osd_id):
                    continue
                osd = daemon
            if not osd:
                continue
            if dev.get('devid') and dev.get('devid') not in osd_devices:
                osd_devices.append(dev.get('devid'))
        for dev_id in osd_devices:
            o_key = ''
            if device_id and dev_id != device_id:
                continue
            smart_data = self.get_device_health(dev_id)
            if smart_data:
                # keys appear to be sample timestamps; take the newest --
                # TODO confirm against the devicehealth module output
                o_key = sorted(smart_data.keys(), reverse=True)[0]
            if o_key and smart_data and smart_data.values():
                dev_smart = smart_data[o_key]
                if dev_smart:
                    osd_smart[dev_id] = dev_smart
        return osd_smart

    def get_device_health(self, devid):
        """SMART metrics of one device via the devicehealth module;
        {} on any failure (logged)."""
        health_data = {}
        try:
            r, outb, outs = self.module.remote(
                'devicehealth', 'show_device_metrics', devid=devid,
                sample='')
            if r != 0:
                self.module.log.error(
                    'failed to get device %s health', devid)
                health_data = {}
            else:
                health_data = json.loads(outb)
        except Exception as e:
            self.module.log.error(
                'failed to get device %s health data due to %s',
                devid, str(e))
        return health_data

    def get_osd_hostname(self, osd_id):
        """Hostname the osd runs on ('' when it has no metadata)."""
        result = ''
        osd_metadata = self.get_osd_metadata(osd_id)
        if osd_metadata:
            result = osd_metadata.get('hostname', 'None')
        return result

    def get_osd_device_id(self, osd_id):
        """Return {dev_name: {'dev_id': ...}} for *osd_id*, which may be
        a plain number or an 'osd.NNN' string.

        Raises Exception on a malformed id.
        """
        result = {}
        if not str(osd_id).isdigit():
            if str(osd_id)[0:4] == 'osd.':
                osdid = osd_id[4:]
            else:
                raise Exception('not a valid <osd.NNN> id or number')
        else:
            osdid = osd_id
        osd_metadata = self.get_osd_metadata(osdid)
        if osd_metadata:
            # device_ids is a comma separated list of 'name=id'
            # (or bare id) entries
            osd_device_ids = osd_metadata.get('device_ids', '')
            if osd_device_ids:
                result = {}
                for osd_device_id in osd_device_ids.split(','):
                    dev_name = ''
                    if len(str(osd_device_id).split('=')) >= 2:
                        dev_name = osd_device_id.split('=')[0]
                        dev_id = osd_device_id.split('=')[1]
                    else:
                        dev_id = osd_device_id
                    if dev_name:
                        result[dev_name] = {'dev_id': dev_id}
        return result

    def get_file_systems(self):
        """List of cephfs filesystems from the fsmap."""
        return self.module.get('fs_map').get('filesystems', [])

    def set_device_life_expectancy(self, device_id, from_date, to_date=None):
        """Record a device's predicted life-expectancy window via the mon.

        Returns the mon command's return code (non-zero is logged).
        """
        result = CommandResult('')
        # build the command once; 'to' is optional
        cmd = {
            'prefix': 'device set-life-expectancy',
            'devid': device_id,
            'from': from_date,
        }
        if to_date is not None:
            cmd['to'] = to_date
        self.module.send_command(result, 'mon', '', json.dumps(cmd), '')
        ret, outb, outs = result.wait()
        if ret != 0:
            self.module.log.error(
                'failed to set device life expectancy, %s' % outs)
        return ret

    def reset_device_life_expectancy(self, device_id):
        """Clear a device's recorded life expectancy.

        Returns the mon command's return code (non-zero is logged).
        """
        result = CommandResult('')
        self.module.send_command(result, 'mon', '', json.dumps({
            'prefix': 'device rm-life-expectancy',
            'devid': device_id
        }), '')
        ret, outb, outs = result.wait()
        if ret != 0:
            self.module.log.error(
                'failed to reset device life expectancy, %s' % outs)
        return ret

    def get_server(self, hostname):
        """Server descriptor for *hostname* (mgr get_server passthrough)."""
        return self.module.get_server(hostname)

    def get_configuration(self, key):
        """Module configuration value for *key*."""
        return self.module.get_configuration(key)

    def get_rate(self, svc_type, svc_name, path):
        """Return the most recent rate of a perf counter
        (0.0 when fewer than two samples exist)."""
        data = self.module.get_counter(svc_type, svc_name, path)[path]

        if data and len(data) > 1:
            return differentiate(*data[-2:])
        return 0.0

    def get_latest(self, daemon_type, daemon_name, counter):
        """Latest value of a perf counter."""
        return self.module.get_latest(daemon_type, daemon_name, counter)

    def get_pgs_up_by_poolid(self, poolid):
        """Return {pg_id: [up osds]} for *poolid*; {} on any error."""
        try:
            osd_map = self.module.get_osdmap()
            if not osd_map:
                return {}
            # narrowed from a bare 'except:' which also swallowed
            # SystemExit/KeyboardInterrupt
            return osd_map.map_pool_pgs_up(int(poolid))
        except Exception:
            return {}
diff --git a/src/pybind/mgr/diskprediction_cloud/common/cypher.py b/src/pybind/mgr/diskprediction_cloud/common/cypher.py
new file mode 100644
index 00000000..7b7b60e5
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/common/cypher.py
@@ -0,0 +1,71 @@
+from __future__ import absolute_import
+
+import time
+
+
class NodeInfo(object):
    """ Neo4j Node information """
    def __init__(self, label, domain_id, name, meta):
        self.label = label          # node label, also used as cypher alias
        self.domain_id = domain_id  # unique id stored in the domainId property
        self.name = name
        self.meta = meta            # dict of extra node properties (or None)


class CypherOP(object):
    """ Cypher Operation

    Builders for idempotent cypher statements.  Every generated ``set``
    clause guards on the node's ``time`` property so a replayed (older)
    update never overwrites newer data.
    """

    @staticmethod
    def update(node, key, value, timestamp=None):
        """Return a guarded cypher ``set`` clause for ``node.key = value``.

        *timestamp* defaults to "now" in nanoseconds, computed per call.
        (bugfix: the original evaluated ``int(time.time()*(1000**3))`` in
        the parameter default, freezing the timestamp at import time;
        ``add_link`` already used the None-sentinel pattern used here.)
        """
        if timestamp is None:
            timestamp = int(time.time()*(1000**3))
        result = ''
        if isinstance(node, NodeInfo):
            # 'time' is stored as a raw number, everything else quoted
            if key != 'time':
                cy_value = '\'%s\'' % value
            else:
                cy_value = value
            result = \
                'set %s.%s=case when %s.time >= %s then %s.%s ELSE %s end' % (
                    node.label, key, node.label, timestamp, node.label, key,
                    cy_value)
        return result

    @staticmethod
    def create_or_merge(node, timestamp=None):
        """Return a cypher ``merge`` statement creating/updating *node*
        and all of its meta properties."""
        if timestamp is None:
            timestamp = int(time.time()*(1000**3))
        result = ''
        if isinstance(node, NodeInfo):
            meta_list = []
            if isinstance(node.meta, dict):
                for key, value in node.meta.items():
                    meta_list.append(
                        CypherOP.update(node, key, value, timestamp))
            domain_id = '{domainId:\'%s\'}' % node.domain_id
            if meta_list:
                result = 'merge (%s:%s %s) %s %s %s' % (
                    node.label, node.label,
                    domain_id,
                    CypherOP.update(node, 'name', node.name, timestamp),
                    ' '.join(meta_list),
                    CypherOP.update(node, 'time', timestamp, timestamp))
            else:
                result = 'merge (%s:%s %s) %s %s' % (
                    node.label, node.label,
                    domain_id,
                    CypherOP.update(node, 'name', node.name, timestamp),
                    CypherOP.update(node, 'time', timestamp, timestamp))
        return result

    @staticmethod
    def add_link(snode, dnode, relationship, timestamp=None):
        """Return cypher merging both nodes plus a *relationship* edge
        between them, with the same last-write-wins time guard."""
        result = ''
        if timestamp is None:
            timestamp = int(time.time()*(1000**3))
        if isinstance(snode, NodeInfo) and isinstance(dnode, NodeInfo):
            cy_snode = CypherOP.create_or_merge(snode, timestamp)
            cy_dnode = CypherOP.create_or_merge(dnode, timestamp)
            target = snode.label + dnode.label
            link = 'merge (%s)-[%s:%s]->(%s) set %s.time=case when %s.time >= %s then %s.time ELSE %s end' % (
                snode.label, target, relationship,
                dnode.label, target,
                target, timestamp,
                target, timestamp)
            result = '%s %s %s' % (cy_snode, cy_dnode, link)
        return result
diff --git a/src/pybind/mgr/diskprediction_cloud/common/grpcclient.py b/src/pybind/mgr/diskprediction_cloud/common/grpcclient.py
new file mode 100644
index 00000000..5a1d5e7e
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/common/grpcclient.py
@@ -0,0 +1,242 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+import grpc
+import json
+from logging import getLogger
+import time
+
+from . import DummyResonse
+from . import client_pb2
+from . import client_pb2_grpc
+
+
def gen_configuration(**kwargs):
    """Build a GRPcClient configuration dict from keyword arguments.

    Keys not supplied by the caller fall back to the DiskProphet cloud
    defaults; ``user``, ``password`` and ``cert_context`` default to ``None``.
    """
    defaults = {
        'host': 'api.diskprophet.com',
        'user': None,
        'password': None,
        'port': 31400,
        'mgr_inst': None,
        'cert_context': None,
        'ssl_target_name': 'api.diskprophet.com',
        'default_authority': 'api.diskprophet.com',
    }
    return {key: kwargs.get(key, default)
            for key, default in defaults.items()}
+
+
class GRPcClient(object):
    """gRPC client for the DiskProphet cloud service.

    Wraps a secure channel plus the generated Account/Collection/Diskprophet
    stubs, exposing send_info() for uploads and query_info() for prediction
    retrieval.  Responses are normalized into DummyResonse objects.
    """

    def __init__(self, configuration):
        """Create the client from a dict produced by gen_configuration()."""
        self.auth = None
        self.host = configuration.get('host')
        self.port = configuration.get('port')
        # Credentials are passed as gRPC call metadata, not channel options.
        if configuration.get('user') and configuration.get('password'):
            self.auth = (
                ('account', configuration.get('user')),
                ('password', configuration.get('password')))
        self.cert_context = configuration.get('cert_context')
        self.ssl_target_name = configuration.get('ssl_target_name')
        self.default_authority = configuration.get('default_authority')
        self.mgr_inst = configuration.get('mgr_inst')
        if self.mgr_inst:
            self._logger = self.mgr_inst.log
        else:
            self._logger = getLogger()
        self._client = self._get_channel()

    def close(self):
        """Close the underlying channel, if one was created."""
        if self._client:
            self._client.close()

    @staticmethod
    def connectivity_update(connectivity):
        # Subscribed to channel state changes only to force early connection.
        pass

    def _get_channel(self):
        """Create the TLS channel; return None (and log) on failure."""
        try:
            creds = grpc.ssl_channel_credentials(
                root_certificates=self.cert_context)
            channel = \
                grpc.secure_channel('{}:{}'.format(
                    self.host, self.port), creds,
                    options=(('grpc.ssl_target_name_override', self.ssl_target_name,),
                             ('grpc.default_authority', self.default_authority),))
            channel.subscribe(self.connectivity_update, try_to_connect=True)
            return channel
        except Exception as e:
            self._logger.error(
                'failed to create connection exception: {}'.format(
                    ';'.join(str(e).split('\n\t'))))
            return None

    def test_connection(self):
        """Return True when the account heartbeat reports the service alive."""
        try:
            stub_accout = client_pb2_grpc.AccountStub(self._client)
            result = stub_accout.AccountHeartbeat(client_pb2.Empty())
            self._logger.debug('test connection result: {}'.format(str(result)))
            if result and "is alive" in str(result.message):
                return True
            else:
                return False
        except Exception as e:
            self._logger.error(
                'failed to test connection exception: {}'.format(
                    ';'.join(str(e).split('\n\t'))))
            return False

    def _send_metrics(self, data, measurement):
        """Serialize each data point to line protocol and upload it.

        :param data: iterable of metric objects with .measurement/.tags/.fields
        :param measurement: fallback measurement name when a point has none
        :return: dict with 'measurement', 'success_count', 'failure_count'
        """
        status_info = dict()
        status_info['measurement'] = None
        status_info['success_count'] = 0
        status_info['failure_count'] = 0
        # BUGFIX: the original tested isinstance(..., long), which raises
        # NameError on Python 3 (where 'long' does not exist) for any field
        # that is not already an int -- silently dropping those points.
        try:
            integer_types = (int, long)  # noqa: F821 -- Python 2 only
        except NameError:
            integer_types = (int,)
        for dp_data in data:
            d_measurement = dp_data.measurement
            if not d_measurement:
                status_info['measurement'] = measurement
            else:
                status_info['measurement'] = d_measurement
            tag_list = []
            field_list = []
            for name in dp_data.tags:
                tag = '{}={}'.format(name, dp_data.tags[name])
                tag_list.append(tag)
            for name in dp_data.fields:
                if dp_data.fields[name] is None:
                    continue
                if isinstance(dp_data.fields[name], str):
                    field = '{}=\"{}\"'.format(name, dp_data.fields[name])
                elif isinstance(dp_data.fields[name], bool):
                    # bool must be tested before int (bool is an int subclass).
                    field = '{}={}'.format(name,
                                           str(dp_data.fields[name]).lower())
                elif isinstance(dp_data.fields[name], integer_types):
                    # Integers carry the line-protocol 'i' suffix.
                    field = '{}={}i'.format(name, dp_data.fields[name])
                else:
                    field = '{}={}'.format(name, dp_data.fields[name])
                field_list.append(field)
            # Renamed from 'data' to avoid rebinding the iterated argument.
            point = '{},{} {} {}'.format(
                status_info['measurement'],
                ','.join(tag_list),
                ','.join(field_list),
                int(time.time() * 1000 * 1000 * 1000))
            try:
                resp = self._send_info(data=[point], measurement=status_info['measurement'])
                status_code = resp.status_code
                if 200 <= status_code < 300:
                    self._logger.debug(
                        '{} send diskprediction api success(ret: {})'.format(
                            status_info['measurement'], status_code))
                    status_info['success_count'] += 1
                else:
                    self._logger.error(
                        'return code: {}, content: {}'.format(
                            status_code, resp.content))
                    status_info['failure_count'] += 1
            except Exception as e:
                status_info['failure_count'] += 1
                self._logger.error(str(e))
        return status_info

    def _send_db_relay(self, data, measurement):
        """Upload pre-built db-relay cypher commands one by one.

        :return: dict with 'measurement', 'success_count', 'failure_count'
        """
        status_info = dict()
        status_info['measurement'] = measurement
        status_info['success_count'] = 0
        status_info['failure_count'] = 0
        for dp_data in data:
            try:
                resp = self._send_info(
                    data=[dp_data.fields['cmd']], measurement=measurement)
                status_code = resp.status_code
                if 200 <= status_code < 300:
                    self._logger.debug(
                        '{} send diskprediction api success(ret: {})'.format(
                            measurement, status_code))
                    status_info['success_count'] += 1
                else:
                    self._logger.error(
                        'return code: {}, content: {}'.format(
                            status_code, resp.content))
                    status_info['failure_count'] += 1
            except Exception as e:
                status_info['failure_count'] += 1
                self._logger.error(str(e))
        return status_info

    def send_info(self, data, measurement):
        """
        :param data: data structure
        :param measurement: data measurement class name
        :return:
            status_info = {
                'success_count': <count>,
                'failure_count': <count>
            }
        """
        if measurement == 'db_relay':
            return self._send_db_relay(data, measurement)
        else:
            return self._send_metrics(data, measurement)

    def _send_info(self, data, measurement):
        """Perform one PostDBRelay/PostMetrics RPC; map result to a response
        object with HTTP-like status_code (200 on success, 400 otherwise)."""
        resp = DummyResonse()
        try:
            stub_collection = client_pb2_grpc.CollectionStub(self._client)
            if measurement == 'db_relay':
                result = stub_collection.PostDBRelay(
                    client_pb2.PostDBRelayInput(cmds=data), metadata=self.auth)
            else:
                result = stub_collection.PostMetrics(
                    client_pb2.PostMetricsInput(points=data), metadata=self.auth)
            if result and 'success' in str(result.message).lower():
                resp.status_code = 200
                resp.content = ''
            else:
                resp.status_code = 400
                resp.content = ';'.join(str(result).split('\n\t'))
                self._logger.error(
                    'failed to send info: {}'.format(resp.content))
        except Exception as e:
            resp.status_code = 400
            resp.content = ';'.join(str(e).split('\n\t'))
            self._logger.error(
                'failed to send info exception: {}'.format(resp.content))
        return resp

    def query_info(self, host_domain_id, disk_domain_id, measurement):
        """Fetch disk predictions and flatten the first series into
        resp.resp_json (column name -> value).

        :param host_domain_id: host id (used only in log output here)
        :param disk_domain_id: physical disk id passed to the prediction RPC
        :param measurement: unused by the RPC; kept for interface symmetry
        :return: DummyResonse with status_code 200/400 and resp_json payload
        """
        resp = DummyResonse()
        try:
            stub_dp = client_pb2_grpc.DiskprophetStub(self._client)
            predicted = stub_dp.DPGetDisksPrediction(
                client_pb2.DPGetDisksPredictionInput(
                    physicalDiskIds=disk_domain_id),
                metadata=self.auth)
            if predicted and hasattr(predicted, 'data'):
                resp.status_code = 200
                resp.content = ''
                resp_json = json.loads(predicted.data)
                rc = resp_json.get('results', [])
                if rc:
                    series = rc[0].get('series', [])
                    if series:
                        values = series[0].get('values', [])
                        if not values:
                            resp.resp_json = {}
                        else:
                            columns = series[0].get('columns', [])
                            for item in values:
                                # get prediction key and value from server.
                                for name, value in zip(columns, item):
                                    # process prediction data
                                    resp.resp_json[name] = value
                self._logger.debug("query {}:{} result:{}".format(host_domain_id, disk_domain_id, resp))
                return resp
            else:
                resp.status_code = 400
                resp.content = ''
                resp.resp_json = {'error': ';'.join(str(predicted).split('\n\t'))}
                self._logger.debug("query {}:{} result:{}".format(host_domain_id, disk_domain_id, resp))
                return resp
        except Exception as e:
            resp.status_code = 400
            resp.content = ';'.join(str(e).split('\n\t'))
            resp.resp_json = {'error': resp.content}
            self._logger.debug("query {}:{} result:{}".format(host_domain_id, disk_domain_id, resp))
            return resp
diff --git a/src/pybind/mgr/diskprediction_cloud/common/server.crt b/src/pybind/mgr/diskprediction_cloud/common/server.crt
new file mode 100644
index 00000000..d72c9d2f
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/common/server.crt
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICsjCCAZoCCQCKLjrHOzCTrDANBgkqhkiG9w0BAQsFADAbMRkwFwYDVQQDDBBh
+cGkuZmVkZXJhdG9yLmFpMB4XDTE4MDgwMjA2NDg0N1oXDTI4MDczMDA2NDg0N1ow
+GzEZMBcGA1UEAwwQYXBpLmZlZGVyYXRvci5haTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAJkDL/VoLbI+Rc1GXkZwpN8n4e7HhIk1iK98yhXegoH8g6ZZ
+uVVlUW/zNO0V5W9IgiSBqEWOEf9heWj7mIbbxl437W1LpR4V0LKR2dbY7ZMwlB3L
+ZJYxtziZYu1g4Fn9hDnVJIXVmXFpF62wHd2ZSY7FyUF/OGetxLSfoOMkTHY8A8HB
+92vQfoFjgx1e23lLgTO2VpucmU/qXiF+xI/K6kkrMnGJi4xBL29i3aKRRNktVUHf
+Zs6JhBKl4sbvkW5m5AECW4c0XxVJotTLoPUjx4rxp0k5S1aQSYSS+0z96eVY0w8J
+ungiWEj7lLqwEGKjOzfjDLsczZIcZZcQSQwb3qcCAwEAATANBgkqhkiG9w0BAQsF
+AAOCAQEADwfBrHsvPmUD8CTx8lpVcqrOlHc7ftW3hb11vWwwfJw4fBiJ8DoB496x
+SAP2CJyDnSLdyvVueKLjiRFBm96W76nbMeP9+CkktGRUbLjkByv/v+7WSxRrukDC
+yR6IXqQJe4ADcYkVYoUMx3frBQzFtS7hni0FPvl3AN55TvTXqed61CdN9zdw9Ezn
+yn0oy3BbT5h/zNHefTQBzgQhW62C5YdTRtS6VVWV/k1kLz0GVG1eMtAqueUCxFeM
+g1mXYz2/Cm5C8pszZfiP+a/QV1z/3QgRUp0i0yVLiteqNDCPv6bc767VQEuXok9p
+NDuKElVxdA0WD9cbnBXiyfeMOQnjQw==
+-----END CERTIFICATE-----
diff --git a/src/pybind/mgr/diskprediction_cloud/module.py b/src/pybind/mgr/diskprediction_cloud/module.py
new file mode 100644
index 00000000..5052d55e
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/module.py
@@ -0,0 +1,454 @@
+"""
+diskprediction with cloud predictor
+"""
+from __future__ import absolute_import
+
+import base64
+from datetime import datetime
+import errno
+import json
+from mgr_module import MgrModule
+import os
+from threading import Event
+
+try:
+ from string import maketrans
+except ImportError:
+ maketrans = str.maketrans
+
+from .common import DP_MGR_STAT_ENABLED, DP_MGR_STAT_DISABLED
+from .task import MetricsRunner, SmartRunner, PredictRunner, TestRunner
+
# Seconds per day / per week, used for life-expectancy range arithmetic.
TIME_DAYS = 24*60*60
TIME_WEEK = TIME_DAYS * 7
# Agent runner threads started when cloud prediction mode is activated.
DP_AGENTS = [MetricsRunner, SmartRunner, PredictRunner]
# Obfuscation alphabet paired position-by-position with ORIGIN_ALPHABET;
# encode_string() translates standard base64 output through this mapping.
CUSTOMER_ALPHABET = "ABCDEFG&HIJKLMN@OQRS.TUV(WXYZabcd)efghijlmn-opqrstu*vwxyz0123=45"
# Standard base64 alphabet (the translation source).
ORIGIN_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+
+
def get_transtable():
    """Translation table mapping the base64 alphabet to the obfuscated one."""
    return maketrans(ORIGIN_ALPHABET, CUSTOMER_ALPHABET)
+
+
def get_reverse_transtable():
    """Translation table mapping the obfuscated alphabet back to base64."""
    return maketrans(CUSTOMER_ALPHABET, ORIGIN_ALPHABET)
+
+
def encode_string(value):
    """Base64-encode *value*, strip '=' padding, then obfuscate the result
    through the custom alphabet translation table.

    Returns "" for an empty value.
    """
    if len(value) == 0:
        return ""
    encoded = base64.b64encode(str(value).encode()).decode("utf-8")
    return encoded.rstrip("=").translate(get_transtable())
+
+
class Module(MgrModule):
    """diskprediction_cloud mgr module.

    Starts background agent threads that upload cluster metrics and SMART
    data to the DiskProphet cloud service, retrieves per-device failure
    predictions, and writes them back as device life-expectancy ranges.
    """

    MODULE_OPTIONS = [
        {
            'name': 'diskprediction_server',
            'default': ''
        },
        {
            'name': 'diskprediction_port',
            'default': '31400'
        },
        {
            'name': 'diskprediction_user',
            'default': ''
        },
        {
            'name': 'diskprediction_password',
            'default': ''
        },
        {
            'name': 'diskprediction_upload_metrics_interval',
            'default': '600'
        },
        {
            'name': 'diskprediction_upload_smart_interval',
            'default': '43200'
        },
        {
            'name': 'diskprediction_retrieve_prediction_interval',
            'default': '43200'
        },
        {
            'name': 'diskprediction_cert_context',
            'default': ''
        },
        {
            'name': 'diskprediction_ssl_target_name_override',
            'default': 'localhost'
        },
        {
            'name': 'diskprediction_default_authority',
            'default': 'localhost'
        },
        {
            'name': 'sleep_interval',
            'default': str(600),
        }
    ]

    COMMANDS = [
        {
            'cmd': 'device show-prediction-config',
            'desc': 'Prints diskprediction configuration',
            'perm': 'r'
        },
        {
            'cmd': 'device set-cloud-prediction-config '
                   'name=server,type=CephString,req=true '
                   'name=user,type=CephString,req=true '
                   'name=password,type=CephString,req=true '
                   'name=certfile,type=CephString,req=true '
                   'name=port,type=CephString,req=false ',
            'desc': 'Configure Disk Prediction service',
            'perm': 'rw'
        },
        {
            'cmd': 'device debug metrics-forced',
            'desc': 'Run metrics agent forced',
            'perm': 'r'
        },
        {
            'cmd': 'device debug smart-forced',
            'desc': 'Run smart agent forced',
            'perm': 'r'
        },
        {
            'cmd': 'diskprediction_cloud status',
            'desc': 'Check diskprediction_cloud status',
            'perm': 'r'
        }
    ]

    def __init__(self, *args, **kwargs):
        super(Module, self).__init__(*args, **kwargs)
        # Published via the 'diskprediction_cloud status' command.
        self.status = {'status': DP_MGR_STAT_DISABLED}
        # Wakes the serve() loop early (config change / shutdown).
        self._event = Event()
        # Signaled by the predict agent when fresh prediction results arrive.
        self._predict_event = Event()
        # Running AgentRunner threads.
        self._agents = []
        self._activated_cloud = False
        # devid -> prediction dict, filled by the predict agent.
        self.prediction_result = {}
        self.config = dict()
        self._run = True

    def config_notify(self):
        """Refresh cached option attributes; wake serve() when the cluster's
        device_failure_prediction_mode toggles to/from 'cloud'."""
        for opt in self.MODULE_OPTIONS:
            setattr(self,
                    opt['name'],
                    self.get_module_option(opt['name']))
            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))
        if not self._activated_cloud and self.get_ceph_option('device_failure_prediction_mode') == 'cloud':
            self._event.set()
        if self._activated_cloud and self.get_ceph_option('device_failure_prediction_mode') != 'cloud':
            self._event.set()

    @property
    def config_keys(self):
        # Option name -> declared default, for validation and display.
        return dict((o['name'], o.get('default', None)) for o in self.MODULE_OPTIONS)

    def set_config_option(self, option, value):
        """Validate and persist a single module option (also cached locally).

        :raises RuntimeError: unknown option, or non-integer value for an
            option that requires one.
        """
        if option not in self.config_keys.keys():
            raise RuntimeError('{0} is a unknown configuration '
                               'option'.format(option))

        if option in ['diskprediction_port',
                      'diskprediction_upload_metrics_interval',
                      'diskprediction_upload_smart_interval',
                      'diskprediction_retrieve_prediction_interval']:
            if not str(value).isdigit():
                raise RuntimeError('invalid {} configured. Please specify '
                                   'a valid integer {}'.format(option, value))

        self.log.debug('Setting in-memory config option %s to: %s', option,
                       value)
        self.set_module_option(option, value)
        self.config[option] = value

        return True

    def get_configuration(self, key):
        """Return the stored option value, falling back to its default."""
        return self.get_module_option(key, self.config_keys[key])

    @staticmethod
    def _convert_timestamp(predicted_timestamp, life_expectancy_day):
        """
        :param predicted_timestamp: unit is nanoseconds
        :param life_expectancy_day: unit is seconds
        :return:
            date format '%Y-%m-%d' ex. 2018-01-01
        """
        return datetime.fromtimestamp(
            predicted_timestamp / (1000 ** 3) + life_expectancy_day).strftime('%Y-%m-%d')

    def _show_prediction_config(self, cmd):
        """Handler for 'device show-prediction-config'."""
        self.show_module_config()
        return 0, json.dumps(self.config, indent=4), ''

    def _set_ssl_target_name(self, cmd):
        str_ssl_target = cmd.get('ssl_target_name', '')
        try:
            self.set_module_option('diskprediction_ssl_target_name_override', str_ssl_target)
            # err field must be a string ('' on success), not 0.
            return 0, 'success to config ssl target name', ''
        except Exception as e:
            return -errno.EINVAL, '', str(e)

    def _set_ssl_default_authority(self, cmd):
        str_ssl_authority = cmd.get('ssl_authority', '')
        try:
            self.set_module_option('diskprediction_default_authority', str_ssl_authority)
            # err field must be a string ('' on success), not 0.
            return 0, 'success to config ssl default authority', ''
        except Exception as e:
            return -errno.EINVAL, '', str(e)

    def _set_cloud_prediction_config(self, cmd):
        """Handler for 'device set-cloud-prediction-config': store the server
        certificate and connection options, then kick all agents."""
        str_cert_path = cmd.get('certfile', '')
        if os.path.exists(str_cert_path):
            with open(str_cert_path, 'rb') as f:
                trusted_certs = f.read()
            self.set_config_option(
                'diskprediction_cert_context', trusted_certs)
            for _agent in self._agents:
                _agent.event.set()
            self.set_module_option('diskprediction_server', cmd['server'])
            self.set_module_option('diskprediction_user', cmd['user'])
            self.set_module_option('diskprediction_password', encode_string(cmd['password']))
            if cmd.get('port'):
                self.set_module_option('diskprediction_port', cmd['port'])
            return 0, 'succeed to config cloud mode connection', ''
        else:
            return -errno.EINVAL, '', 'certification file not existed'

    def _debug_metrics_forced(self, cmd):
        """Handler for 'device debug metrics-forced': run metrics agent now."""
        msg = ''
        for _agent in self._agents:
            if isinstance(_agent, MetricsRunner):
                msg = 'run metrics agent successfully'
                _agent.event.set()
        return 0, msg, ''

    def _debug_smart_forced(self, cmd):
        """Handler for 'device debug smart-forced': run smart agent now."""
        msg = ' '
        for _agent in self._agents:
            if isinstance(_agent, SmartRunner):
                msg = 'run smart agent successfully'
                _agent.event.set()
        return 0, msg, ''

    def refresh_config(self):
        """Re-read every module option into an instance attribute."""
        for opt in self.MODULE_OPTIONS:
            setattr(self,
                    opt['name'],
                    self.get_module_option(opt['name']))
            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))

    def _status(self, cmd):
        """Handler for 'diskprediction_cloud status'."""
        return 0, json.dumps(self.status), ''

    def _refresh_cloud_prediction_result(self):
        """Kick the predict agent and wait (up to 300s) for fresh results."""
        for _agent in self._agents:
            if isinstance(_agent, PredictRunner):
                self._predict_event.clear()
                _agent.event.set()
                self._predict_event.wait(300)
                if self._predict_event.is_set():
                    self._predict_event.clear()
                break

    def predict_life_expectancy(self, devid):
        """Return (rc, life-expectancy-range string, err) for one device."""
        assert devid
        result = self.get('device {}'.format(devid))
        if not result:
            return -1, '', 'device {} not found'.format(devid)
        dev_info = result.get('device', {})
        if not dev_info:
            return -1, '', 'device {} not found'.format(devid)
        self._refresh_cloud_prediction_result()
        prediction_data = self.prediction_result.get(devid)
        if not prediction_data:
            return -1, '', 'device {} prediction data not ready'.format(devid)
        near_failure = prediction_data.get('near_failure', '').lower()
        if near_failure == 'good':
            return 0, '>6w', ''
        elif near_failure == 'warning':
            return 0, '>=2w and <=6w', ''
        elif near_failure == 'bad':
            return 0, '<2w', ''
        else:
            return 0, 'unknown', ''

    def _update_device_life_expectancy_day(self, devid, prediction):
        # Update osd life-expectancy
        from .common.clusterdata import ClusterAPI
        predicted = None
        life_expectancy_day_min = None
        life_expectancy_day_max = None
        if prediction.get('predicted'):
            predicted = int(prediction['predicted'])
        if prediction.get('near_failure'):
            # Map the coarse near_failure state onto a min/max day window.
            if prediction['near_failure'].lower() == 'good':
                life_expectancy_day_min = (TIME_WEEK * 6) + TIME_DAYS
                life_expectancy_day_max = None
            elif prediction['near_failure'].lower() == 'warning':
                life_expectancy_day_min = (TIME_WEEK * 2)
                life_expectancy_day_max = (TIME_WEEK * 6)
            elif prediction['near_failure'].lower() == 'bad':
                life_expectancy_day_min = 0
                life_expectancy_day_max = (TIME_WEEK * 2) - TIME_DAYS
            else:
                # Near failure state is unknown.
                predicted = None
                life_expectancy_day_min = None
                life_expectancy_day_max = None

        obj_api = ClusterAPI(self)
        if predicted and devid and life_expectancy_day_min is not None:
            from_date = None
            to_date = None
            try:
                if life_expectancy_day_min is not None:
                    from_date = self._convert_timestamp(predicted, life_expectancy_day_min)

                if life_expectancy_day_max is not None:
                    to_date = self._convert_timestamp(predicted, life_expectancy_day_max)

                obj_api.set_device_life_expectancy(devid, from_date, to_date)
                self.log.info(
                    'succeed to set device {} life expectancy from: {}, to: {}'.format(
                        devid, from_date, to_date))
            except Exception as e:
                self.log.error(
                    'failed to set device {} life expectancy from: {}, to: {}, {}'.format(
                        devid, from_date, to_date, str(e)))
        else:
            obj_api.reset_device_life_expectancy(devid)

    def predict_all_devices(self):
        """Refresh predictions and push life expectancy for every device.

        :return: (rc, out, err) triple following the mgr command convention.
        """
        if not self._activated_cloud:
            return -1, '', 'diskprecition_cloud not ready'
        self.refresh_config()
        result = self.get('devices')
        if not result:
            return -1, '', 'unable to get all devices for prediction'
        self._refresh_cloud_prediction_result()
        # BUGFIX: the original broke out of the loop on the first device that
        # had prediction data, making the update call unreachable, and could
        # reference prediction_data before assignment on an empty device list.
        for dev in result.get('devices', []):
            devid = dev.get('devid')
            if not devid:
                continue
            prediction_data = self.prediction_result.get(devid)
            if not prediction_data:
                return -1, '', 'device {} prediction data not ready'.format(devid)
            self._update_device_life_expectancy_day(devid, prediction_data)
        return 0, '', ''

    def handle_command(self, _, cmd):
        """Dispatch a CLI command to the matching ``_<words>`` handler.

        The handler name is built from the command template by dropping the
        'device'/'diskprediction_cloud' prefixes and argument specs, then
        joining the remaining words with '_' ('-' becomes '_').
        """
        for o_cmd in self.COMMANDS:
            if cmd['prefix'] == o_cmd['cmd'][:len(cmd['prefix'])]:
                fun_name = ''
                avgs = o_cmd['cmd'].split(' ')
                for avg in avgs:
                    if avg.lower() == 'diskprediction_cloud':
                        continue
                    if avg.lower() == 'device':
                        continue
                    if '=' in avg or ',' in avg or not avg:
                        continue
                    fun_name += '_%s' % avg.replace('-', '_')
                if fun_name:
                    # Default None avoids AttributeError for unmapped names.
                    fun = getattr(self, fun_name, None)
                    if fun:
                        return fun(cmd)
        return -errno.EINVAL, '', 'cmd not found'

    def show_module_config(self):
        """Load every option (or its default) into self.config for display."""
        for key, default in self.config_keys.items():
            self.set_config_option(key, self.get_module_option(key, default))

    def serve(self):
        """Main loop: start/stop agents as the prediction mode changes and
        restart them when an agent thread appears hung."""
        self.log.info('Starting diskprediction module')
        self.config_notify()
        self.status = {'status': DP_MGR_STAT_ENABLED}

        while self._run:
            self.refresh_config()
            mode = self.get_ceph_option('device_failure_prediction_mode')
            if mode == 'cloud':
                if not self._activated_cloud:
                    self.start_cloud_disk_prediction()
            else:
                if self._activated_cloud:
                    self.stop_disk_prediction()

            # Check agent hang is?
            restart_agent = False
            try:
                for dp_agent in self._agents:
                    if dp_agent.is_timeout():
                        # BUGFIX: the format string was '{]', which raises
                        # ValueError instead of logging the agent name.
                        self.log.error('agent name: {} timeout'.format(dp_agent.task_name))
                        restart_agent = True
                        break
            except Exception:
                # (was 'except Exception as IOError', shadowing the builtin)
                self.log.error('disk prediction plugin failed to started and try to restart')
                restart_agent = True

            if restart_agent:
                self.stop_disk_prediction()
            else:
                sleep_interval = int(self.sleep_interval) or 60
                self._event.wait(sleep_interval)
                self._event.clear()
        self.stop_disk_prediction()

    def _agent_call_back(self):
        # Invoked by the predict agent when new results have been stored.
        self.log.debug('notify refresh devices prediction result')
        self._predict_event.set()

    def start_cloud_disk_prediction(self):
        """Create and start all agent runner threads."""
        assert not self._activated_cloud
        for dp_agent in DP_AGENTS:
            if dp_agent == PredictRunner:
                obj_agent = dp_agent(self, 300, self._agent_call_back)
            else:
                obj_agent = dp_agent(self, 300)
            if obj_agent:
                obj_agent.start()
            else:
                raise Exception('failed to start task %s' % obj_agent.task_name)
            self._agents.append(obj_agent)
        self._activated_cloud = True
        self.log.info('start cloud disk prediction')

    def stop_disk_prediction(self):
        """Terminate and join every agent thread; mark the module disabled."""
        assert self._activated_cloud
        try:
            self.status = {'status': DP_MGR_STAT_DISABLED}
            while self._agents:
                dp_agent = self._agents.pop()
                self.log.info('agent name: {}'.format(dp_agent.task_name))
                dp_agent.terminate()
                dp_agent.join(5)
                del dp_agent
            self._activated_cloud = False
            self.log.info('stop disk prediction')
        except Exception:
            # (was 'except Exception as IOError', shadowing the builtin)
            self.log.error('failed to stop disk prediction clould plugin')

    def shutdown(self):
        self._run = False
        self._event.set()
        super(Module, self).shutdown()

    def self_test(self):
        """Run every agent once synchronously via TestRunner."""
        obj_test = TestRunner(self)
        obj_test.run()
        self.log.info('self test completed')
diff --git a/src/pybind/mgr/diskprediction_cloud/requirements.txt b/src/pybind/mgr/diskprediction_cloud/requirements.txt
new file mode 100644
index 00000000..4abc346a
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/requirements.txt
@@ -0,0 +1,12 @@
+google-api-python-client==1.7.3
+google-auth==1.5.0
+google-auth-httplib2==0.0.3
+googleapis-common-protos==1.5.3
+grpc==0.3.post19
+grpc-google-logging-v2==0.8.1
+grpc-google-pubsub-v1==0.8.1
+grpcio==1.14.1
+mock==2.0.0
+numpy==1.15.1
+scikit-learn==0.19.2
+scipy==1.1.0
diff --git a/src/pybind/mgr/diskprediction_cloud/task.py b/src/pybind/mgr/diskprediction_cloud/task.py
new file mode 100644
index 00000000..6ed04e60
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/task.py
@@ -0,0 +1,181 @@
+from __future__ import absolute_import
+
+import time
+from threading import Event, Thread
+
+from .agent.predictor import PredictAgent
+from .agent.metrics.ceph_cluster import CephClusterAgent
+from .agent.metrics.ceph_mon_osd import CephMonOsdAgent
+from .agent.metrics.ceph_pool import CephPoolAgent
+from .agent.metrics.db_relay import DBRelayAgent
+from .agent.metrics.sai_agent import SAIAgent
+from .agent.metrics.sai_cluster import SAICluserAgent
+from .agent.metrics.sai_disk import SAIDiskAgent
+from .agent.metrics.sai_disk_smart import SAIDiskSmartAgent
+from .agent.metrics.sai_host import SAIHostAgent
+from .common import DP_MGR_STAT_FAILED, DP_MGR_STAT_OK, DP_MGR_STAT_WARNING
+
+
class AgentRunner(Thread):
    """Base thread that periodically runs a fixed list of agent classes.

    Subclasses override ``task_name``, ``interval_key`` (the mgr option
    holding the run interval in seconds) and ``agents`` (the agent classes
    executed, in order, on every cycle).  Each cycle builds a fresh gRPC
    sender, verifies connectivity, runs all agents, then sleeps on
    ``self.event`` until the interval elapses or someone sets the event.
    """

    # Human-readable name used in log/status messages; set by subclass.
    task_name = ''
    # Name of the module option holding the run interval (seconds).
    interval_key = ''
    # Agent classes executed each cycle, in order; set by subclass.
    agents = []

    def __init__(self, mgr_module, agent_timeout=60, call_back=None):
        """
        :param mgr_module: parent ceph mgr module
        :param agent_timeout: (unit seconds) agent execute timeout value, default: 60 secs
        :param call_back: optional callable invoked after each full cycle
            (used by the predict runner to signal fresh results)
        """
        Thread.__init__(self)
        self._agent_timeout = agent_timeout
        self._module_inst = mgr_module
        self._log = mgr_module.log
        # Updated at the start of each run/agent; read by is_timeout().
        self._start_time = time.time()
        self._th = None
        self._call_back = call_back
        # Set by terminate() to end the run() loop.
        self.exit = False
        # Waking this event ends the inter-cycle sleep early.
        self.event = Event()
        self.task_interval = \
            int(self._module_inst.get_configuration(self.interval_key))

    def terminate(self):
        """Request loop exit and wake the thread if it is sleeping."""
        self.exit = True
        self.event.set()
        self._log.info('PDS terminate %s complete' % self.task_name)

    def run(self):
        """Thread main loop: run all agents, notify, sleep, repeat."""
        self._start_time = time.time()
        self._log.info(
            'start %s, interval: %s'
            % (self.task_name, self.task_interval))
        while not self.exit:
            self.run_agents()
            if self._call_back:
                self._call_back()
            if self.event:
                self.event.wait(int(self.task_interval))
                self.event.clear()
        self._log.info(
            'completed %s(%s)' % (self.task_name, time.time()-self._start_time))

    def run_agents(self):
        """Build a sender, test connectivity, and run one agent cycle.

        Updates the parent module's status on failure and re-raises; the
        sender channel is always closed in the finally block.
        """
        obj_sender = None
        try:
            self._log.debug('run_agents %s' % self.task_name)
            # Imported here (not at module top) so the gRPC dependency is
            # only needed once an agent cycle actually runs.
            from .common.grpcclient import GRPcClient, gen_configuration
            conf = gen_configuration(
                host=self._module_inst.get_configuration('diskprediction_server'),
                user=self._module_inst.get_configuration('diskprediction_user'),
                password=self._module_inst.get_configuration(
                    'diskprediction_password'),
                port=self._module_inst.get_configuration('diskprediction_port'),
                cert_context=self._module_inst.get_configuration('diskprediction_cert_context'),
                mgr_inst=self._module_inst,
                ssl_target_name=self._module_inst.get_configuration('diskprediction_ssl_target_name_override'),
                default_authority=self._module_inst.get_configuration('diskprediction_default_authority'))
            obj_sender = GRPcClient(conf)
            if not obj_sender:
                self._log.error('invalid diskprediction sender')
                self._module_inst.status = \
                    {'status': DP_MGR_STAT_FAILED,
                     'reason': 'invalid diskprediction sender'}
                raise Exception('invalid diskprediction sender')
            if obj_sender.test_connection():
                self._module_inst.status = {'status': DP_MGR_STAT_OK}
                self._log.debug('succeed to test connection')
                self._run(self._module_inst, obj_sender)
            else:
                self._log.error('failed to test connection')
                self._module_inst.status = \
                    {'status': DP_MGR_STAT_FAILED,
                     'reason': 'failed to test connection'}
                raise Exception('failed to test connection')
        except Exception as e:
            self._module_inst.status = \
                {'status': DP_MGR_STAT_FAILED,
                 'reason': 'failed to start %s agents, %s'
                           % (self.task_name, str(e))}
            self._log.error(
                'failed to start %s agents, %s' % (self.task_name, str(e)))
            raise
        finally:
            if obj_sender:
                obj_sender.close()

    def is_timeout(self):
        """Return True when the current agent has run past the timeout.

        Compares against ``_start_time``, which is reset per agent in
        ``_run()``, so a single slow agent trips the check.
        """
        now = time.time()
        if (now - self._start_time) > self._agent_timeout:
            return True
        else:
            return False

    def _run(self, module_inst, sender):
        """Run each agent class once, retrying up to 3 times while the
        cluster reports it is still 'configuring'."""
        self._log.debug('%s run' % self.task_name)
        for agent in self.agents:
            self._start_time = time.time()
            retry_count = 3
            while retry_count:
                retry_count -= 1
                try:
                    obj_agent = agent(module_inst, sender, self._agent_timeout)
                    obj_agent.run()
                    del obj_agent
                    break
                except Exception as e:
                    if str(e).find('configuring') >= 0:
                        # Cluster not ready yet; back off briefly and retry.
                        self._log.debug(
                            'failed to execute {}, {}, retry again.'.format(
                                agent.measurement, str(e)))
                        time.sleep(1)
                        continue
                    else:
                        module_inst.status = \
                            {'status': DP_MGR_STAT_WARNING,
                             'reason': 'failed to execute {}, {}'.format(
                                 agent.measurement, ';'.join(str(e).split('\n\t')))}
                        self._log.warning(
                            'failed to execute {}, {}'.format(
                                agent.measurement, ';'.join(str(e).split('\n\t'))))
                        break
+
+
class MetricsRunner(AgentRunner):
    """Periodically uploads cluster/pool/disk/host metrics to the cloud."""

    task_name = 'Metrics Agent'
    interval_key = 'diskprediction_upload_metrics_interval'
    # Execution order matters: agents run one after another each cycle.
    agents = [
        CephClusterAgent,
        CephMonOsdAgent,
        CephPoolAgent,
        SAICluserAgent,
        SAIDiskAgent,
        SAIHostAgent,
        DBRelayAgent,
        SAIAgent,
    ]
+
+
class PredictRunner(AgentRunner):
    """Periodically retrieves device failure predictions from the cloud."""

    task_name = 'Predictor Agent'
    interval_key = 'diskprediction_retrieve_prediction_interval'
    agents = [
        PredictAgent,
    ]
+
+
class SmartRunner(AgentRunner):
    """Periodically uploads SMART data for all disks to the cloud."""

    task_name = 'Smart data Agent'
    interval_key = 'diskprediction_upload_smart_interval'
    agents = [
        SAIDiskSmartAgent,
    ]
+
+
class TestRunner(object):
    """Runs every agent once, synchronously and without a sender, as a
    module self-test (not a thread, unlike the AgentRunner subclasses)."""

    task_name = 'Test Agent'
    interval_key = 'diskprediction_upload_metrics_interval'
    agents = [CephClusterAgent, CephMonOsdAgent, CephPoolAgent,
              SAICluserAgent, SAIDiskAgent, SAIHostAgent, DBRelayAgent,
              SAIAgent, SAIDiskSmartAgent]

    def __init__(self, mgr_module):
        # Parent mgr module; agents read cluster state through it.
        self._module_inst = mgr_module

    def run(self):
        # Sender is None in self-test mode; each agent is created and run once.
        for agent_cls in self.agents:
            agent_cls(self._module_inst, None).run()