Diffstat (limited to 'src/rocksdb/tools/advisor/test')
-rw-r--r--  src/rocksdb/tools/advisor/test/__init__.py                              0
-rw-r--r--  src/rocksdb/tools/advisor/test/input_files/LOG-0                       30
-rw-r--r--  src/rocksdb/tools/advisor/test/input_files/LOG-1                       25
-rw-r--r--  src/rocksdb/tools/advisor/test/input_files/OPTIONS-000005              49
-rw-r--r--  src/rocksdb/tools/advisor/test/input_files/log_stats_parser_keys_ts     3
-rw-r--r--  src/rocksdb/tools/advisor/test/input_files/rules_err1.ini              56
-rw-r--r--  src/rocksdb/tools/advisor/test/input_files/rules_err2.ini              15
-rw-r--r--  src/rocksdb/tools/advisor/test/input_files/rules_err3.ini              15
-rw-r--r--  src/rocksdb/tools/advisor/test/input_files/rules_err4.ini              15
-rw-r--r--  src/rocksdb/tools/advisor/test/input_files/test_rules.ini              47
-rw-r--r--  src/rocksdb/tools/advisor/test/input_files/triggered_rules.ini         83
-rw-r--r--  src/rocksdb/tools/advisor/test/test_db_bench_runner.py                147
-rw-r--r--  src/rocksdb/tools/advisor/test/test_db_log_parser.py                  103
-rw-r--r--  src/rocksdb/tools/advisor/test/test_db_options_parser.py              216
-rw-r--r--  src/rocksdb/tools/advisor/test/test_db_stats_fetcher.py               126
-rw-r--r--  src/rocksdb/tools/advisor/test/test_rule_parser.py                    234
16 files changed, 1164 insertions, 0 deletions
diff --git a/src/rocksdb/tools/advisor/test/__init__.py b/src/rocksdb/tools/advisor/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/__init__.py
diff --git a/src/rocksdb/tools/advisor/test/input_files/LOG-0 b/src/rocksdb/tools/advisor/test/input_files/LOG-0
new file mode 100644
index 000000000..3c9d51641
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/input_files/LOG-0
@@ -0,0 +1,30 @@
+2018/05/25-14:30:05.601692 7f82bd676200 RocksDB version: 5.14.0
+2018/05/25-14:30:07.626719 7f82ba72e700 (Original Log Time 2018/05/25-14:30:07.621966) [db/db_impl_compaction_flush.cc:1424] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
+2018/05/25-14:30:07.626725 7f82ba72e700 [db/flush_job.cc:301] [default] [JOB 3] Flushing memtable with next log file: 8
+2018/05/25-14:30:07.626738 7f82ba72e700 EVENT_LOG_v1 {"time_micros": 1527283807626732, "job": 3, "event": "flush_started", "num_memtables": 1, "num_entries": 28018, "num_deletes": 0, "memory_usage": 4065512, "flush_reason": "Write Buffer Full"}
+2018/05/25-14:30:07.626740 7f82ba72e700 [db/flush_job.cc:331] [default] [JOB 3] Level-0 flush table #10: started
+2018/05/25-14:30:07.764232 7f82b2f20700 [db/db_impl_write.cc:1373] [default] New memtable created with log file: #11. Immutable memtables: 1.
+2018/05/25-14:30:07.764240 7f82b2f20700 [WARN] [db/column_family.cc:743] [default] Stopping writes because we have 2 immutable memtables (waiting for flush), max_write_buffer_number is set to 2
+2018/05/23-11:53:12.800143 7f9f36b40700 [WARN] [db/column_family.cc:799] [default] Stalling writes because we have 4 level-0 files rate 39886
+2018/05/23-11:53:12.800143 7f9f36b40700 [WARN] [db/column_family.cc:799] [default] Stopping writes because we have 4 level-0 files rate 39886
+2018/05/25-14:30:09.398302 7f82ba72e700 EVENT_LOG_v1 {"time_micros": 1527283809398276, "cf_name": "default", "job": 3, "event": "table_file_creation", "file_number": 10, "file_size": 1890434, "table_properties": {"data_size": 1876749, "index_size": 23346, "filter_size": 0, "raw_key_size": 663120, "raw_average_key_size": 24, "raw_value_size": 2763000, "raw_average_value_size": 100, "num_data_blocks": 838, "num_entries": 27630, "filter_policy_name": "", "kDeletedKeys": "0", "kMergeOperands": "0"}}
+2018/05/25-14:30:09.398351 7f82ba72e700 [db/flush_job.cc:371] [default] [JOB 3] Level-0 flush table #10: 1890434 bytes OK
+2018/05/25-14:30:25.491635 7f82ba72e700 [db/flush_job.cc:331] [default] [JOB 10] Level-0 flush table #23: started
+2018/05/25-14:30:25.643618 7f82b2f20700 [db/db_impl_write.cc:1373] [default] New memtable created with log file: #24. Immutable memtables: 1.
+2018/05/25-14:30:25.643633 7f82b2f20700 [WARN] [db/column_family.cc:743] [default] Stopping writes because we have 2 immutable memtables (waiting for flush), max_write_buffer_number is set to 2
+2018/05/25-14:30:27.288181 7f82ba72e700 EVENT_LOG_v1 {"time_micros": 1527283827288158, "cf_name": "default", "job": 10, "event": "table_file_creation", "file_number": 23, "file_size": 1893200, "table_properties": {"data_size": 1879460, "index_size": 23340, "filter_size": 0, "raw_key_size": 663360, "raw_average_key_size": 24, "raw_value_size": 2764000, "raw_average_value_size": 100, "num_data_blocks": 838, "num_entries": 27640, "filter_policy_name": "", "kDeletedKeys": "0", "kMergeOperands": "0"}}
+2018/05/25-14:30:27.288210 7f82ba72e700 [db/flush_job.cc:371] [default] [JOB 10] Level-0 flush table #23: 1893200 bytes OK
+2018/05/25-14:30:27.289353 7f82ba72e700 [WARN] [db/column_family.cc:764] [default] Stalling writes because of estimated pending compaction bytes 14410584
+2018/05/25-14:30:27.289390 7f82ba72e700 (Original Log Time 2018/05/25-14:30:27.288829) [db/memtable_list.cc:377] [default] Level-0 commit table #23 started
+2018/05/25-14:30:27.289393 7f82ba72e700 (Original Log Time 2018/05/25-14:30:27.289332) [db/memtable_list.cc:409] [default] Level-0 commit table #23: memtable #1 done
+2018/05/25-14:34:21.047206 7f82ba72e700 EVENT_LOG_v1 {"time_micros": 1527284061047181, "cf_name": "default", "job": 44, "event": "table_file_creation", "file_number": 84, "file_size": 1890780, "table_properties": {"data_size": 1877100, "index_size": 23309, "filter_size": 0, "raw_key_size": 662808, "raw_average_key_size": 24, "raw_value_size": 2761700, "raw_average_value_size": 100, "num_data_blocks": 837, "num_entries": 27617, "filter_policy_name": "", "kDeletedKeys": "0", "kMergeOperands": "0"}}
+2018/05/25-14:34:21.047233 7f82ba72e700 [db/flush_job.cc:371] [default] [JOB 44] Level-0 flush table #84: 1890780 bytes OK
+2018/05/25-14:34:21.048017 7f82ba72e700 (Original Log Time 2018/05/25-14:34:21.048005) EVENT_LOG_v1 {"time_micros": 1527284061047997, "job": 44, "event": "flush_finished", "output_compression": "Snappy", "lsm_state": [2, 1, 0, 0, 0, 0, 0], "immutable_memtables": 1}
+2018/05/25-14:34:21.048592 7f82bd676200 [DEBUG] [db/db_impl_files.cc:261] [JOB 45] Delete /tmp/rocksdbtest-155919/dbbench/000084.sst type=2 #84 -- OK
+2018/05/25-14:34:21.048603 7f82bd676200 EVENT_LOG_v1 {"time_micros": 1527284061048600, "job": 45, "event": "table_file_deletion", "file_number": 84}
+2018/05/25-14:34:21.048981 7f82bd676200 [db/db_impl.cc:398] Shutdown complete
+2018/05/25-14:34:21.049000 7f82bd676200 [db/db_impl.cc:563] [col-fam-A] random log message for testing
+2018/05/25-14:34:21.049010 7f82bd676200 [db/db_impl.cc:234] [col-fam-B] log continuing on next line
+remaining part of the log
+2018/05/25-14:34:21.049020 7f82bd676200 [db/db_impl.cc:653] [col-fam-A] another random log message
+2018/05/25-14:34:21.049025 7f82bd676200 [db/db_impl.cc:331] [unknown] random log message no column family
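
The LOG fixtures above exercise the advisor's log parsing (see test_db_log_parser.py further down): every entry opens with a microsecond timestamp and a thread context, and an entry can spill onto following lines, as "remaining part of the log" does above. A minimal sketch of how such lines could be split under that assumed three-field layout; the actual Log class in advisor/db_log_parser.py may do this differently:

import re

# Assumed entry layout: "<yyyy/mm/dd-HH:MM:SS.ffffff> <thread-context> <message>".
# A line that does not open with a full timestamp is a continuation of the
# previous entry (e.g. "remaining part of the log" above).
NEW_ENTRY = re.compile(
    r'^(\d{4}/\d{2}/\d{2}-\d{2}:\d{2}:\d{2}\.\d{6}) (\S+) (.*)$'
)

def split_log_line(line):
    match = NEW_ENTRY.match(line)
    if match is None:
        return None  # continuation line: append to the previous entry
    hr_time, context, message = match.groups()
    return hr_time, context, message
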
diff --git a/src/rocksdb/tools/advisor/test/input_files/LOG-1 b/src/rocksdb/tools/advisor/test/input_files/LOG-1
new file mode 100644
index 000000000..b163f9a99
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/input_files/LOG-1
@@ -0,0 +1,25 @@
+2018/05/25-14:30:05.601692 7f82bd676200 RocksDB version: 5.14.0
+2018/05/25-14:30:07.626719 7f82ba72e700 (Original Log Time 2018/05/25-14:30:07.621966) [db/db_impl_compaction_flush.cc:1424] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
+2018/05/25-14:30:07.626725 7f82ba72e700 [db/flush_job.cc:301] [default] [JOB 3] Flushing memtable with next log file: 8
+2018/05/25-14:30:07.626738 7f82ba72e700 EVENT_LOG_v1 {"time_micros": 1527283807626732, "job": 3, "event": "flush_started", "num_memtables": 1, "num_entries": 28018, "num_deletes": 0, "memory_usage": 4065512, "flush_reason": "Write Buffer Full"}
+2018/05/25-14:30:07.626740 7f82ba72e700 [db/flush_job.cc:331] [default] [JOB 3] Level-0 flush table #10: started
+2018/05/25-14:30:07.764232 7f82b2f20700 [db/db_impl_write.cc:1373] [default] New memtable created with log file: #11. Immutable memtables: 1.
+2018/05/25-14:30:07.764240 7f82b2f20700 [WARN] [db/column_family.cc:743] [default] Stopping writes because we have 2 immutable memtables (waiting for flush), max_write_buffer_number is set to 2
+2018/05/23-11:53:12.800143 7f9f36b40700 [WARN] [db/column_family.cc:799] [default] Stalling writes because we have 4 level-0 files rate 39886
+2018/05/23-11:53:12.800143 7f9f36b40700 [WARN] [db/column_family.cc:799] [default] Stopping writes because we have 4 level-0 files rate 39886
+2018/05/25-14:30:09.398302 7f82ba72e700 EVENT_LOG_v1 {"time_micros": 1527283809398276, "cf_name": "default", "job": 3, "event": "table_file_creation", "file_number": 10, "file_size": 1890434, "table_properties": {"data_size": 1876749, "index_size": 23346, "filter_size": 0, "raw_key_size": 663120, "raw_average_key_size": 24, "raw_value_size": 2763000, "raw_average_value_size": 100, "num_data_blocks": 838, "num_entries": 27630, "filter_policy_name": "", "kDeletedKeys": "0", "kMergeOperands": "0"}}
+2018/05/25-14:30:09.398351 7f82ba72e700 [db/flush_job.cc:371] [default] [JOB 3] Level-0 flush table #10: 1890434 bytes OK
+2018/05/25-14:30:25.491635 7f82ba72e700 [db/flush_job.cc:331] [default] [JOB 10] Level-0 flush table #23: started
+2018/05/25-14:30:25.643618 7f82b2f20700 [db/db_impl_write.cc:1373] [default] New memtable created with log file: #24. Immutable memtables: 1.
+2018/05/25-14:30:25.643633 7f82b2f20700 [WARN] [db/column_family.cc:743] [default] Stopping writes because we have 2 immutable memtables (waiting for flush), max_write_buffer_number is set to 2
+2018/05/25-14:30:27.288181 7f82ba72e700 EVENT_LOG_v1 {"time_micros": 1527283827288158, "cf_name": "default", "job": 10, "event": "table_file_creation", "file_number": 23, "file_size": 1893200, "table_properties": {"data_size": 1879460, "index_size": 23340, "filter_size": 0, "raw_key_size": 663360, "raw_average_key_size": 24, "raw_value_size": 2764000, "raw_average_value_size": 100, "num_data_blocks": 838, "num_entries": 27640, "filter_policy_name": "", "kDeletedKeys": "0", "kMergeOperands": "0"}}
+2018/05/25-14:30:27.288210 7f82ba72e700 [db/flush_job.cc:371] [default] [JOB 10] Level-0 flush table #23: 1893200 bytes OK
+2018/05/25-14:30:27.289353 7f82ba72e700 [WARN] [db/column_family.cc:764] [default] Stopping writes because of estimated pending compaction bytes 14410584
+2018/05/25-14:30:27.289390 7f82ba72e700 (Original Log Time 2018/05/25-14:30:27.288829) [db/memtable_list.cc:377] [default] Level-0 commit table #23 started
+2018/05/25-14:30:27.289393 7f82ba72e700 (Original Log Time 2018/05/25-14:30:27.289332) [db/memtable_list.cc:409] [default] Level-0 commit table #23: memtable #1 done
+2018/05/25-14:34:21.047206 7f82ba72e700 EVENT_LOG_v1 {"time_micros": 1527284061047181, "cf_name": "default", "job": 44, "event": "table_file_creation", "file_number": 84, "file_size": 1890780, "table_properties": {"data_size": 1877100, "index_size": 23309, "filter_size": 0, "raw_key_size": 662808, "raw_average_key_size": 24, "raw_value_size": 2761700, "raw_average_value_size": 100, "num_data_blocks": 837, "num_entries": 27617, "filter_policy_name": "", "kDeletedKeys": "0", "kMergeOperands": "0"}}
+2018/05/25-14:34:21.047233 7f82ba72e700 [db/flush_job.cc:371] [default] [JOB 44] Level-0 flush table #84: 1890780 bytes OK
+2018/05/25-14:34:21.048017 7f82ba72e700 (Original Log Time 2018/05/25-14:34:21.048005) EVENT_LOG_v1 {"time_micros": 1527284061047997, "job": 44, "event": "flush_finished", "output_compression": "Snappy", "lsm_state": [2, 1, 0, 0, 0, 0, 0], "immutable_memtables": 1}
+2018/05/25-14:34:21.048592 7f82bd676200 [DEBUG] [db/db_impl_files.cc:261] [JOB 45] Delete /tmp/rocksdbtest-155919/dbbench/000084.sst type=2 #84 -- OK
+2018/05/25-14:34:21.048603 7f82bd676200 EVENT_LOG_v1 {"time_micros": 1527284061048600, "job": 45, "event": "table_file_deletion", "file_number": 84}
+2018/05/25-14:34:21.048981 7f82bd676200 [db/db_impl.cc:398] Shutdown complete
diff --git a/src/rocksdb/tools/advisor/test/input_files/OPTIONS-000005 b/src/rocksdb/tools/advisor/test/input_files/OPTIONS-000005
new file mode 100644
index 000000000..009edb04d
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/input_files/OPTIONS-000005
@@ -0,0 +1,49 @@
+# This is a RocksDB option file.
+#
+# For detailed file format spec, please refer to the example file
+# in examples/rocksdb_option_file_example.ini
+#
+
+[Version]
+ rocksdb_version=5.14.0
+ options_file_version=1.1
+
+[DBOptions]
+ manual_wal_flush=false
+ allow_ingest_behind=false
+ db_write_buffer_size=0
+ db_log_dir=
+ random_access_max_buffer_size=1048576
+
+[CFOptions "default"]
+ ttl=0
+ max_bytes_for_level_base=268435456
+ max_bytes_for_level_multiplier=10.000000
+ level0_file_num_compaction_trigger=4
+ level0_stop_writes_trigger=36
+ write_buffer_size=4194000
+ min_write_buffer_number_to_merge=1
+ num_levels=7
+ compaction_filter_factory=nullptr
+ compaction_style=kCompactionStyleLevel
+
+[TableOptions/BlockBasedTable "default"]
+ block_align=false
+ index_type=kBinarySearch
+
+[CFOptions "col_fam_A"]
+ttl=0
+max_bytes_for_level_base=268435456
+max_bytes_for_level_multiplier=10.000000
+level0_file_num_compaction_trigger=5
+level0_stop_writes_trigger=36
+write_buffer_size=1024000
+min_write_buffer_number_to_merge=1
+num_levels=5
+compaction_filter_factory=nullptr
+compaction_style=kCompactionStyleLevel
+
+[TableOptions/BlockBasedTable "col_fam_A"]
+block_align=true
+block_restart_interval=16
+index_type=kBinarySearch
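
The OPTIONS fixture is INI-like, with per-column-family sections such as [CFOptions "default"]. As a rough illustration only (not the advisor's own loader, which is advisor/db_options_parser.py), Python's configparser can read it:

import configparser

# Rough sketch: the real DatabaseOptions parser may handle sections,
# comments and indentation differently.
config = configparser.ConfigParser()
config.read('input_files/OPTIONS-000005')
for section in config.sections():
    if section.startswith('CFOptions'):
        cf_name = section.split('"')[1]  # e.g. 'default', 'col_fam_A'
        print(cf_name, config[section]['write_buffer_size'])
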
diff --git a/src/rocksdb/tools/advisor/test/input_files/log_stats_parser_keys_ts b/src/rocksdb/tools/advisor/test/input_files/log_stats_parser_keys_ts
new file mode 100644
index 000000000..e8ade9e3e
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/input_files/log_stats_parser_keys_ts
@@ -0,0 +1,3 @@
+rocksdb.number.block.decompressed.count: 1530896335 88.0, 1530896361 788338.0, 1530896387 1539256.0, 1530896414 2255696.0, 1530896440 3009325.0, 1530896466 3767183.0, 1530896492 4529775.0, 1530896518 5297809.0, 1530896545 6033802.0, 1530896570 6794129.0
+rocksdb.db.get.micros.p50: 1530896335 295.5, 1530896361 16.561841, 1530896387 16.20677, 1530896414 16.31508, 1530896440 16.346602, 1530896466 16.284669, 1530896492 16.16005, 1530896518 16.069096, 1530896545 16.028746, 1530896570 15.9638
+rocksdb.manifest.file.sync.micros.p99: 1530896335 649.0, 1530896361 835.0, 1530896387 1435.0, 1530896414 9938.0, 1530896440 9938.0, 1530896466 9938.0, 1530896492 9938.0, 1530896518 1882.0, 1530896545 1837.0, 1530896570 1792.0
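
Each line of this fixture has the form "stat_name: ts1 v1, ts2 v2, ...". TestLogStatsParser.setUp in test_db_stats_fetcher.py below turns it into a nested {stat: {timestamp: value}} mapping; a condensed standalone version of that parsing:

# Mirrors TestLogStatsParser.setUp below; a standalone sketch, not the
# advisor's own fetcher code.
keys_ts = {}
with open('input_files/log_stats_parser_keys_ts') as fp:
    for line in fp:
        stat_name, series = line.split(':', 1)
        keys_ts[stat_name.strip()] = {
            int(token.split()[0]): float(token.split()[1])
            for token in series.strip().split(',')
        }
# keys_ts['rocksdb.db.get.micros.p50'][1530896335] == 295.5
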
diff --git a/src/rocksdb/tools/advisor/test/input_files/rules_err1.ini b/src/rocksdb/tools/advisor/test/input_files/rules_err1.ini
new file mode 100644
index 000000000..23be55dde
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/input_files/rules_err1.ini
@@ -0,0 +1,56 @@
+[Rule "missing-suggestions"]
+suggestions=
+conditions=missing-source
+
+[Condition "normal-rule"]
+source=LOG
+regex=Stopping writes because we have \d+ immutable memtables \(waiting for flush\), max_write_buffer_number is set to \d+
+
+[Suggestion "inc-bg-flush"]
+option=DBOptions.max_background_flushes
+action=increase
+
+[Suggestion "inc-write-buffer"]
+option=CFOptions.max_write_buffer_number
+action=increase
+
+[Rule "missing-conditions"]
+conditions=
+suggestions=missing-description
+
+[Condition "missing-options"]
+source=OPTIONS
+options=
+evaluate=int(options[0])*int(options[1])-int(options[2])<(-251659456) # should evaluate to a boolean
+
+[Rule "missing-expression"]
+conditions=missing-expression
+suggestions=missing-description
+
+[Condition "missing-expression"]
+source=OPTIONS
+options=CFOptions.level0_file_num_compaction_trigger:CFOptions.write_buffer_size:CFOptions.max_bytes_for_level_base
+evaluate=
+
+[Suggestion "missing-description"]
+description=
+
+[Rule "stop-too-many-L0"]
+suggestions=inc-max-bg-compactions:missing-action:inc-l0-stop-writes-trigger
+conditions=missing-regex
+
+[Condition "missing-regex"]
+source=LOG
+regex=
+
+[Suggestion "missing-option"]
+option=
+action=increase
+
+[Suggestion "normal-suggestion"]
+option=CFOptions.write_buffer_size
+action=increase
+
+[Suggestion "inc-l0-stop-writes-trigger"]
+option=CFOptions.level0_stop_writes_trigger
+action=increase
diff --git a/src/rocksdb/tools/advisor/test/input_files/rules_err2.ini b/src/rocksdb/tools/advisor/test/input_files/rules_err2.ini
new file mode 100644
index 000000000..bce21dba9
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/input_files/rules_err2.ini
@@ -0,0 +1,15 @@
+[Rule "normal-rule"]
+suggestions=inc-bg-flush:inc-write-buffer
+conditions=missing-source
+
+[Condition "missing-source"]
+source=
+regex=Stopping writes because we have \d+ immutable memtables \(waiting for flush\), max_write_buffer_number is set to \d+
+
+[Suggestion "inc-bg-flush"]
+option=DBOptions.max_background_flushes
+action=increase
+
+[Suggestion "inc-write-buffer"]
+option=CFOptions.max_write_buffer_number
+action=increase
diff --git a/src/rocksdb/tools/advisor/test/input_files/rules_err3.ini b/src/rocksdb/tools/advisor/test/input_files/rules_err3.ini
new file mode 100644
index 000000000..73c06e469
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/input_files/rules_err3.ini
@@ -0,0 +1,15 @@
+[Rule "normal-rule"]
+suggestions=missing-action:inc-write-buffer
+conditions=missing-source
+
+[Condition "normal-condition"]
+source=LOG
+regex=Stopping writes because we have \d+ immutable memtables \(waiting for flush\), max_write_buffer_number is set to \d+
+
+[Suggestion "missing-action"]
+option=DBOptions.max_background_flushes
+action=
+
+[Suggestion "inc-write-buffer"]
+option=CFOptions.max_write_buffer_number
+action=increase
diff --git a/src/rocksdb/tools/advisor/test/input_files/rules_err4.ini b/src/rocksdb/tools/advisor/test/input_files/rules_err4.ini
new file mode 100644
index 000000000..4d4aa3c70
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/input_files/rules_err4.ini
@@ -0,0 +1,15 @@
+[Rule "normal-rule"]
+suggestions=inc-bg-flush
+conditions=missing-source
+
+[Condition "normal-condition"]
+source=LOG
+regex=Stopping writes because we have \d+ immutable memtables \(waiting for flush\), max_write_buffer_number is set to \d+
+
+[Suggestion "inc-bg-flush"]
+option=DBOptions.max_background_flushes
+action=increase
+
+[Suggestion] # missing section name
+option=CFOptions.max_write_buffer_number
+action=increase
diff --git a/src/rocksdb/tools/advisor/test/input_files/test_rules.ini b/src/rocksdb/tools/advisor/test/input_files/test_rules.ini
new file mode 100644
index 000000000..97b9374fc
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/input_files/test_rules.ini
@@ -0,0 +1,47 @@
+[Rule "single-condition-false"]
+suggestions=inc-bg-flush:inc-write-buffer
+conditions=log-4-false
+
+[Rule "multiple-conds-true"]
+suggestions=inc-write-buffer
+conditions=log-1-true:log-2-true:log-3-true
+
+[Rule "multiple-conds-one-false"]
+suggestions=inc-bg-flush
+conditions=log-1-true:log-4-false:log-3-true
+
+[Rule "multiple-conds-all-false"]
+suggestions=l0-l1-ratio-health-check
+conditions=log-4-false:options-1-false
+
+[Condition "log-1-true"]
+source=LOG
+regex=Stopping writes because we have \d+ immutable memtables \(waiting for flush\), max_write_buffer_number is set to \d+
+
+[Condition "log-2-true"]
+source=LOG
+regex=Stalling writes because we have \d+ level-0 files
+
+[Condition "log-3-true"]
+source=LOG
+regex=Stopping writes because we have \d+ level-0 files
+
+[Condition "log-4-false"]
+source=LOG
+regex=Stalling writes because of estimated pending compaction bytes \d+
+
+[Condition "options-1-false"]
+source=OPTIONS
+options=CFOptions.level0_file_num_compaction_trigger:CFOptions.write_buffer_size:DBOptions.random_access_max_buffer_size
+evaluate=int(options[0])*int(options[1])-int(options[2])<0 # should evaluate to a boolean
+
+[Suggestion "inc-bg-flush"]
+option=DBOptions.max_background_flushes
+action=increase
+
+[Suggestion "inc-write-buffer"]
+option=CFOptions.max_write_buffer_number
+action=increase
+
+[Suggestion "l0-l1-ratio-health-check"]
+description='modify options such that (level0_file_num_compaction_trigger * write_buffer_size - max_bytes_for_level_base < 5) is satisfied'
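
A rule in these specs triggers only if every condition it names has triggered; TestConditionsConjunctions in test_rule_parser.py asserts exactly that against this fixture. A toy sketch of the conjunction semantics (the real Rule.is_triggered also tracks column families):

def rule_is_triggered(rule_conditions, triggered_names):
    # conjunction semantics: every listed condition must have triggered
    return all(name in triggered_names for name in rule_conditions)

# From the fixture: 'multiple-conds-one-false' lists log-4-false, so it
# must not fire even though its other two conditions do.
assert rule_is_triggered(
    ['log-1-true', 'log-2-true', 'log-3-true'],   # multiple-conds-true
    {'log-1-true', 'log-2-true', 'log-3-true'}
)
assert not rule_is_triggered(
    ['log-1-true', 'log-4-false', 'log-3-true'],  # multiple-conds-one-false
    {'log-1-true', 'log-2-true', 'log-3-true'}
)
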
diff --git a/src/rocksdb/tools/advisor/test/input_files/triggered_rules.ini b/src/rocksdb/tools/advisor/test/input_files/triggered_rules.ini
new file mode 100644
index 000000000..83b96da2b
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/input_files/triggered_rules.ini
@@ -0,0 +1,83 @@
+[Rule "stall-too-many-memtables"]
+suggestions=inc-bg-flush:inc-write-buffer
+conditions=stall-too-many-memtables
+
+[Condition "stall-too-many-memtables"]
+source=LOG
+regex=Stopping writes because we have \d+ immutable memtables \(waiting for flush\), max_write_buffer_number is set to \d+
+
+[Rule "stall-too-many-L0"]
+suggestions=inc-max-subcompactions:inc-max-bg-compactions:inc-write-buffer-size:dec-max-bytes-for-level-base:inc-l0-slowdown-writes-trigger
+conditions=stall-too-many-L0
+
+[Condition "stall-too-many-L0"]
+source=LOG
+regex=Stalling writes because we have \d+ level-0 files
+
+[Rule "stop-too-many-L0"]
+suggestions=inc-max-bg-compactions:inc-write-buffer-size:inc-l0-stop-writes-trigger
+conditions=stop-too-many-L0
+
+[Condition "stop-too-many-L0"]
+source=LOG
+regex=Stopping writes because we have \d+ level-0 files
+
+[Rule "stall-too-many-compaction-bytes"]
+suggestions=inc-max-bg-compactions:inc-write-buffer-size:inc-hard-pending-compaction-bytes-limit:inc-soft-pending-compaction-bytes-limit
+conditions=stall-too-many-compaction-bytes
+
+[Condition "stall-too-many-compaction-bytes"]
+source=LOG
+regex=Stalling writes because of estimated pending compaction bytes \d+
+
+[Suggestion "inc-bg-flush"]
+option=DBOptions.max_background_flushes
+action=increase
+
+[Suggestion "inc-write-buffer"]
+option=CFOptions.max_write_buffer_number
+action=increase
+
+[Suggestion "inc-max-subcompactions"]
+option=DBOptions.max_subcompactions
+action=increase
+
+[Suggestion "inc-max-bg-compactions"]
+option=DBOptions.max_background_compactions
+action=increase
+
+[Suggestion "inc-write-buffer-size"]
+option=CFOptions.write_buffer_size
+action=increase
+
+[Suggestion "dec-max-bytes-for-level-base"]
+option=CFOptions.max_bytes_for_level_base
+action=decrease
+
+[Suggestion "inc-l0-slowdown-writes-trigger"]
+option=CFOptions.level0_slowdown_writes_trigger
+action=increase
+
+[Suggestion "inc-l0-stop-writes-trigger"]
+option=CFOptions.level0_stop_writes_trigger
+action=increase
+
+[Suggestion "inc-hard-pending-compaction-bytes-limit"]
+option=CFOptions.hard_pending_compaction_bytes_limit
+action=increase
+
+[Suggestion "inc-soft-pending-compaction-bytes-limit"]
+option=CFOptions.soft_pending_compaction_bytes_limit
+action=increase
+
+[Rule "level0-level1-ratio"]
+conditions=level0-level1-ratio
+suggestions=l0-l1-ratio-health-check
+
+[Condition "level0-level1-ratio"]
+source=OPTIONS
+options=CFOptions.level0_file_num_compaction_trigger:CFOptions.write_buffer_size:CFOptions.max_bytes_for_level_base
+evaluate=int(options[0])*int(options[1])-int(options[2])>=-268173312 # should evaluate to a boolean, condition triggered if evaluates to true
+
+[Suggestion "l0-l1-ratio-health-check"]
+description='modify options such that (level0_file_num_compaction_trigger * write_buffer_size - max_bytes_for_level_base < -268173312) is satisfied'
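
An OPTIONS condition such as "level0-level1-ratio" binds the listed option values, in order, to a list named options and evaluates the expression to a boolean. A worked check under that substitution model, using the "default" column family values from OPTIONS-000005:

# Values for the 'default' column family from OPTIONS-000005 above.
options = ['4', '4194000', '268435456']
expression = 'int(options[0])*int(options[1])-int(options[2])>=-268173312'
# 4 * 4194000 - 268435456 = -251659456, and -251659456 >= -268173312
print(eval(expression))  # True -> the condition triggers
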
diff --git a/src/rocksdb/tools/advisor/test/test_db_bench_runner.py b/src/rocksdb/tools/advisor/test/test_db_bench_runner.py
new file mode 100644
index 000000000..1c4f77d50
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/test_db_bench_runner.py
@@ -0,0 +1,147 @@
+# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+# This source code is licensed under both the GPLv2 (found in the
+# COPYING file in the root directory) and Apache 2.0 License
+# (found in the LICENSE.Apache file in the root directory).
+
+from advisor.db_bench_runner import DBBenchRunner
+from advisor.db_log_parser import NO_COL_FAMILY, DataSource
+from advisor.db_options_parser import DatabaseOptions
+import os
+import unittest
+
+
+class TestDBBenchRunnerMethods(unittest.TestCase):
+ def setUp(self):
+ self.pos_args = [
+ './../../db_bench',
+ 'overwrite',
+ 'use_existing_db=true',
+ 'duration=10'
+ ]
+ self.bench_runner = DBBenchRunner(self.pos_args)
+ this_path = os.path.abspath(os.path.dirname(__file__))
+ options_path = os.path.join(this_path, 'input_files/OPTIONS-000005')
+ self.db_options = DatabaseOptions(options_path)
+
+ def test_setup(self):
+ self.assertEqual(self.bench_runner.db_bench_binary, self.pos_args[0])
+ self.assertEqual(self.bench_runner.benchmark, self.pos_args[1])
+ self.assertSetEqual(
+ set(self.bench_runner.db_bench_args), set(self.pos_args[2:])
+ )
+
+ def test_get_info_log_file_name(self):
+ log_file_name = DBBenchRunner.get_info_log_file_name(
+ None, 'random_path'
+ )
+ self.assertEqual(log_file_name, 'LOG')
+
+ log_file_name = DBBenchRunner.get_info_log_file_name(
+ '/dev/shm/', '/tmp/rocksdbtest-155919/dbbench/'
+ )
+ self.assertEqual(log_file_name, 'tmp_rocksdbtest-155919_dbbench_LOG')
+
+ def test_get_opt_args_str(self):
+ misc_opt_dict = {'bloom_bits': 2, 'empty_opt': None, 'rate_limiter': 3}
+ optional_args_str = DBBenchRunner.get_opt_args_str(misc_opt_dict)
+ self.assertEqual(optional_args_str, ' --bloom_bits=2 --rate_limiter=3')
+
+ def test_get_log_options(self):
+ db_path = '/tmp/rocksdb-155919/dbbench'
+ # when db_log_dir is present in the db_options
+ update_dict = {
+ 'DBOptions.db_log_dir': {NO_COL_FAMILY: '/dev/shm'},
+ 'DBOptions.stats_dump_period_sec': {NO_COL_FAMILY: '20'}
+ }
+ self.db_options.update_options(update_dict)
+ log_file_prefix, stats_freq = self.bench_runner.get_log_options(
+ self.db_options, db_path
+ )
+ self.assertEqual(
+ log_file_prefix, '/dev/shm/tmp_rocksdb-155919_dbbench_LOG'
+ )
+ self.assertEqual(stats_freq, 20)
+
+ update_dict = {
+ 'DBOptions.db_log_dir': {NO_COL_FAMILY: None},
+ 'DBOptions.stats_dump_period_sec': {NO_COL_FAMILY: '30'}
+ }
+ self.db_options.update_options(update_dict)
+ log_file_prefix, stats_freq = self.bench_runner.get_log_options(
+ self.db_options, db_path
+ )
+ self.assertEqual(log_file_prefix, '/tmp/rocksdb-155919/dbbench/LOG')
+ self.assertEqual(stats_freq, 30)
+
+ def test_build_experiment_command(self):
+ # add some misc_options to db_options
+ update_dict = {
+ 'bloom_bits': {NO_COL_FAMILY: 2},
+ 'rate_limiter_bytes_per_sec': {NO_COL_FAMILY: 128000000}
+ }
+ self.db_options.update_options(update_dict)
+ db_path = '/dev/shm'
+ experiment_command = self.bench_runner._build_experiment_command(
+ self.db_options, db_path
+ )
+ opt_args_str = DBBenchRunner.get_opt_args_str(
+ self.db_options.get_misc_options()
+ )
+ opt_args_str += (
+ ' --options_file=' +
+ self.db_options.generate_options_config('12345')
+ )
+ for arg in self.pos_args[2:]:
+ opt_args_str += (' --' + arg)
+ expected_command = (
+ self.pos_args[0] + ' --benchmarks=' + self.pos_args[1] +
+ ' --statistics --perf_level=3 --db=' + db_path + opt_args_str
+ )
+ self.assertEqual(experiment_command, expected_command)
+
+
+class TestDBBenchRunner(unittest.TestCase):
+ def setUp(self):
+ # Note: the db_bench binary should be present in the rocksdb/ directory
+ self.pos_args = [
+ './../../db_bench',
+ 'overwrite',
+ 'use_existing_db=true',
+ 'duration=20'
+ ]
+ self.bench_runner = DBBenchRunner(self.pos_args)
+ this_path = os.path.abspath(os.path.dirname(__file__))
+ options_path = os.path.join(this_path, 'input_files/OPTIONS-000005')
+ self.db_options = DatabaseOptions(options_path)
+
+ def test_experiment_output(self):
+ update_dict = {'bloom_bits': {NO_COL_FAMILY: 2}}
+ self.db_options.update_options(update_dict)
+ db_path = '/dev/shm'
+ data_sources, throughput = self.bench_runner.run_experiment(
+ self.db_options, db_path
+ )
+ self.assertEqual(
+ data_sources[DataSource.Type.DB_OPTIONS][0].type,
+ DataSource.Type.DB_OPTIONS
+ )
+ self.assertEqual(
+ data_sources[DataSource.Type.LOG][0].type,
+ DataSource.Type.LOG
+ )
+ self.assertEqual(len(data_sources[DataSource.Type.TIME_SERIES]), 2)
+ self.assertEqual(
+ data_sources[DataSource.Type.TIME_SERIES][0].type,
+ DataSource.Type.TIME_SERIES
+ )
+ self.assertEqual(
+ data_sources[DataSource.Type.TIME_SERIES][1].type,
+ DataSource.Type.TIME_SERIES
+ )
+ self.assertEqual(
+ data_sources[DataSource.Type.TIME_SERIES][1].stats_freq_sec, 0
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
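
test_get_opt_args_str pins down the formatting contract for miscellaneous options: one " --name=value" fragment per option, with None-valued entries skipped. A plausible implementation that satisfies the test (the shipped DBBenchRunner.get_opt_args_str may differ):

def get_opt_args_str(misc_opt_dict):
    # ' --name=value' per option; options set to None are dropped
    return ''.join(
        ' --{}={}'.format(opt, val)
        for opt, val in misc_opt_dict.items()
        if val is not None
    )

assert get_opt_args_str(
    {'bloom_bits': 2, 'empty_opt': None, 'rate_limiter': 3}
) == ' --bloom_bits=2 --rate_limiter=3'
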
diff --git a/src/rocksdb/tools/advisor/test/test_db_log_parser.py b/src/rocksdb/tools/advisor/test/test_db_log_parser.py
new file mode 100644
index 000000000..b70430433
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/test_db_log_parser.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+# This source code is licensed under both the GPLv2 (found in the
+# COPYING file in the root directory) and Apache 2.0 License
+# (found in the LICENSE.Apache file in the root directory).
+
+from advisor.db_log_parser import DatabaseLogs, Log, NO_COL_FAMILY
+from advisor.rule_parser import Condition, LogCondition
+import os
+import unittest
+
+
+class TestLog(unittest.TestCase):
+ def setUp(self):
+ self.column_families = ['default', 'col_fam_A']
+
+ def test_get_column_family(self):
+ test_log = (
+ "2018/05/25-14:34:21.047233 7f82ba72e700 [db/flush_job.cc:371] " +
+ "[col_fam_A] [JOB 44] Level-0 flush table #84: 1890780 bytes OK"
+ )
+ db_log = Log(test_log, self.column_families)
+ self.assertEqual('col_fam_A', db_log.get_column_family())
+
+ test_log = (
+ "2018/05/25-14:34:21.047233 7f82ba72e700 [db/flush_job.cc:371] " +
+ "[JOB 44] Level-0 flush table #84: 1890780 bytes OK"
+ )
+ db_log = Log(test_log, self.column_families)
+ db_log.append_message('[default] some remaining part of log')
+ self.assertEqual(NO_COL_FAMILY, db_log.get_column_family())
+
+ def test_get_methods(self):
+ hr_time = "2018/05/25-14:30:25.491635"
+ context = "7f82ba72e700"
+ message = (
+ "[db/flush_job.cc:331] [default] [JOB 10] Level-0 flush table " +
+ "#23: started"
+ )
+ test_log = hr_time + " " + context + " " + message
+ db_log = Log(test_log, self.column_families)
+ self.assertEqual(db_log.get_message(), message)
+ remaining_message = "[col_fam_A] some more logs"
+ db_log.append_message(remaining_message)
+ self.assertEqual(
+ db_log.get_human_readable_time(), "2018/05/25-14:30:25.491635"
+ )
+ self.assertEqual(db_log.get_context(), "7f82ba72e700")
+ self.assertEqual(db_log.get_timestamp(), 1527258625)
+ self.assertEqual(
+ db_log.get_message(), str(message + '\n' + remaining_message)
+ )
+
+ def test_is_new_log(self):
+ new_log = "2018/05/25-14:34:21.047233 context random new log"
+ remaining_log = "2018/05/25 not really a new log"
+ self.assertTrue(Log.is_new_log(new_log))
+ self.assertFalse(Log.is_new_log(remaining_log))
+
+
+class TestDatabaseLogs(unittest.TestCase):
+ def test_check_and_trigger_conditions(self):
+ this_path = os.path.abspath(os.path.dirname(__file__))
+ logs_path_prefix = os.path.join(this_path, 'input_files/LOG-0')
+ column_families = ['default', 'col-fam-A', 'col-fam-B']
+ db_logs = DatabaseLogs(logs_path_prefix, column_families)
+ # matches, has 2 col_fams
+ condition1 = LogCondition.create(Condition('cond-A'))
+ condition1.set_parameter('regex', 'random log message')
+ # matches, multiple lines message
+ condition2 = LogCondition.create(Condition('cond-B'))
+ condition2.set_parameter('regex', 'continuing on next line')
+ # does not match
+ condition3 = LogCondition.create(Condition('cond-C'))
+ condition3.set_parameter('regex', 'this should match no log')
+ db_logs.check_and_trigger_conditions(
+ [condition1, condition2, condition3]
+ )
+ cond1_trigger = condition1.get_trigger()
+ self.assertEqual(2, len(cond1_trigger.keys()))
+ self.assertSetEqual(
+ {'col-fam-A', NO_COL_FAMILY}, set(cond1_trigger.keys())
+ )
+ self.assertEqual(2, len(cond1_trigger['col-fam-A']))
+ messages = [
+ "[db/db_impl.cc:563] [col-fam-A] random log message for testing",
+ "[db/db_impl.cc:653] [col-fam-A] another random log message"
+ ]
+ self.assertIn(cond1_trigger['col-fam-A'][0].get_message(), messages)
+ self.assertIn(cond1_trigger['col-fam-A'][1].get_message(), messages)
+ self.assertEqual(1, len(cond1_trigger[NO_COL_FAMILY]))
+ self.assertEqual(
+ cond1_trigger[NO_COL_FAMILY][0].get_message(),
+ "[db/db_impl.cc:331] [unknown] random log message no column family"
+ )
+ cond2_trigger = condition2.get_trigger()
+ self.assertEqual(['col-fam-B'], list(cond2_trigger.keys()))
+ self.assertEqual(1, len(cond2_trigger['col-fam-B']))
+ self.assertEqual(
+ cond2_trigger['col-fam-B'][0].get_message(),
+ "[db/db_impl.cc:234] [col-fam-B] log continuing on next line\n" +
+ "remaining part of the log"
+ )
+ self.assertIsNone(condition3.get_trigger())
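
test_get_methods expects get_timestamp() to map "2018/05/25-14:30:25.491635" to 1527258625, i.e. the human-readable time read as UTC. One way to reproduce that value (the Log class itself may compute it differently):

import calendar
import time

def to_epoch(hr_time):
    # parse the advisor LOG time format and treat it as UTC
    struct = time.strptime(hr_time, '%Y/%m/%d-%H:%M:%S.%f')
    return calendar.timegm(struct)

assert to_epoch('2018/05/25-14:30:25.491635') == 1527258625
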
diff --git a/src/rocksdb/tools/advisor/test/test_db_options_parser.py b/src/rocksdb/tools/advisor/test/test_db_options_parser.py
new file mode 100644
index 000000000..d53a9bdb5
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/test_db_options_parser.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+# This source code is licensed under both the GPLv2 (found in the
+# COPYING file in the root directory) and Apache 2.0 License
+# (found in the LICENSE.Apache file in the root directory).
+
+from advisor.db_log_parser import NO_COL_FAMILY
+from advisor.db_options_parser import DatabaseOptions
+from advisor.rule_parser import Condition, OptionCondition
+import os
+import unittest
+
+
+class TestDatabaseOptions(unittest.TestCase):
+ def setUp(self):
+ self.this_path = os.path.abspath(os.path.dirname(__file__))
+ self.og_options = os.path.join(
+ self.this_path, 'input_files/OPTIONS-000005'
+ )
+ misc_options = [
+ 'bloom_bits = 4', 'rate_limiter_bytes_per_sec = 1024000'
+ ]
+ # create the options object
+ self.db_options = DatabaseOptions(self.og_options, misc_options)
+ # perform clean-up before running tests
+ self.generated_options = os.path.join(
+ self.this_path, '../temp/OPTIONS_testing.tmp'
+ )
+ if os.path.isfile(self.generated_options):
+ os.remove(self.generated_options)
+
+ def test_get_options_diff(self):
+ old_opt = {
+ 'DBOptions.stats_dump_freq_sec': {NO_COL_FAMILY: '20'},
+ 'CFOptions.write_buffer_size': {
+ 'default': '1024000',
+ 'col_fam_A': '128000',
+ 'col_fam_B': '128000000'
+ },
+ 'DBOptions.use_fsync': {NO_COL_FAMILY: 'true'},
+ 'DBOptions.max_log_file_size': {NO_COL_FAMILY: '128000000'}
+ }
+ new_opt = {
+ 'bloom_bits': {NO_COL_FAMILY: '4'},
+ 'CFOptions.write_buffer_size': {
+ 'default': '128000000',
+ 'col_fam_A': '128000',
+ 'col_fam_C': '128000000'
+ },
+ 'DBOptions.use_fsync': {NO_COL_FAMILY: 'true'},
+ 'DBOptions.max_log_file_size': {NO_COL_FAMILY: '0'}
+ }
+ diff = DatabaseOptions.get_options_diff(old_opt, new_opt)
+
+ expected_diff = {
+ 'DBOptions.stats_dump_freq_sec': {NO_COL_FAMILY: ('20', None)},
+ 'bloom_bits': {NO_COL_FAMILY: (None, '4')},
+ 'CFOptions.write_buffer_size': {
+ 'default': ('1024000', '128000000'),
+ 'col_fam_B': ('128000000', None),
+ 'col_fam_C': (None, '128000000')
+ },
+ 'DBOptions.max_log_file_size': {NO_COL_FAMILY: ('128000000', '0')}
+ }
+ self.assertDictEqual(diff, expected_diff)
+
+ def test_is_misc_option(self):
+ self.assertTrue(DatabaseOptions.is_misc_option('bloom_bits'))
+ self.assertFalse(
+ DatabaseOptions.is_misc_option('DBOptions.stats_dump_freq_sec')
+ )
+
+ def test_set_up(self):
+ options = self.db_options.get_all_options()
+ self.assertEqual(22, len(options.keys()))
+ expected_misc_options = {
+ 'bloom_bits': '4', 'rate_limiter_bytes_per_sec': '1024000'
+ }
+ self.assertDictEqual(
+ expected_misc_options, self.db_options.get_misc_options()
+ )
+ self.assertListEqual(
+ ['default', 'col_fam_A'], self.db_options.get_column_families()
+ )
+
+ def test_get_options(self):
+ opt_to_get = [
+ 'DBOptions.manual_wal_flush', 'DBOptions.db_write_buffer_size',
+ 'bloom_bits', 'CFOptions.compaction_filter_factory',
+ 'CFOptions.num_levels', 'rate_limiter_bytes_per_sec',
+ 'TableOptions.BlockBasedTable.block_align', 'random_option'
+ ]
+ options = self.db_options.get_options(opt_to_get)
+ expected_options = {
+ 'DBOptions.manual_wal_flush': {NO_COL_FAMILY: 'false'},
+ 'DBOptions.db_write_buffer_size': {NO_COL_FAMILY: '0'},
+ 'bloom_bits': {NO_COL_FAMILY: '4'},
+ 'CFOptions.compaction_filter_factory': {
+ 'default': 'nullptr', 'col_fam_A': 'nullptr'
+ },
+ 'CFOptions.num_levels': {'default': '7', 'col_fam_A': '5'},
+ 'rate_limiter_bytes_per_sec': {NO_COL_FAMILY: '1024000'},
+ 'TableOptions.BlockBasedTable.block_align': {
+ 'default': 'false', 'col_fam_A': 'true'
+ }
+ }
+ self.assertDictEqual(expected_options, options)
+
+ def test_update_options(self):
+ # add new, update old, set old
+ # before updating
+ expected_old_opts = {
+ 'DBOptions.db_log_dir': {NO_COL_FAMILY: None},
+ 'DBOptions.manual_wal_flush': {NO_COL_FAMILY: 'false'},
+ 'bloom_bits': {NO_COL_FAMILY: '4'},
+ 'CFOptions.num_levels': {'default': '7', 'col_fam_A': '5'},
+ 'TableOptions.BlockBasedTable.block_restart_interval': {
+ 'col_fam_A': '16'
+ }
+ }
+ get_opts = list(expected_old_opts.keys())
+ options = self.db_options.get_options(get_opts)
+ self.assertEqual(expected_old_opts, options)
+ # after updating options
+ update_opts = {
+ 'DBOptions.db_log_dir': {NO_COL_FAMILY: '/dev/shm'},
+ 'DBOptions.manual_wal_flush': {NO_COL_FAMILY: 'true'},
+ 'bloom_bits': {NO_COL_FAMILY: '2'},
+ 'CFOptions.num_levels': {'col_fam_A': '7'},
+ 'TableOptions.BlockBasedTable.block_restart_interval': {
+ 'default': '32'
+ },
+ 'random_misc_option': {NO_COL_FAMILY: 'something'}
+ }
+ self.db_options.update_options(update_opts)
+ update_opts['CFOptions.num_levels']['default'] = '7'
+ update_opts['TableOptions.BlockBasedTable.block_restart_interval'] = {
+ 'default': '32', 'col_fam_A': '16'
+ }
+ get_opts.append('random_misc_option')
+ options = self.db_options.get_options(get_opts)
+ self.assertDictEqual(update_opts, options)
+ expected_misc_options = {
+ 'bloom_bits': '2',
+ 'rate_limiter_bytes_per_sec': '1024000',
+ 'random_misc_option': 'something'
+ }
+ self.assertDictEqual(
+ expected_misc_options, self.db_options.get_misc_options()
+ )
+
+ def test_generate_options_config(self):
+ # make sure file does not exist from before
+ self.assertFalse(os.path.isfile(self.generated_options))
+ self.db_options.generate_options_config('testing')
+ self.assertTrue(os.path.isfile(self.generated_options))
+
+ def test_check_and_trigger_conditions(self):
+ # options only from CFOptions
+ # setup the OptionCondition objects to check and trigger
+ update_dict = {
+ 'CFOptions.level0_file_num_compaction_trigger': {'col_fam_A': '4'},
+ 'CFOptions.max_bytes_for_level_base': {'col_fam_A': '10'}
+ }
+ self.db_options.update_options(update_dict)
+ cond1 = Condition('opt-cond-1')
+ cond1 = OptionCondition.create(cond1)
+ cond1.set_parameter(
+ 'options', [
+ 'CFOptions.level0_file_num_compaction_trigger',
+ 'TableOptions.BlockBasedTable.block_restart_interval',
+ 'CFOptions.max_bytes_for_level_base'
+ ]
+ )
+ cond1.set_parameter(
+ 'evaluate',
+ 'int(options[0])*int(options[1])-int(options[2])>=0'
+ )
+ # only DBOptions
+ cond2 = Condition('opt-cond-2')
+ cond2 = OptionCondition.create(cond2)
+ cond2.set_parameter(
+ 'options', [
+ 'DBOptions.db_write_buffer_size',
+ 'bloom_bits',
+ 'rate_limiter_bytes_per_sec'
+ ]
+ )
+ cond2.set_parameter(
+ 'evaluate',
+ '(int(options[2]) * int(options[1]) * int(options[0]))==0'
+ )
+ # mix of CFOptions and DBOptions
+ cond3 = Condition('opt-cond-3')
+ cond3 = OptionCondition.create(cond3)
+ cond3.set_parameter(
+ 'options', [
+ 'DBOptions.db_write_buffer_size', # 0
+ 'CFOptions.num_levels', # 5, 7
+ 'bloom_bits' # 4
+ ]
+ )
+ cond3.set_parameter(
+ 'evaluate', 'int(options[2])*int(options[0])+int(options[1])>6'
+ )
+ self.db_options.check_and_trigger_conditions([cond1, cond2, cond3])
+
+ cond1_trigger = {'col_fam_A': ['4', '16', '10']}
+ self.assertDictEqual(cond1_trigger, cond1.get_trigger())
+ cond2_trigger = {NO_COL_FAMILY: ['0', '4', '1024000']}
+ self.assertDictEqual(cond2_trigger, cond2.get_trigger())
+ cond3_trigger = {'default': ['0', '7', '4']}
+ self.assertDictEqual(cond3_trigger, cond3.get_trigger())
+
+
+if __name__ == '__main__':
+ unittest.main()
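
test_get_options_diff fixes the diff semantics: per option and per column family, keep an (old, new) tuple only where the two sides disagree, with None standing in for a missing side; options identical on both sides, like use_fsync here, drop out entirely. A compact sketch of that behavior:

def get_options_diff(old, new):
    # {option: {col_fam: value}} -> {option: {col_fam: (old, new)}},
    # keeping only (option, col_fam) pairs whose value changed
    diff = {}
    for opt in set(old) | set(new):
        for cf in set(old.get(opt, {})) | set(new.get(opt, {})):
            old_val = old.get(opt, {}).get(cf)
            new_val = new.get(opt, {}).get(cf)
            if old_val != new_val:
                diff.setdefault(opt, {})[cf] = (old_val, new_val)
    return diff
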
diff --git a/src/rocksdb/tools/advisor/test/test_db_stats_fetcher.py b/src/rocksdb/tools/advisor/test/test_db_stats_fetcher.py
new file mode 100644
index 000000000..afbbe8339
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/test_db_stats_fetcher.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+# This source code is licensed under both the GPLv2 (found in the
+# COPYING file in the root directory) and Apache 2.0 License
+# (found in the LICENSE.Apache file in the root directory).
+
+from advisor.db_stats_fetcher import LogStatsParser, DatabasePerfContext
+from advisor.db_timeseries_parser import NO_ENTITY
+from advisor.rule_parser import Condition, TimeSeriesCondition
+import os
+import time
+import unittest
+from unittest.mock import MagicMock
+
+
+class TestLogStatsParser(unittest.TestCase):
+ def setUp(self):
+ this_path = os.path.abspath(os.path.dirname(__file__))
+ stats_file = os.path.join(
+ this_path, 'input_files/log_stats_parser_keys_ts'
+ )
+ # populate the keys_ts dictionary of LogStatsParser
+ self.stats_dict = {NO_ENTITY: {}}
+ with open(stats_file, 'r') as fp:
+ for line in fp:
+ stat_name = line.split(':')[0].strip()
+ self.stats_dict[NO_ENTITY][stat_name] = {}
+ token_list = line.split(':')[1].strip().split(',')
+ for token in token_list:
+ timestamp = int(token.split()[0])
+ value = float(token.split()[1])
+ self.stats_dict[NO_ENTITY][stat_name][timestamp] = value
+ self.log_stats_parser = LogStatsParser('dummy_log_file', 20)
+ self.log_stats_parser.keys_ts = self.stats_dict
+
+ def test_check_and_trigger_conditions_bursty(self):
+ # mock fetch_timeseries() because 'keys_ts' has been pre-populated
+ self.log_stats_parser.fetch_timeseries = MagicMock()
+ # condition: bursty
+ cond1 = Condition('cond-1')
+ cond1 = TimeSeriesCondition.create(cond1)
+ cond1.set_parameter('keys', 'rocksdb.db.get.micros.p50')
+ cond1.set_parameter('behavior', 'bursty')
+ cond1.set_parameter('window_sec', 40)
+ cond1.set_parameter('rate_threshold', 0)
+ self.log_stats_parser.check_and_trigger_conditions([cond1])
+ expected_cond_trigger = {
+ NO_ENTITY: {1530896440: 0.9767546362322214}
+ }
+ self.assertDictEqual(expected_cond_trigger, cond1.get_trigger())
+ # ensure that fetch_timeseries() was called once
+ self.log_stats_parser.fetch_timeseries.assert_called_once()
+
+ def test_check_and_trigger_conditions_eval_agg(self):
+ # mock fetch_timeseries() because 'keys_ts' has been pre-populated
+ self.log_stats_parser.fetch_timeseries = MagicMock()
+ # condition: evaluate_expression
+ cond1 = Condition('cond-1')
+ cond1 = TimeSeriesCondition.create(cond1)
+ cond1.set_parameter('keys', 'rocksdb.db.get.micros.p50')
+ cond1.set_parameter('behavior', 'evaluate_expression')
+ keys = [
+ 'rocksdb.manifest.file.sync.micros.p99',
+ 'rocksdb.db.get.micros.p50'
+ ]
+ cond1.set_parameter('keys', keys)
+ cond1.set_parameter('aggregation_op', 'latest')
+ # condition evaluates to FALSE
+ cond1.set_parameter('evaluate', 'keys[0]-(keys[1]*100)>200')
+ self.log_stats_parser.check_and_trigger_conditions([cond1])
+        # with 'latest' aggregation the keys are [1792.0, 15.9638], so no trigger
+ self.assertIsNone(cond1.get_trigger())
+ # condition evaluates to TRUE
+ cond1.set_parameter('evaluate', 'keys[0]-(keys[1]*100)<200')
+ self.log_stats_parser.check_and_trigger_conditions([cond1])
+ expected_cond_trigger = {NO_ENTITY: [1792.0, 15.9638]}
+ self.assertDictEqual(expected_cond_trigger, cond1.get_trigger())
+ # ensure that fetch_timeseries() was called
+ self.log_stats_parser.fetch_timeseries.assert_called()
+
+ def test_check_and_trigger_conditions_eval(self):
+ # mock fetch_timeseries() because 'keys_ts' has been pre-populated
+ self.log_stats_parser.fetch_timeseries = MagicMock()
+ # condition: evaluate_expression
+ cond1 = Condition('cond-1')
+ cond1 = TimeSeriesCondition.create(cond1)
+ cond1.set_parameter('keys', 'rocksdb.db.get.micros.p50')
+ cond1.set_parameter('behavior', 'evaluate_expression')
+ keys = [
+ 'rocksdb.manifest.file.sync.micros.p99',
+ 'rocksdb.db.get.micros.p50'
+ ]
+ cond1.set_parameter('keys', keys)
+ cond1.set_parameter('evaluate', 'keys[0]-(keys[1]*100)>500')
+ self.log_stats_parser.check_and_trigger_conditions([cond1])
+ expected_trigger = {NO_ENTITY: {
+ 1530896414: [9938.0, 16.31508],
+ 1530896440: [9938.0, 16.346602],
+ 1530896466: [9938.0, 16.284669],
+ 1530896492: [9938.0, 16.16005]
+ }}
+ self.assertDictEqual(expected_trigger, cond1.get_trigger())
+ self.log_stats_parser.fetch_timeseries.assert_called_once()
+
+
+class TestDatabasePerfContext(unittest.TestCase):
+ def test_unaccumulate_metrics(self):
+ perf_dict = {
+ "user_key_comparison_count": 675903942,
+ "block_cache_hit_count": 830086,
+ }
+ timestamp = int(time.time())
+ perf_ts = {}
+ for key in perf_dict:
+ perf_ts[key] = {}
+ start_val = perf_dict[key]
+ for ix in range(5):
+ perf_ts[key][timestamp+(ix*10)] = start_val + (2 * ix * ix)
+ db_perf_context = DatabasePerfContext(perf_ts, 10, True)
+ timestamps = [timestamp+(ix*10) for ix in range(1, 5, 1)]
+ values = [val for val in range(2, 15, 4)]
+ inner_dict = {timestamps[ix]: values[ix] for ix in range(4)}
+ expected_keys_ts = {NO_ENTITY: {
+ 'user_key_comparison_count': inner_dict,
+ 'block_cache_hit_count': inner_dict
+ }}
+ self.assertDictEqual(expected_keys_ts, db_perf_context.keys_ts)
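
test_unaccumulate_metrics encodes the differencing step: a cumulative counter series becomes per-interval deltas, and the first sample drops out because it has no predecessor. Since the fixture's series is start + 2*i*i, the deltas are 2, 6, 10, 14. A sketch of the transform, assuming DatabasePerfContext does plain first-differencing when its third argument is True:

def unaccumulate(series):
    # series: {timestamp: cumulative_value}; returns first differences,
    # dropping the earliest sample (no predecessor to subtract)
    ts = sorted(series)
    return {ts[i]: series[ts[i]] - series[ts[i - 1]] for i in range(1, len(ts))}

cumulative = {0: 7, 10: 9, 20: 15, 30: 25, 40: 39}  # 7 + 2*i*i
assert unaccumulate(cumulative) == {10: 2, 20: 6, 30: 10, 40: 14}
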
diff --git a/src/rocksdb/tools/advisor/test/test_rule_parser.py b/src/rocksdb/tools/advisor/test/test_rule_parser.py
new file mode 100644
index 000000000..9f1d0bf5c
--- /dev/null
+++ b/src/rocksdb/tools/advisor/test/test_rule_parser.py
@@ -0,0 +1,234 @@
+# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+# This source code is licensed under both the GPLv2 (found in the
+# COPYING file in the root directory) and Apache 2.0 License
+# (found in the LICENSE.Apache file in the root directory).
+
+import os
+import unittest
+from advisor.rule_parser import RulesSpec
+from advisor.db_log_parser import DatabaseLogs, DataSource
+from advisor.db_options_parser import DatabaseOptions
+
+RuleToSuggestions = {
+ "stall-too-many-memtables": [
+ 'inc-bg-flush',
+ 'inc-write-buffer'
+ ],
+ "stall-too-many-L0": [
+ 'inc-max-subcompactions',
+ 'inc-max-bg-compactions',
+ 'inc-write-buffer-size',
+ 'dec-max-bytes-for-level-base',
+ 'inc-l0-slowdown-writes-trigger'
+ ],
+ "stop-too-many-L0": [
+ 'inc-max-bg-compactions',
+ 'inc-write-buffer-size',
+ 'inc-l0-stop-writes-trigger'
+ ],
+ "stall-too-many-compaction-bytes": [
+ 'inc-max-bg-compactions',
+ 'inc-write-buffer-size',
+ 'inc-hard-pending-compaction-bytes-limit',
+ 'inc-soft-pending-compaction-bytes-limit'
+ ],
+ "level0-level1-ratio": [
+ 'l0-l1-ratio-health-check'
+ ]
+}
+
+
+class TestAllRulesTriggered(unittest.TestCase):
+ def setUp(self):
+ # load the Rules
+ this_path = os.path.abspath(os.path.dirname(__file__))
+ ini_path = os.path.join(this_path, 'input_files/triggered_rules.ini')
+ self.db_rules = RulesSpec(ini_path)
+ self.db_rules.load_rules_from_spec()
+ self.db_rules.perform_section_checks()
+ # load the data sources: LOG and OPTIONS
+ log_path = os.path.join(this_path, 'input_files/LOG-0')
+ options_path = os.path.join(this_path, 'input_files/OPTIONS-000005')
+ db_options_parser = DatabaseOptions(options_path)
+ self.column_families = db_options_parser.get_column_families()
+ db_logs_parser = DatabaseLogs(log_path, self.column_families)
+ self.data_sources = {
+ DataSource.Type.DB_OPTIONS: [db_options_parser],
+ DataSource.Type.LOG: [db_logs_parser]
+ }
+
+ def test_triggered_conditions(self):
+ conditions_dict = self.db_rules.get_conditions_dict()
+ rules_dict = self.db_rules.get_rules_dict()
+ # Make sure none of the conditions is triggered beforehand
+ for cond in conditions_dict.values():
+ self.assertFalse(cond.is_triggered(), repr(cond))
+ for rule in rules_dict.values():
+ self.assertFalse(
+ rule.is_triggered(conditions_dict, self.column_families),
+ repr(rule)
+ )
+
+        # The conditions are triggered inside get_triggered_rules() below,
+        # as per the data sources loaded in setUp().
+
+ # Get the set of rules that have been triggered
+ triggered_rules = self.db_rules.get_triggered_rules(
+ self.data_sources, self.column_families
+ )
+
+ # Make sure each condition and rule is triggered
+ for cond in conditions_dict.values():
+ if cond.get_data_source() is DataSource.Type.TIME_SERIES:
+ continue
+ self.assertTrue(cond.is_triggered(), repr(cond))
+
+ for rule in rules_dict.values():
+ self.assertIn(rule, triggered_rules)
+ # Check the suggestions made by the triggered rules
+ for sugg in rule.get_suggestions():
+ self.assertIn(sugg, RuleToSuggestions[rule.name])
+
+ for rule in triggered_rules:
+ self.assertIn(rule, rules_dict.values())
+ for sugg in RuleToSuggestions[rule.name]:
+ self.assertIn(sugg, rule.get_suggestions())
+
+
+class TestConditionsConjunctions(unittest.TestCase):
+ def setUp(self):
+ # load the Rules
+ this_path = os.path.abspath(os.path.dirname(__file__))
+ ini_path = os.path.join(this_path, 'input_files/test_rules.ini')
+ self.db_rules = RulesSpec(ini_path)
+ self.db_rules.load_rules_from_spec()
+ self.db_rules.perform_section_checks()
+ # load the data sources: LOG and OPTIONS
+ log_path = os.path.join(this_path, 'input_files/LOG-1')
+ options_path = os.path.join(this_path, 'input_files/OPTIONS-000005')
+ db_options_parser = DatabaseOptions(options_path)
+ self.column_families = db_options_parser.get_column_families()
+ db_logs_parser = DatabaseLogs(log_path, self.column_families)
+ self.data_sources = {
+ DataSource.Type.DB_OPTIONS: [db_options_parser],
+ DataSource.Type.LOG: [db_logs_parser]
+ }
+
+ def test_condition_conjunctions(self):
+ conditions_dict = self.db_rules.get_conditions_dict()
+ rules_dict = self.db_rules.get_rules_dict()
+ # Make sure none of the conditions is triggered beforehand
+ for cond in conditions_dict.values():
+ self.assertFalse(cond.is_triggered(), repr(cond))
+ for rule in rules_dict.values():
+ self.assertFalse(
+ rule.is_triggered(conditions_dict, self.column_families),
+ repr(rule)
+ )
+
+ # Trigger the conditions as per the data sources.
+ self.db_rules.trigger_conditions(self.data_sources)
+
+ # Check for the conditions
+ conds_triggered = ['log-1-true', 'log-2-true', 'log-3-true']
+ conds_not_triggered = ['log-4-false', 'options-1-false']
+ for cond in conds_triggered:
+ self.assertTrue(conditions_dict[cond].is_triggered(), repr(cond))
+ for cond in conds_not_triggered:
+ self.assertFalse(conditions_dict[cond].is_triggered(), repr(cond))
+
+ # Check for the rules
+ rules_triggered = ['multiple-conds-true']
+ rules_not_triggered = [
+ 'single-condition-false',
+ 'multiple-conds-one-false',
+ 'multiple-conds-all-false'
+ ]
+ for rule_name in rules_triggered:
+ rule = rules_dict[rule_name]
+ self.assertTrue(
+ rule.is_triggered(conditions_dict, self.column_families),
+ repr(rule)
+ )
+ for rule_name in rules_not_triggered:
+ rule = rules_dict[rule_name]
+ self.assertFalse(
+ rule.is_triggered(conditions_dict, self.column_families),
+ repr(rule)
+ )
+
+
+class TestSanityChecker(unittest.TestCase):
+ def setUp(self):
+ this_path = os.path.abspath(os.path.dirname(__file__))
+ ini_path = os.path.join(this_path, 'input_files/rules_err1.ini')
+ db_rules = RulesSpec(ini_path)
+ db_rules.load_rules_from_spec()
+ self.rules_dict = db_rules.get_rules_dict()
+ self.conditions_dict = db_rules.get_conditions_dict()
+ self.suggestions_dict = db_rules.get_suggestions_dict()
+
+ def test_rule_missing_suggestions(self):
+ regex = '.*rule must have at least one suggestion.*'
+ with self.assertRaisesRegex(ValueError, regex):
+ self.rules_dict['missing-suggestions'].perform_checks()
+
+ def test_rule_missing_conditions(self):
+ regex = '.*rule must have at least one condition.*'
+ with self.assertRaisesRegex(ValueError, regex):
+ self.rules_dict['missing-conditions'].perform_checks()
+
+ def test_condition_missing_regex(self):
+ regex = '.*provide regex for log condition.*'
+ with self.assertRaisesRegex(ValueError, regex):
+ self.conditions_dict['missing-regex'].perform_checks()
+
+ def test_condition_missing_options(self):
+ regex = '.*options missing in condition.*'
+ with self.assertRaisesRegex(ValueError, regex):
+ self.conditions_dict['missing-options'].perform_checks()
+
+ def test_condition_missing_expression(self):
+ regex = '.*expression missing in condition.*'
+ with self.assertRaisesRegex(ValueError, regex):
+ self.conditions_dict['missing-expression'].perform_checks()
+
+ def test_suggestion_missing_option(self):
+ regex = '.*provide option or description.*'
+ with self.assertRaisesRegex(ValueError, regex):
+ self.suggestions_dict['missing-option'].perform_checks()
+
+ def test_suggestion_missing_description(self):
+ regex = '.*provide option or description.*'
+ with self.assertRaisesRegex(ValueError, regex):
+ self.suggestions_dict['missing-description'].perform_checks()
+
+
+class TestParsingErrors(unittest.TestCase):
+ def setUp(self):
+ self.this_path = os.path.abspath(os.path.dirname(__file__))
+
+ def test_condition_missing_source(self):
+ ini_path = os.path.join(self.this_path, 'input_files/rules_err2.ini')
+ db_rules = RulesSpec(ini_path)
+ regex = '.*provide source for condition.*'
+ with self.assertRaisesRegex(NotImplementedError, regex):
+ db_rules.load_rules_from_spec()
+
+ def test_suggestion_missing_action(self):
+ ini_path = os.path.join(self.this_path, 'input_files/rules_err3.ini')
+ db_rules = RulesSpec(ini_path)
+ regex = '.*provide action for option.*'
+ with self.assertRaisesRegex(ValueError, regex):
+ db_rules.load_rules_from_spec()
+
+ def test_section_no_name(self):
+ ini_path = os.path.join(self.this_path, 'input_files/rules_err4.ini')
+ db_rules = RulesSpec(ini_path)
+ regex = 'Parsing error: needed section header:.*'
+ with self.assertRaisesRegex(ValueError, regex):
+ db_rules.load_rules_from_spec()
+
+
+if __name__ == '__main__':
+ unittest.main()
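
These suites import the advisor package ("from advisor.rule_parser import ..."), so they need to be run with src/rocksdb/tools/advisor as the working directory. A small driver under that assumption:

# Run from src/rocksdb/tools/advisor so 'from advisor...' imports resolve.
import unittest

suite = unittest.defaultTestLoader.discover('test', pattern='test_*.py')
unittest.TextTestRunner(verbosity=2).run(suite)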