path: root/src/rocksdb/java
Diffstat (limited to '')
-rw-r--r--  src/rocksdb/java/CMakeLists.txt | 500
-rw-r--r--  src/rocksdb/java/HISTORY-JAVA.md | 86
-rw-r--r--  src/rocksdb/java/Makefile | 319
-rw-r--r--  src/rocksdb/java/RELEASE.md | 59
-rw-r--r--  src/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java | 1653
-rw-r--r--  src/rocksdb/java/crossbuild/Vagrantfile | 51
-rwxr-xr-x  src/rocksdb/java/crossbuild/build-linux-alpine.sh | 70
-rwxr-xr-x  src/rocksdb/java/crossbuild/build-linux-centos.sh | 38
-rwxr-xr-x  src/rocksdb/java/crossbuild/build-linux.sh | 15
-rwxr-xr-x  src/rocksdb/java/crossbuild/docker-build-linux-alpine.sh | 18
-rwxr-xr-x  src/rocksdb/java/crossbuild/docker-build-linux-centos.sh | 34
-rwxr-xr-x  src/rocksdb/java/jdb_bench.sh | 13
-rw-r--r--  src/rocksdb/java/jmh/LICENSE-HEADER.txt | 5
-rw-r--r--  src/rocksdb/java/jmh/README.md | 18
-rw-r--r--  src/rocksdb/java/jmh/pom.xml | 138
-rw-r--r--  src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java | 139
-rw-r--r--  src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java | 139
-rw-r--r--  src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java | 158
-rw-r--r--  src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java | 112
-rw-r--r--  src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java | 59
-rw-r--r--  src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java | 58
-rw-r--r--  src/rocksdb/java/rocksjni.pom | 150
-rw-r--r--  src/rocksdb/java/rocksjni/backupablejni.cc | 363
-rw-r--r--  src/rocksdb/java/rocksjni/backupenginejni.cc | 277
-rw-r--r--  src/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc | 24
-rw-r--r--  src/rocksdb/java/rocksjni/cassandra_value_operator.cc | 48
-rw-r--r--  src/rocksdb/java/rocksjni/checkpoint.cc | 68
-rw-r--r--  src/rocksdb/java/rocksjni/clock_cache.cc | 40
-rw-r--r--  src/rocksdb/java/rocksjni/columnfamilyhandle.cc | 72
-rw-r--r--  src/rocksdb/java/rocksjni/compact_range_options.cc | 211
-rw-r--r--  src/rocksdb/java/rocksjni/compaction_filter.cc | 28
-rw-r--r--  src/rocksdb/java/rocksjni/compaction_filter_factory.cc | 40
-rw-r--r--  src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.cc | 76
-rw-r--r--  src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.h | 35
-rw-r--r--  src/rocksdb/java/rocksjni/compaction_job_info.cc | 231
-rw-r--r--  src/rocksdb/java/rocksjni/compaction_job_stats.cc | 361
-rw-r--r--  src/rocksdb/java/rocksjni/compaction_options.cc | 116
-rw-r--r--  src/rocksdb/java/rocksjni/compaction_options_fifo.cc | 81
-rw-r--r--  src/rocksdb/java/rocksjni/compaction_options_universal.cc | 209
-rw-r--r--  src/rocksdb/java/rocksjni/comparator.cc | 57
-rw-r--r--  src/rocksdb/java/rocksjni/comparatorjnicallback.cc | 638
-rw-r--r--  src/rocksdb/java/rocksjni/comparatorjnicallback.h | 137
-rw-r--r--  src/rocksdb/java/rocksjni/compression_options.cc | 164
-rw-r--r--  src/rocksdb/java/rocksjni/env.cc | 238
-rw-r--r--  src/rocksdb/java/rocksjni/env_options.cc | 298
-rw-r--r--  src/rocksdb/java/rocksjni/filter.cc | 45
-rw-r--r--  src/rocksdb/java/rocksjni/ingest_external_file_options.cc | 196
-rw-r--r--  src/rocksdb/java/rocksjni/iterator.cc | 252
-rw-r--r--  src/rocksdb/java/rocksjni/jnicallback.cc | 53
-rw-r--r--  src/rocksdb/java/rocksjni/jnicallback.h | 31
-rw-r--r--  src/rocksdb/java/rocksjni/loggerjnicallback.cc | 297
-rw-r--r--  src/rocksdb/java/rocksjni/loggerjnicallback.h | 49
-rw-r--r--  src/rocksdb/java/rocksjni/lru_cache.cc | 43
-rw-r--r--  src/rocksdb/java/rocksjni/memory_util.cc | 107
-rw-r--r--  src/rocksdb/java/rocksjni/memtablejni.cc | 93
-rw-r--r--  src/rocksdb/java/rocksjni/merge_operator.cc | 81
-rw-r--r--  src/rocksdb/java/rocksjni/native_comparator_wrapper_test.cc | 44
-rw-r--r--  src/rocksdb/java/rocksjni/optimistic_transaction_db.cc | 284
-rw-r--r--  src/rocksdb/java/rocksjni/optimistic_transaction_options.cc | 78
-rw-r--r--  src/rocksdb/java/rocksjni/options.cc | 7240
-rw-r--r--  src/rocksdb/java/rocksjni/options_util.cc | 134
-rw-r--r--  src/rocksdb/java/rocksjni/persistent_cache.cc | 57
-rw-r--r--  src/rocksdb/java/rocksjni/portal.h | 7534
-rw-r--r--  src/rocksdb/java/rocksjni/ratelimiterjni.cc | 127
-rw-r--r--  src/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc | 23
-rw-r--r--  src/rocksdb/java/rocksjni/restorejni.cc | 40
-rw-r--r--  src/rocksdb/java/rocksjni/rocks_callback_object.cc | 31
-rw-r--r--  src/rocksdb/java/rocksjni/rocksdb_exception_test.cc | 82
-rw-r--r--  src/rocksdb/java/rocksjni/rocksjni.cc | 3406
-rw-r--r--  src/rocksdb/java/rocksjni/slice.cc | 360
-rw-r--r--  src/rocksdb/java/rocksjni/snapshot.cc | 27
-rw-r--r--  src/rocksdb/java/rocksjni/sst_file_manager.cc | 247
-rw-r--r--  src/rocksdb/java/rocksjni/sst_file_reader_iterator.cc | 253
-rw-r--r--  src/rocksdb/java/rocksjni/sst_file_readerjni.cc | 116
-rw-r--r--  src/rocksdb/java/rocksjni/sst_file_writerjni.cc | 308
-rw-r--r--  src/rocksdb/java/rocksjni/statistics.cc | 264
-rw-r--r--  src/rocksdb/java/rocksjni/statisticsjni.cc | 32
-rw-r--r--  src/rocksdb/java/rocksjni/statisticsjni.h | 34
-rw-r--r--  src/rocksdb/java/rocksjni/table.cc | 150
-rw-r--r--  src/rocksdb/java/rocksjni/table_filter.cc | 25
-rw-r--r--  src/rocksdb/java/rocksjni/table_filter_jnicallback.cc | 66
-rw-r--r--  src/rocksdb/java/rocksjni/table_filter_jnicallback.h | 36
-rw-r--r--  src/rocksdb/java/rocksjni/thread_status.cc | 125
-rw-r--r--  src/rocksdb/java/rocksjni/trace_writer.cc | 23
-rw-r--r--  src/rocksdb/java/rocksjni/trace_writer_jnicallback.cc | 115
-rw-r--r--  src/rocksdb/java/rocksjni/trace_writer_jnicallback.h | 36
-rw-r--r--  src/rocksdb/java/rocksjni/transaction.cc | 1646
-rw-r--r--  src/rocksdb/java/rocksjni/transaction_db.cc | 463
-rw-r--r--  src/rocksdb/java/rocksjni/transaction_db_options.cc | 170
-rw-r--r--  src/rocksdb/java/rocksjni/transaction_log.cc | 79
-rw-r--r--  src/rocksdb/java/rocksjni/transaction_notifier.cc | 43
-rw-r--r--  src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.cc | 39
-rw-r--r--  src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.h | 42
-rw-r--r--  src/rocksdb/java/rocksjni/transaction_options.cc | 191
-rw-r--r--  src/rocksdb/java/rocksjni/ttl.cc | 207
-rw-r--r--  src/rocksdb/java/rocksjni/wal_filter.cc | 23
-rw-r--r--  src/rocksdb/java/rocksjni/wal_filter_jnicallback.cc | 144
-rw-r--r--  src/rocksdb/java/rocksjni/wal_filter_jnicallback.h | 42
-rw-r--r--  src/rocksdb/java/rocksjni/write_batch.cc | 674
-rw-r--r--  src/rocksdb/java/rocksjni/write_batch_test.cc | 198
-rw-r--r--  src/rocksdb/java/rocksjni/write_batch_with_index.cc | 862
-rw-r--r--  src/rocksdb/java/rocksjni/write_buffer_manager.cc | 42
-rw-r--r--  src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc | 548
-rw-r--r--  src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h | 89
-rw-r--r--  src/rocksdb/java/samples/src/main/java/OptimisticTransactionSample.java | 184
-rw-r--r--  src/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java | 78
-rw-r--r--  src/rocksdb/java/samples/src/main/java/RocksDBSample.java | 303
-rw-r--r--  src/rocksdb/java/samples/src/main/java/TransactionSample.java | 183
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java | 59
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java | 77
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java | 124
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java | 125
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java | 67
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractMutableOptions.java | 256
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java | 76
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java | 126
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java | 191
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractTableFilter.java | 20
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractTraceWriter.java | 70
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java | 54
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractWalFilter.java | 49
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java | 216
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java | 53
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java | 465
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java | 464
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java | 259
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java | 76
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/BackupableDBOptions.java | 465
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java | 987
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java | 79
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java | 20
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Cache.java | 13
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java | 19
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java | 25
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java | 66
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java | 39
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java | 59
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java | 109
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java | 115
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java | 70
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java | 1001
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java | 449
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompactRangeOptions.java | 237
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobInfo.java | 159
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobStats.java | 295
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptions.java | 121
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java | 89
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java | 273
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java | 73
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompactionReason.java | 115
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java | 55
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java | 80
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java | 133
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ComparatorType.java | 48
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java | 151
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java | 99
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java | 1403
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java | 1564
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/DataBlockIndexType.java | 32
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/DbPath.java | 47
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java | 132
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java | 55
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Env.java | 167
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java | 366
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Experimental.java | 23
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Filter.java | 36
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java | 90
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java | 174
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java | 106
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/HdfsEnv.java | 27
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java | 75
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java | 198
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Holder.java | 46
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/IndexType.java | 41
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java | 49
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java | 227
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java | 82
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/LevelMetaData.java | 56
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/LiveFileMetaData.java | 55
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/LogFile.java | 75
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Logger.java | 122
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java | 29
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/MemoryUsageType.java | 72
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/MemoryUtil.java | 60
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java | 18
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java | 469
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java | 158
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptions.java | 325
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java | 443
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionKey.java | 16
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionValue.java | 376
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java | 59
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java | 125
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/OperationStage.java | 59
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/OperationType.java | 54
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java | 226
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java | 53
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Options.java | 2183
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/OptionsUtil.java | 142
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/PersistentCache.java | 26
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java | 251
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Priority.java | 49
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Range.java | 19
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java | 227
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RateLimiterMode.java | 52
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java | 622
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java | 49
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java | 18
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java | 32
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java | 65
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RocksCallbackObject.java | 50
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java | 4522
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java | 44
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java | 32
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java | 118
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java | 117
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java | 39
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java | 87
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java | 41
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/SizeApproximationFlag.java | 31
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java | 51
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Slice.java | 136
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java | 41
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/SstFileManager.java | 251
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/SstFileMetaData.java | 162
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/SstFileReader.java | 82
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/SstFileReaderIterator.java | 120
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java | 290
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/StateType.java | 53
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Statistics.java | 152
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java | 111
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java | 32
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java | 35
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java | 65
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Status.java | 138
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java | 24
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TableFilter.java | 21
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java | 22
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TableProperties.java | 366
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ThreadStatus.java | 224
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/ThreadType.java | 65
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TickerType.java | 760
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TimedEnv.java | 30
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TraceOptions.java | 32
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TraceWriter.java | 36
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/Transaction.java | 2012
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TransactionDB.java | 404
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TransactionDBOptions.java | 217
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java | 112
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TransactionOptions.java | 189
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TransactionalDB.java | 68
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TransactionalOptions.java | 31
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java | 245
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java | 62
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/UInt64AddOperator.java | 19
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java | 46
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java | 83
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java | 197
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/WalFileType.java | 55
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/WalFilter.java | 87
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/WalProcessingOption.java | 54
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java | 394
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java | 305
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java | 318
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/WriteBufferManager.java | 33
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java | 219
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/util/ByteUtil.java | 46
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java | 121
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java | 152
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/util/IntComparator.java | 67
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java | 88
-rw-r--r--  src/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java | 16
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/AbstractTransactionTest.java | 902
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java | 261
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java | 351
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java | 393
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java | 145
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java | 83
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java | 26
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java | 625
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java | 734
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java | 98
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java | 68
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java | 114
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java | 196
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java | 35
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsTest.java | 52
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java | 80
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java | 31
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java | 31
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java | 58
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java | 71
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java | 20
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java | 813
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/DefaultEnvTest.java | 113
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java | 93
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java | 145
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java | 39
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/FlushOptionsTest.java | 31
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java | 49
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/HdfsEnvTest.java | 45
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java | 109
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java | 107
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java | 192
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java | 27
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java | 239
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java | 111
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/MemoryUtilTest.java | 143
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java | 440
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java | 55
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java | 88
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java | 85
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java | 92
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java | 41
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java | 131
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java | 38
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java | 350
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java | 1311
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/OptionsUtilTest.java | 126
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java | 89
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java | 58
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java | 65
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java | 305
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java | 323
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java | 115
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java | 1669
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java | 203
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java | 146
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java | 18
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java | 80
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java | 169
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/SstFileManagerTest.java | 66
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/SstFileReaderTest.java | 155
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java | 241
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java | 55
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java | 168
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java | 20
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/TableFilterTest.java | 106
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/TimedEnvTest.java | 43
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java | 64
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBTest.java | 178
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java | 139
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/TransactionOptionsTest.java | 72
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/TransactionTest.java | 308
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java | 112
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/Types.java | 43
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java | 22
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/WalFilterTest.java | 165
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java | 76
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java | 528
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java | 104
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java | 566
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java | 69
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java | 21
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java | 174
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java | 267
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java | 519
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java | 172
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java | 259
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/util/IntComparatorTest.java | 266
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java | 174
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java | 270
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java | 27
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/util/TestUtil.java | 61
-rw-r--r--  src/rocksdb/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java | 134
365 files changed, 88525 insertions, 0 deletions
diff --git a/src/rocksdb/java/CMakeLists.txt b/src/rocksdb/java/CMakeLists.txt
new file mode 100644
index 000000000..04d592bcd
--- /dev/null
+++ b/src/rocksdb/java/CMakeLists.txt
@@ -0,0 +1,500 @@
+cmake_minimum_required(VERSION 3.4)
+
+if(${CMAKE_VERSION} VERSION_LESS "3.11.4")
+ message("Please consider switching to CMake 3.11.4 or newer")
+endif()
+
+set(JNI_NATIVE_SOURCES
+ rocksjni/backupablejni.cc
+ rocksjni/backupenginejni.cc
+ rocksjni/cassandra_compactionfilterjni.cc
+ rocksjni/cassandra_value_operator.cc
+ rocksjni/checkpoint.cc
+ rocksjni/clock_cache.cc
+ rocksjni/columnfamilyhandle.cc
+ rocksjni/compaction_filter.cc
+ rocksjni/compaction_filter_factory.cc
+ rocksjni/compaction_filter_factory_jnicallback.cc
+ rocksjni/compaction_job_info.cc
+ rocksjni/compaction_job_stats.cc
+ rocksjni/compaction_options.cc
+ rocksjni/compaction_options_fifo.cc
+ rocksjni/compaction_options_universal.cc
+ rocksjni/compact_range_options.cc
+ rocksjni/comparator.cc
+ rocksjni/comparatorjnicallback.cc
+ rocksjni/compression_options.cc
+ rocksjni/env.cc
+ rocksjni/env_options.cc
+ rocksjni/filter.cc
+ rocksjni/ingest_external_file_options.cc
+ rocksjni/iterator.cc
+ rocksjni/jnicallback.cc
+ rocksjni/loggerjnicallback.cc
+ rocksjni/lru_cache.cc
+ rocksjni/memory_util.cc
+ rocksjni/memtablejni.cc
+ rocksjni/merge_operator.cc
+ rocksjni/native_comparator_wrapper_test.cc
+ rocksjni/optimistic_transaction_db.cc
+ rocksjni/optimistic_transaction_options.cc
+ rocksjni/options.cc
+ rocksjni/options_util.cc
+ rocksjni/persistent_cache.cc
+ rocksjni/ratelimiterjni.cc
+ rocksjni/remove_emptyvalue_compactionfilterjni.cc
+ rocksjni/restorejni.cc
+ rocksjni/rocks_callback_object.cc
+ rocksjni/rocksdb_exception_test.cc
+ rocksjni/rocksjni.cc
+ rocksjni/slice.cc
+ rocksjni/snapshot.cc
+ rocksjni/sst_file_manager.cc
+ rocksjni/sst_file_writerjni.cc
+ rocksjni/sst_file_readerjni.cc
+ rocksjni/sst_file_reader_iterator.cc
+ rocksjni/statistics.cc
+ rocksjni/statisticsjni.cc
+ rocksjni/table.cc
+ rocksjni/table_filter.cc
+ rocksjni/table_filter_jnicallback.cc
+ rocksjni/thread_status.cc
+ rocksjni/trace_writer.cc
+ rocksjni/trace_writer_jnicallback.cc
+ rocksjni/transaction.cc
+ rocksjni/transaction_db.cc
+ rocksjni/transaction_db_options.cc
+ rocksjni/transaction_log.cc
+ rocksjni/transaction_notifier.cc
+ rocksjni/transaction_notifier_jnicallback.cc
+ rocksjni/transaction_options.cc
+ rocksjni/ttl.cc
+ rocksjni/wal_filter.cc
+ rocksjni/wal_filter_jnicallback.cc
+ rocksjni/write_batch.cc
+ rocksjni/writebatchhandlerjnicallback.cc
+ rocksjni/write_batch_test.cc
+ rocksjni/write_batch_with_index.cc
+ rocksjni/write_buffer_manager.cc
+)
+
+set(JAVA_MAIN_CLASSES
+ src/main/java/org/rocksdb/AbstractCompactionFilter.java
+ src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
+ src/main/java/org/rocksdb/AbstractComparator.java
+ src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
+ src/main/java/org/rocksdb/AbstractMutableOptions.java
+ src/main/java/org/rocksdb/AbstractNativeReference.java
+ src/main/java/org/rocksdb/AbstractRocksIterator.java
+ src/main/java/org/rocksdb/AbstractSlice.java
+ src/main/java/org/rocksdb/AbstractTableFilter.java
+ src/main/java/org/rocksdb/AbstractTraceWriter.java
+ src/main/java/org/rocksdb/AbstractTransactionNotifier.java
+ src/main/java/org/rocksdb/AbstractWalFilter.java
+ src/main/java/org/rocksdb/AbstractWriteBatch.java
+ src/main/java/org/rocksdb/AccessHint.java
+ src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
+ src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
+ src/main/java/org/rocksdb/BackupableDBOptions.java
+ src/main/java/org/rocksdb/BackupEngine.java
+ src/main/java/org/rocksdb/BackupInfo.java
+ src/main/java/org/rocksdb/BlockBasedTableConfig.java
+ src/main/java/org/rocksdb/BloomFilter.java
+ src/main/java/org/rocksdb/BuiltinComparator.java
+ src/main/java/org/rocksdb/Cache.java
+ src/main/java/org/rocksdb/CassandraCompactionFilter.java
+ src/main/java/org/rocksdb/CassandraValueMergeOperator.java
+ src/main/java/org/rocksdb/Checkpoint.java
+ src/main/java/org/rocksdb/ChecksumType.java
+ src/main/java/org/rocksdb/ClockCache.java
+ src/main/java/org/rocksdb/ColumnFamilyDescriptor.java
+ src/main/java/org/rocksdb/ColumnFamilyHandle.java
+ src/main/java/org/rocksdb/ColumnFamilyMetaData.java
+ src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
+ src/main/java/org/rocksdb/ColumnFamilyOptions.java
+ src/main/java/org/rocksdb/CompactionJobInfo.java
+ src/main/java/org/rocksdb/CompactionJobStats.java
+ src/main/java/org/rocksdb/CompactionOptions.java
+ src/main/java/org/rocksdb/CompactionOptionsFIFO.java
+ src/main/java/org/rocksdb/CompactionOptionsUniversal.java
+ src/main/java/org/rocksdb/CompactionPriority.java
+ src/main/java/org/rocksdb/CompactionReason.java
+ src/main/java/org/rocksdb/CompactRangeOptions.java
+ src/main/java/org/rocksdb/CompactionStopStyle.java
+ src/main/java/org/rocksdb/CompactionStyle.java
+ src/main/java/org/rocksdb/ComparatorOptions.java
+ src/main/java/org/rocksdb/ComparatorType.java
+ src/main/java/org/rocksdb/CompressionOptions.java
+ src/main/java/org/rocksdb/CompressionType.java
+ src/main/java/org/rocksdb/DataBlockIndexType.java
+ src/main/java/org/rocksdb/DBOptionsInterface.java
+ src/main/java/org/rocksdb/DBOptions.java
+ src/main/java/org/rocksdb/DbPath.java
+ src/main/java/org/rocksdb/DirectSlice.java
+ src/main/java/org/rocksdb/EncodingType.java
+ src/main/java/org/rocksdb/Env.java
+ src/main/java/org/rocksdb/EnvOptions.java
+ src/main/java/org/rocksdb/Experimental.java
+ src/main/java/org/rocksdb/Filter.java
+ src/main/java/org/rocksdb/FlushOptions.java
+ src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
+ src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
+ src/main/java/org/rocksdb/HdfsEnv.java
+ src/main/java/org/rocksdb/HistogramData.java
+ src/main/java/org/rocksdb/HistogramType.java
+ src/main/java/org/rocksdb/Holder.java
+ src/main/java/org/rocksdb/IndexType.java
+ src/main/java/org/rocksdb/InfoLogLevel.java
+ src/main/java/org/rocksdb/IngestExternalFileOptions.java
+ src/main/java/org/rocksdb/LevelMetaData.java
+ src/main/java/org/rocksdb/LiveFileMetaData.java
+ src/main/java/org/rocksdb/LogFile.java
+ src/main/java/org/rocksdb/Logger.java
+ src/main/java/org/rocksdb/LRUCache.java
+ src/main/java/org/rocksdb/MemoryUsageType.java
+ src/main/java/org/rocksdb/MemoryUtil.java
+ src/main/java/org/rocksdb/MemTableConfig.java
+ src/main/java/org/rocksdb/MergeOperator.java
+ src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
+ src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
+ src/main/java/org/rocksdb/MutableDBOptions.java
+ src/main/java/org/rocksdb/MutableDBOptionsInterface.java
+ src/main/java/org/rocksdb/MutableOptionKey.java
+ src/main/java/org/rocksdb/MutableOptionValue.java
+ src/main/java/org/rocksdb/NativeComparatorWrapper.java
+ src/main/java/org/rocksdb/NativeLibraryLoader.java
+ src/main/java/org/rocksdb/OperationStage.java
+ src/main/java/org/rocksdb/OperationType.java
+ src/main/java/org/rocksdb/OptimisticTransactionDB.java
+ src/main/java/org/rocksdb/OptimisticTransactionOptions.java
+ src/main/java/org/rocksdb/Options.java
+ src/main/java/org/rocksdb/OptionsUtil.java
+ src/main/java/org/rocksdb/PersistentCache.java
+ src/main/java/org/rocksdb/PlainTableConfig.java
+ src/main/java/org/rocksdb/Priority.java
+ src/main/java/org/rocksdb/Range.java
+ src/main/java/org/rocksdb/RateLimiter.java
+ src/main/java/org/rocksdb/RateLimiterMode.java
+ src/main/java/org/rocksdb/ReadOptions.java
+ src/main/java/org/rocksdb/ReadTier.java
+ src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java
+ src/main/java/org/rocksdb/RestoreOptions.java
+ src/main/java/org/rocksdb/ReusedSynchronisationType.java
+ src/main/java/org/rocksdb/RocksCallbackObject.java
+ src/main/java/org/rocksdb/RocksDBException.java
+ src/main/java/org/rocksdb/RocksDB.java
+ src/main/java/org/rocksdb/RocksEnv.java
+ src/main/java/org/rocksdb/RocksIteratorInterface.java
+ src/main/java/org/rocksdb/RocksIterator.java
+ src/main/java/org/rocksdb/RocksMemEnv.java
+ src/main/java/org/rocksdb/RocksMutableObject.java
+ src/main/java/org/rocksdb/RocksObject.java
+ src/main/java/org/rocksdb/SizeApproximationFlag.java
+ src/main/java/org/rocksdb/SkipListMemTableConfig.java
+ src/main/java/org/rocksdb/Slice.java
+ src/main/java/org/rocksdb/Snapshot.java
+ src/main/java/org/rocksdb/SstFileManager.java
+ src/main/java/org/rocksdb/SstFileMetaData.java
+ src/main/java/org/rocksdb/SstFileWriter.java
+ src/main/java/org/rocksdb/SstFileReader.java
+ src/main/java/org/rocksdb/SstFileReaderIterator.java
+ src/main/java/org/rocksdb/StateType.java
+ src/main/java/org/rocksdb/StatisticsCollectorCallback.java
+ src/main/java/org/rocksdb/StatisticsCollector.java
+ src/main/java/org/rocksdb/Statistics.java
+ src/main/java/org/rocksdb/StatsCollectorInput.java
+ src/main/java/org/rocksdb/StatsLevel.java
+ src/main/java/org/rocksdb/Status.java
+ src/main/java/org/rocksdb/StringAppendOperator.java
+ src/main/java/org/rocksdb/TableFilter.java
+ src/main/java/org/rocksdb/TableProperties.java
+ src/main/java/org/rocksdb/TableFormatConfig.java
+ src/main/java/org/rocksdb/ThreadType.java
+ src/main/java/org/rocksdb/ThreadStatus.java
+ src/main/java/org/rocksdb/TickerType.java
+ src/main/java/org/rocksdb/TimedEnv.java
+ src/main/java/org/rocksdb/TraceOptions.java
+ src/main/java/org/rocksdb/TraceWriter.java
+ src/main/java/org/rocksdb/TransactionalDB.java
+ src/main/java/org/rocksdb/TransactionalOptions.java
+ src/main/java/org/rocksdb/TransactionDB.java
+ src/main/java/org/rocksdb/TransactionDBOptions.java
+ src/main/java/org/rocksdb/Transaction.java
+ src/main/java/org/rocksdb/TransactionLogIterator.java
+ src/main/java/org/rocksdb/TransactionOptions.java
+ src/main/java/org/rocksdb/TtlDB.java
+ src/main/java/org/rocksdb/TxnDBWritePolicy.java
+ src/main/java/org/rocksdb/VectorMemTableConfig.java
+ src/main/java/org/rocksdb/WalFileType.java
+ src/main/java/org/rocksdb/WalFilter.java
+ src/main/java/org/rocksdb/WalProcessingOption.java
+ src/main/java/org/rocksdb/WALRecoveryMode.java
+ src/main/java/org/rocksdb/WBWIRocksIterator.java
+ src/main/java/org/rocksdb/WriteBatch.java
+ src/main/java/org/rocksdb/WriteBatchInterface.java
+ src/main/java/org/rocksdb/WriteBatchWithIndex.java
+ src/main/java/org/rocksdb/WriteOptions.java
+ src/main/java/org/rocksdb/WriteBufferManager.java
+ src/main/java/org/rocksdb/util/ByteUtil.java
+ src/main/java/org/rocksdb/util/BytewiseComparator.java
+ src/main/java/org/rocksdb/util/Environment.java
+ src/main/java/org/rocksdb/util/IntComparator.java
+ src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
+ src/main/java/org/rocksdb/util/SizeUnit.java
+ src/main/java/org/rocksdb/UInt64AddOperator.java
+)
+
+set(JAVA_TEST_CLASSES
+ src/test/java/org/rocksdb/BackupEngineTest.java
+ src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java
+ src/test/java/org/rocksdb/NativeComparatorWrapperTest.java
+ src/test/java/org/rocksdb/PlatformRandomHelper.java
+ src/test/java/org/rocksdb/RocksDBExceptionTest.java
+ src/test/java/org/rocksdb/RocksNativeLibraryResource.java
+ src/test/java/org/rocksdb/SnapshotTest.java
+ src/test/java/org/rocksdb/WriteBatchTest.java
+ src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java
+ src/test/java/org/rocksdb/util/WriteBatchGetter.java
+)
+
+include(FindJava)
+include(UseJava)
+find_package(JNI)
+
+include_directories(${JNI_INCLUDE_DIRS})
+include_directories(${PROJECT_SOURCE_DIR}/java)
+
+set(JAVA_TEST_LIBDIR ${PROJECT_SOURCE_DIR}/java/test-libs)
+set(JAVA_TMP_JAR ${JAVA_TEST_LIBDIR}/tmp.jar)
+set(JAVA_JUNIT_JAR ${JAVA_TEST_LIBDIR}/junit-4.12.jar)
+set(JAVA_HAMCR_JAR ${JAVA_TEST_LIBDIR}/hamcrest-core-1.3.jar)
+set(JAVA_MOCKITO_JAR ${JAVA_TEST_LIBDIR}/mockito-all-1.10.19.jar)
+set(JAVA_CGLIB_JAR ${JAVA_TEST_LIBDIR}/cglib-2.2.2.jar)
+set(JAVA_ASSERTJ_JAR ${JAVA_TEST_LIBDIR}/assertj-core-1.7.1.jar)
+set(JAVA_TESTCLASSPATH ${JAVA_JUNIT_JAR} ${JAVA_HAMCR_JAR} ${JAVA_MOCKITO_JAR} ${JAVA_CGLIB_JAR} ${JAVA_ASSERTJ_JAR})
+
+set(JNI_OUTPUT_DIR ${PROJECT_SOURCE_DIR}/java/include)
+file(MAKE_DIRECTORY ${JNI_OUTPUT_DIR})
+
+if(${Java_VERSION_MAJOR} VERSION_GREATER_EQUAL "10" AND ${CMAKE_VERSION} VERSION_LESS "3.11.4")
+ # Java 10 and newer don't have javah, but the alternative GENERATE_NATIVE_HEADERS requires CMake 3.11.4 or newer
+ message(FATAL_ERROR "Detected Java 10 or newer (${Java_VERSION_STRING}), to build with CMake please upgrade CMake to 3.11.4 or newer")
+
+elseif(${CMAKE_VERSION} VERSION_LESS "3.11.4" OR (${Java_VERSION_MINOR} STREQUAL "7" AND ${Java_VERSION_MAJOR} STREQUAL "1"))
+  # Old CMake or Java 1.7: prepare the JAR...
+ message("Preparing Jar for Java 7")
+ add_jar(
+ rocksdbjni_classes
+ SOURCES
+ ${JAVA_MAIN_CLASSES}
+ ${JAVA_TEST_CLASSES}
+ INCLUDE_JARS ${JAVA_TESTCLASSPATH}
+ )
+
+else ()
+  # Java 1.8 or newer: prepare the JAR...
+ message("Preparing Jar for JDK ${Java_VERSION_STRING}")
+ add_jar(
+ rocksdbjni_classes
+ SOURCES
+ ${JAVA_MAIN_CLASSES}
+ ${JAVA_TEST_CLASSES}
+ INCLUDE_JARS ${JAVA_TESTCLASSPATH}
+ GENERATE_NATIVE_HEADERS rocksdbjni_headers DESTINATION ${JNI_OUTPUT_DIR}
+ )
+
+endif()
+
+if(NOT EXISTS ${PROJECT_SOURCE_DIR}/java/classes)
+ file(MAKE_DIRECTORY ${PROJECT_SOURCE_DIR}/java/classes)
+endif()
+
+if(NOT EXISTS ${JAVA_TEST_LIBDIR})
+  file(MAKE_DIRECTORY ${JAVA_TEST_LIBDIR})
+endif()
+
+if (DEFINED CUSTOM_DEPS_URL)
+ set(DEPS_URL ${CUSTOM_DEPS_URL}/)
+else ()
+ # Using a Facebook AWS account for S3 storage. (maven.org has a history
+ # of failing in Travis builds.)
+ set(DEPS_URL "https://rocksdb-deps.s3-us-west-2.amazonaws.com/jars")
+endif()
+
+if(NOT EXISTS ${JAVA_JUNIT_JAR})
+ message("Downloading ${JAVA_JUNIT_JAR}")
+ file(DOWNLOAD ${DEPS_URL}/junit-4.12.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
+ list(GET downloadStatus 0 error_code)
+ if(NOT error_code EQUAL 0)
+ message(FATAL_ERROR "Failed downloading ${JAVA_JUNIT_JAR}")
+ endif()
+ file(RENAME ${JAVA_TMP_JAR} ${JAVA_JUNIT_JAR})
+endif()
+if(NOT EXISTS ${JAVA_HAMCR_JAR})
+ message("Downloading ${JAVA_HAMCR_JAR}")
+ file(DOWNLOAD ${DEPS_URL}/hamcrest-core-1.3.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
+ list(GET downloadStatus 0 error_code)
+ if(NOT error_code EQUAL 0)
+ message(FATAL_ERROR "Failed downloading ${JAVA_HAMCR_JAR}")
+ endif()
+ file(RENAME ${JAVA_TMP_JAR} ${JAVA_HAMCR_JAR})
+endif()
+if(NOT EXISTS ${JAVA_MOCKITO_JAR})
+ message("Downloading ${JAVA_MOCKITO_JAR}")
+ file(DOWNLOAD ${DEPS_URL}/mockito-all-1.10.19.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
+ list(GET downloadStatus 0 error_code)
+ if(NOT error_code EQUAL 0)
+ message(FATAL_ERROR "Failed downloading ${JAVA_MOCKITO_JAR}")
+ endif()
+ file(RENAME ${JAVA_TMP_JAR} ${JAVA_MOCKITO_JAR})
+endif()
+if(NOT EXISTS ${JAVA_CGLIB_JAR})
+ message("Downloading ${JAVA_CGLIB_JAR}")
+ file(DOWNLOAD ${DEPS_URL}/cglib-2.2.2.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
+ list(GET downloadStatus 0 error_code)
+ if(NOT error_code EQUAL 0)
+ message(FATAL_ERROR "Failed downloading ${JAVA_CGLIB_JAR}")
+ endif()
+ file(RENAME ${JAVA_TMP_JAR} ${JAVA_CGLIB_JAR})
+endif()
+if(NOT EXISTS ${JAVA_ASSERTJ_JAR})
+ message("Downloading ${JAVA_ASSERTJ_JAR}")
+ file(DOWNLOAD ${DEPS_URL}/assertj-core-1.7.1.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
+ list(GET downloadStatus 0 error_code)
+ if(NOT error_code EQUAL 0)
+ message(FATAL_ERROR "Failed downloading ${JAVA_ASSERTJ_JAR}")
+ endif()
+ file(RENAME ${JAVA_TMP_JAR} ${JAVA_ASSERTJ_JAR})
+endif()
+
+if(${CMAKE_VERSION} VERSION_LESS "3.11.4" OR (${Java_VERSION_MINOR} STREQUAL "7" AND ${Java_VERSION_MAJOR} STREQUAL "1"))
+  # Old CMake or Java 1.7: only generate the JNI headers here; for Java 1.8+ header generation is handled by the add_jar step above
+ message("Preparing JNI headers for Java 7")
+ set(NATIVE_JAVA_CLASSES
+ org.rocksdb.AbstractCompactionFilter
+ org.rocksdb.AbstractCompactionFilterFactory
+ org.rocksdb.AbstractComparator
+ org.rocksdb.AbstractImmutableNativeReference
+ org.rocksdb.AbstractNativeReference
+ org.rocksdb.AbstractRocksIterator
+ org.rocksdb.AbstractSlice
+ org.rocksdb.AbstractTableFilter
+ org.rocksdb.AbstractTraceWriter
+ org.rocksdb.AbstractTransactionNotifier
+ org.rocksdb.AbstractWalFilter
+ org.rocksdb.BackupableDBOptions
+ org.rocksdb.BackupEngine
+ org.rocksdb.BlockBasedTableConfig
+ org.rocksdb.BloomFilter
+ org.rocksdb.CassandraCompactionFilter
+ org.rocksdb.CassandraValueMergeOperator
+ org.rocksdb.Checkpoint
+ org.rocksdb.ClockCache
+ org.rocksdb.ColumnFamilyHandle
+ org.rocksdb.ColumnFamilyOptions
+ org.rocksdb.CompactionJobInfo
+ org.rocksdb.CompactionJobStats
+ org.rocksdb.CompactionOptions
+ org.rocksdb.CompactionOptionsFIFO
+ org.rocksdb.CompactionOptionsUniversal
+ org.rocksdb.CompactRangeOptions
+ org.rocksdb.ComparatorOptions
+ org.rocksdb.CompressionOptions
+ org.rocksdb.DBOptions
+ org.rocksdb.DirectSlice
+ org.rocksdb.Env
+ org.rocksdb.EnvOptions
+ org.rocksdb.Filter
+ org.rocksdb.FlushOptions
+ org.rocksdb.HashLinkedListMemTableConfig
+ org.rocksdb.HashSkipListMemTableConfig
+ org.rocksdb.HdfsEnv
+ org.rocksdb.IngestExternalFileOptions
+ org.rocksdb.Logger
+ org.rocksdb.LRUCache
+ org.rocksdb.MemoryUtil
+ org.rocksdb.MemTableConfig
+ org.rocksdb.NativeComparatorWrapper
+ org.rocksdb.NativeLibraryLoader
+ org.rocksdb.OptimisticTransactionDB
+ org.rocksdb.OptimisticTransactionOptions
+ org.rocksdb.Options
+ org.rocksdb.OptionsUtil
+ org.rocksdb.PersistentCache
+ org.rocksdb.PlainTableConfig
+ org.rocksdb.RateLimiter
+ org.rocksdb.ReadOptions
+ org.rocksdb.RemoveEmptyValueCompactionFilter
+ org.rocksdb.RestoreOptions
+ org.rocksdb.RocksCallbackObject
+ org.rocksdb.RocksDB
+ org.rocksdb.RocksEnv
+ org.rocksdb.RocksIterator
+ org.rocksdb.RocksIteratorInterface
+ org.rocksdb.RocksMemEnv
+ org.rocksdb.RocksMutableObject
+ org.rocksdb.RocksObject
+ org.rocksdb.SkipListMemTableConfig
+ org.rocksdb.Slice
+ org.rocksdb.Snapshot
+ org.rocksdb.SstFileManager
+ org.rocksdb.SstFileWriter
+ org.rocksdb.SstFileReader
+ org.rocksdb.SstFileReaderIterator
+ org.rocksdb.Statistics
+ org.rocksdb.StringAppendOperator
+ org.rocksdb.TableFormatConfig
+ org.rocksdb.ThreadStatus
+ org.rocksdb.TimedEnv
+ org.rocksdb.Transaction
+ org.rocksdb.TransactionDB
+ org.rocksdb.TransactionDBOptions
+ org.rocksdb.TransactionLogIterator
+ org.rocksdb.TransactionOptions
+ org.rocksdb.TtlDB
+ org.rocksdb.UInt64AddOperator
+ org.rocksdb.VectorMemTableConfig
+ org.rocksdb.WBWIRocksIterator
+ org.rocksdb.WriteBatch
+ org.rocksdb.WriteBatch.Handler
+ org.rocksdb.WriteBatchInterface
+ org.rocksdb.WriteBatchWithIndex
+ org.rocksdb.WriteOptions
+ org.rocksdb.NativeComparatorWrapperTest
+ org.rocksdb.RocksDBExceptionTest
+ org.rocksdb.SnapshotTest
+ org.rocksdb.WriteBatchTest
+ org.rocksdb.WriteBatchTestInternalHelper
+ org.rocksdb.WriteBufferManager
+ )
+
+ create_javah(
+ TARGET rocksdbjni_headers
+ CLASSES ${NATIVE_JAVA_CLASSES}
+ CLASSPATH rocksdbjni_classes ${JAVA_TESTCLASSPATH}
+ OUTPUT_DIR ${JNI_OUTPUT_DIR}
+ )
+endif()
+
+if(NOT MSVC)
+ set_property(TARGET ${ROCKSDB_STATIC_LIB} PROPERTY POSITION_INDEPENDENT_CODE ON)
+endif()
+
+set(ROCKSDBJNI_STATIC_LIB rocksdbjni${ARTIFACT_SUFFIX})
+add_library(${ROCKSDBJNI_STATIC_LIB} ${JNI_NATIVE_SOURCES})
+add_dependencies(${ROCKSDBJNI_STATIC_LIB} rocksdbjni_headers)
+target_link_libraries(${ROCKSDBJNI_STATIC_LIB} ${ROCKSDB_STATIC_LIB} ${ROCKSDB_LIB})
+
+if(NOT MINGW)
+ set(ROCKSDBJNI_SHARED_LIB rocksdbjni-shared${ARTIFACT_SUFFIX})
+ add_library(${ROCKSDBJNI_SHARED_LIB} SHARED ${JNI_NATIVE_SOURCES})
+ add_dependencies(${ROCKSDBJNI_SHARED_LIB} rocksdbjni_headers)
+ target_link_libraries(${ROCKSDBJNI_SHARED_LIB} ${ROCKSDB_STATIC_LIB} ${ROCKSDB_LIB})
+
+ set_target_properties(
+ ${ROCKSDBJNI_SHARED_LIB}
+ PROPERTIES
+ COMPILE_PDB_OUTPUT_DIRECTORY ${CMAKE_CFG_INTDIR}
+ COMPILE_PDB_NAME ${ROCKSDBJNI_STATIC_LIB}.pdb
+ )
+endif()
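
The Java-version branching in the file above exists because the `javah` tool was removed in JDK 10; newer toolchains generate JNI headers either through `javac -h` or through CMake's GENERATE_NATIVE_HEADERS option to add_jar. Both mechanisms key off `native` method declarations in the compiled classes. Below is a minimal, purely illustrative Java sketch of the kind of class that triggers header generation; the class and method names here are invented for this example and are not part of the diff:

```java
package org.rocksdb;

// Hypothetical example: any class declaring native methods causes
// javah / javac -h / GENERATE_NATIVE_HEADERS to emit a JNI header
// (here: org_rocksdb_ExampleNative.h) into the configured include dir.
public class ExampleNative {
  static {
    // The JNI library built from rocksjni/*.cc must be loaded before
    // any native method is invoked.
    System.loadLibrary("rocksdbjni");
  }

  // Declared in Java, implemented in C++; the generated header gives
  // the exact C symbol name that the .cc file must define.
  private static native long openNative(String path);
}
```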
diff --git a/src/rocksdb/java/HISTORY-JAVA.md b/src/rocksdb/java/HISTORY-JAVA.md
new file mode 100644
index 000000000..731886a61
--- /dev/null
+++ b/src/rocksdb/java/HISTORY-JAVA.md
@@ -0,0 +1,86 @@
+# RocksJava Change Log
+
+## 3.13 (8/4/2015)
+### New Features
+* Exposed the BackupEngine API (see the sketch at the end of this section).
+* Added CappedPrefixExtractor support. To use such an extractor, simply call useCappedPrefixExtractor on either Options or ColumnFamilyOptions.
+* Added RemoveEmptyValueCompactionFilter.
+
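
As a rough illustration of the BackupEngine API mentioned above, the following is an illustrative sketch (the paths and class name are invented), not code from this changelog:

```java
import org.rocksdb.*;

// Illustrative sketch only; the paths are arbitrary.
public class BackupSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (BackupableDBOptions backupOpts = new BackupableDBOptions("/tmp/rocksdb-backups");
         BackupEngine backupEngine = BackupEngine.open(Env.getDefault(), backupOpts);
         Options options = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(options, "/tmp/rocksdb-db")) {
      db.put("key".getBytes(), "value".getBytes());
      // true = flush the memtable first so the backup captures all writes
      backupEngine.createNewBackup(db, true);
    }
  }
}
```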
+## 3.10.0 (3/24/2015)
+### New Features
+* Added a compression-per-level API (see the sketch at the end of this section).
+* MemEnv is now available in RocksJava via the RocksMemEnv class.
+* lz4 compression is now included in the rocksjava static library when running `make rocksdbjavastatic`.
+
+### Public API Changes
+* Overflowing a size_t when setting rocksdb options now throws an IllegalArgumentException, which removes the necessity for a developer to catch these Exceptions explicitly.
+* The set and get functions for tableCacheRemoveScanCountLimit are deprecated.
+
+
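
A rough sketch of the per-level compression API noted above; the level count, compression choices, and path are illustrative assumptions, not values from this changelog:

```java
import java.util.Arrays;
import org.rocksdb.*;

// Illustrative sketch only.
public class CompressionPerLevelSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (Options options = new Options()
             .setCreateIfMissing(true)
             .setNumLevels(3)
             // One entry per level: keep L0 uncompressed, compress deeper levels.
             .setCompressionPerLevel(Arrays.asList(
                 CompressionType.NO_COMPRESSION,
                 CompressionType.SNAPPY_COMPRESSION,
                 CompressionType.SNAPPY_COMPRESSION));
         RocksDB db = RocksDB.open(options, "/tmp/rocksdb-compression")) {
      db.put("k".getBytes(), "v".getBytes());
    }
  }
}
```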
+## By 01/31/2015
+### New Features
+* WriteBatchWithIndex support (see the sketch at the end of this section).
+* Iterator support for WriteBatch and WriteBatchWithIndex.
+* GetUpdatesSince support.
+* Snapshots now carry information about their associated sequence number.
+* TTL DB support.
+
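
A rough sketch of WriteBatchWithIndex and its base-iterator support mentioned above; the path and class name are invented for illustration:

```java
import org.rocksdb.*;

// Illustrative sketch: uncommitted writes in the batch become visible
// through an iterator layered over the base DB.
public class WbwiSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (Options options = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(options, "/tmp/rocksdb-wbwi");
         WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) {
      wbwi.put("pending".getBytes(), "not-yet-committed".getBytes());
      // Iterate over the DB contents plus the batch's pending writes.
      try (RocksIterator it = wbwi.newIteratorWithBase(db.newIterator())) {
        for (it.seekToFirst(); it.isValid(); it.next()) {
          System.out.println(new String(it.key()) + " = " + new String(it.value()));
        }
      }
      // Commit the batch.
      try (WriteOptions wo = new WriteOptions()) {
        db.write(wo, wbwi);
      }
    }
  }
}
```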
+## By 11/14/2014
+### New Features
+* Full support for Column Families (see the sketch at the end of this section).
+* Slice and Comparator support.
+* Default merge operator support.
+* RateLimiter support.
+
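
A rough sketch of the column family support mentioned above; the "users" family name and the path are invented for illustration:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.*;

// Illustrative sketch only.
public class ColumnFamilySketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor("users".getBytes()));
    List<ColumnFamilyHandle> handles = new ArrayList<>();
    try (DBOptions options = new DBOptions()
             .setCreateIfMissing(true)
             .setCreateMissingColumnFamilies(true);
         RocksDB db = RocksDB.open(options, "/tmp/rocksdb-cf", descriptors, handles)) {
      // handles.get(1) addresses the "users" column family.
      db.put(handles.get(1), "alice".getBytes(), "1".getBytes());
      // Column family handles should be closed before the database itself.
      for (ColumnFamilyHandle h : handles) {
        h.close();
      }
    }
  }
}
```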
+## By 06/15/2014
+### New Features
+* Added a basic Java binding for rocksdb::Env so that multiple RocksDB instances can share the same thread pool and environment.
+* Added RestoreBackupableDB.
+
+## By 05/30/2014
+### Internal Framework Improvement
+* Added disOwnNativeHandle to RocksObject, which allows a RocksObject to give up ownership of its native handle. This method is useful when sharing and transferring the ownership of RocksDB C++ resources.
+
+## By 05/15/2014
+### New Features
+* Added RocksObject, the base class of all RocksDB classes that hold RocksDB resources on the C++ side.
+* Use the JAVA_HOME environment variable in the Makefile for RocksJava.
+### Public API changes
+* Renamed org.rocksdb.Iterator to org.rocksdb.RocksIterator to avoid potential conflicts with Java's built-in Iterator.
+
+## By 04/30/2014
+### New Features
+* Added Java binding for MultiGet.
+* Added static method RocksDB.loadLibrary(), which loads necessary library files.
+* Added Java bindings for 60+ rocksdb::Options.
+* Added Java binding for BloomFilter.
+* Added Java binding for ReadOptions.
+* Added Java binding for memtables.
+* Added Java binding for sst formats.
+* Added Java binding for the RocksDB Iterator, which enables sequential scan operations (see the sketch after this list).
+* Added Java binding for Statistics.
+* Added Java binding for BackupableDB.
+
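
A rough sketch of how the loadLibrary() and iterator bindings listed above combine; the path and class name are invented for illustration:

```java
import org.rocksdb.*;

// Illustrative sketch of a sequential scan with RocksIterator.
public class ScanSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();  // load the native library once per process
    try (Options options = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(options, "/tmp/rocksdb-scan")) {
      db.put("a".getBytes(), "1".getBytes());
      db.put("b".getBytes(), "2".getBytes());
      try (RocksIterator it = db.newIterator()) {
        for (it.seekToFirst(); it.isValid(); it.next()) {
          System.out.println(new String(it.key()) + " -> " + new String(it.value()));
        }
      }
    }
  }
}
```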
+### DB Benchmark
+* Added filluniquerandom and readseq benchmarks.
+* 70+ command-line options.
+* Enabled BloomFilter configuration.
+
+## By 04/15/2014
+### New Features
+* Added Java binding for WriteOptions.
+* Added Java binding for WriteBatch, which enables batched writes (see the sketch after this list).
+* Added Java binding for rocksdb::Options.
+* Added Java binding for block cache.
+* Added Java version DB Benchmark.
+
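
A rough sketch of the WriteBatch binding mentioned above; the keys, path, and class name are invented for illustration:

```java
import org.rocksdb.*;

// Illustrative sketch of batched writes: all operations in the batch
// are applied atomically by a single db.write() call.
public class WriteBatchSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (Options options = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(options, "/tmp/rocksdb-batch");
         WriteBatch batch = new WriteBatch();
         WriteOptions writeOpts = new WriteOptions()) {
      batch.put("k1".getBytes(), "v1".getBytes());
      batch.put("k2".getBytes(), "v2".getBytes());
      batch.delete("k3".getBytes());  // deletes are batched too
      db.write(writeOpts, batch);
    }
  }
}
```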
+### DB Benchmark
+* Added readwhilewriting benchmark.
+
+### Internal Framework Improvement
+* Avoid a potential byte-array-copy between c++ and Java in RocksDB.get.
+* Added SizeUnit in org.rocksdb.util to store consts like KB and GB.
+
+## 03/28/2014
+* RocksJava project started.
+* Added Java binding for RocksDB, which supports Open, Close, Get and Put.
diff --git a/src/rocksdb/java/Makefile b/src/rocksdb/java/Makefile
new file mode 100644
index 000000000..155db694d
--- /dev/null
+++ b/src/rocksdb/java/Makefile
@@ -0,0 +1,319 @@
+NATIVE_JAVA_CLASSES = \
+ org.rocksdb.AbstractCompactionFilter\
+ org.rocksdb.AbstractCompactionFilterFactory\
+ org.rocksdb.AbstractComparator\
+ org.rocksdb.AbstractSlice\
+ org.rocksdb.AbstractTableFilter\
+ org.rocksdb.AbstractTraceWriter\
+ org.rocksdb.AbstractTransactionNotifier\
+ org.rocksdb.AbstractWalFilter\
+ org.rocksdb.BackupEngine\
+ org.rocksdb.BackupableDBOptions\
+ org.rocksdb.BlockBasedTableConfig\
+ org.rocksdb.BloomFilter\
+ org.rocksdb.Checkpoint\
+ org.rocksdb.ClockCache\
+ org.rocksdb.CassandraCompactionFilter\
+ org.rocksdb.CassandraValueMergeOperator\
+ org.rocksdb.ColumnFamilyHandle\
+ org.rocksdb.ColumnFamilyOptions\
+ org.rocksdb.CompactionJobInfo\
+ org.rocksdb.CompactionJobStats\
+ org.rocksdb.CompactionOptions\
+ org.rocksdb.CompactionOptionsFIFO\
+ org.rocksdb.CompactionOptionsUniversal\
+ org.rocksdb.CompactRangeOptions\
+ org.rocksdb.ComparatorOptions\
+ org.rocksdb.CompressionOptions\
+ org.rocksdb.DBOptions\
+ org.rocksdb.DirectSlice\
+ org.rocksdb.Env\
+ org.rocksdb.EnvOptions\
+ org.rocksdb.FlushOptions\
+ org.rocksdb.Filter\
+ org.rocksdb.IngestExternalFileOptions\
+ org.rocksdb.HashLinkedListMemTableConfig\
+ org.rocksdb.HashSkipListMemTableConfig\
+ org.rocksdb.HdfsEnv\
+ org.rocksdb.Logger\
+ org.rocksdb.LRUCache\
+ org.rocksdb.MemoryUsageType\
+ org.rocksdb.MemoryUtil\
+ org.rocksdb.MergeOperator\
+ org.rocksdb.NativeComparatorWrapper\
+ org.rocksdb.OptimisticTransactionDB\
+ org.rocksdb.OptimisticTransactionOptions\
+ org.rocksdb.Options\
+ org.rocksdb.OptionsUtil\
+ org.rocksdb.PersistentCache\
+ org.rocksdb.PlainTableConfig\
+ org.rocksdb.RateLimiter\
+ org.rocksdb.ReadOptions\
+ org.rocksdb.RemoveEmptyValueCompactionFilter\
+ org.rocksdb.RestoreOptions\
+ org.rocksdb.RocksCallbackObject\
+ org.rocksdb.RocksDB\
+ org.rocksdb.RocksEnv\
+ org.rocksdb.RocksIterator\
+ org.rocksdb.RocksMemEnv\
+ org.rocksdb.SkipListMemTableConfig\
+ org.rocksdb.Slice\
+ org.rocksdb.SstFileManager\
+ org.rocksdb.SstFileWriter\
+ org.rocksdb.SstFileReader\
+ org.rocksdb.SstFileReaderIterator\
+ org.rocksdb.Statistics\
+ org.rocksdb.ThreadStatus\
+ org.rocksdb.TimedEnv\
+ org.rocksdb.Transaction\
+ org.rocksdb.TransactionDB\
+ org.rocksdb.TransactionDBOptions\
+ org.rocksdb.TransactionOptions\
+ org.rocksdb.TransactionLogIterator\
+ org.rocksdb.TtlDB\
+ org.rocksdb.VectorMemTableConfig\
+ org.rocksdb.Snapshot\
+ org.rocksdb.StringAppendOperator\
+ org.rocksdb.UInt64AddOperator\
+ org.rocksdb.WriteBatch\
+ org.rocksdb.WriteBatch.Handler\
+ org.rocksdb.WriteOptions\
+ org.rocksdb.WriteBatchWithIndex\
+ org.rocksdb.WriteBufferManager\
+ org.rocksdb.WBWIRocksIterator
+
+NATIVE_JAVA_TEST_CLASSES = org.rocksdb.RocksDBExceptionTest\
+ org.rocksdb.NativeComparatorWrapperTest.NativeStringComparatorWrapper\
+ org.rocksdb.WriteBatchTest\
+ org.rocksdb.WriteBatchTestInternalHelper
+
+ROCKSDB_MAJOR = $(shell egrep "ROCKSDB_MAJOR.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3)
+ROCKSDB_MINOR = $(shell egrep "ROCKSDB_MINOR.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3)
+ROCKSDB_PATCH = $(shell egrep "ROCKSDB_PATCH.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3)
+
+NATIVE_INCLUDE = ./include
+ARCH := $(shell getconf LONG_BIT)
+ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux$(ARCH).jar
+ifeq ($(PLATFORM), OS_MACOSX)
+ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar
+endif
+
+JAVA_TESTS = \
+ org.rocksdb.BackupableDBOptionsTest\
+ org.rocksdb.BackupEngineTest\
+ org.rocksdb.BlockBasedTableConfigTest\
+ org.rocksdb.BuiltinComparatorTest\
+ org.rocksdb.util.BytewiseComparatorTest\
+ org.rocksdb.util.BytewiseComparatorIntTest\
+ org.rocksdb.CheckPointTest\
+ org.rocksdb.ClockCacheTest\
+ org.rocksdb.ColumnFamilyOptionsTest\
+ org.rocksdb.ColumnFamilyTest\
+ org.rocksdb.CompactionFilterFactoryTest\
+ org.rocksdb.CompactionJobInfoTest\
+ org.rocksdb.CompactionJobStatsTest\
+ org.rocksdb.CompactionOptionsTest\
+ org.rocksdb.CompactionOptionsFIFOTest\
+ org.rocksdb.CompactionOptionsUniversalTest\
+ org.rocksdb.CompactionPriorityTest\
+ org.rocksdb.CompactionStopStyleTest\
+ org.rocksdb.ComparatorOptionsTest\
+ org.rocksdb.CompressionOptionsTest\
+ org.rocksdb.CompressionTypesTest\
+ org.rocksdb.DBOptionsTest\
+ org.rocksdb.DirectSliceTest\
+ org.rocksdb.util.EnvironmentTest\
+ org.rocksdb.EnvOptionsTest\
+ org.rocksdb.HdfsEnvTest\
+ org.rocksdb.IngestExternalFileOptionsTest\
+ org.rocksdb.util.IntComparatorTest\
+ org.rocksdb.util.JNIComparatorTest\
+ org.rocksdb.FilterTest\
+ org.rocksdb.FlushTest\
+ org.rocksdb.InfoLogLevelTest\
+ org.rocksdb.KeyMayExistTest\
+ org.rocksdb.LoggerTest\
+ org.rocksdb.LRUCacheTest\
+ org.rocksdb.MemoryUtilTest\
+ org.rocksdb.MemTableTest\
+ org.rocksdb.MergeTest\
+ org.rocksdb.MixedOptionsTest\
+ org.rocksdb.MutableColumnFamilyOptionsTest\
+ org.rocksdb.MutableDBOptionsTest\
+ org.rocksdb.NativeComparatorWrapperTest\
+ org.rocksdb.NativeLibraryLoaderTest\
+ org.rocksdb.OptimisticTransactionTest\
+ org.rocksdb.OptimisticTransactionDBTest\
+ org.rocksdb.OptimisticTransactionOptionsTest\
+ org.rocksdb.OptionsUtilTest\
+ org.rocksdb.OptionsTest\
+ org.rocksdb.PlainTableConfigTest\
+ org.rocksdb.RateLimiterTest\
+ org.rocksdb.ReadOnlyTest\
+ org.rocksdb.ReadOptionsTest\
+ org.rocksdb.util.ReverseBytewiseComparatorIntTest\
+ org.rocksdb.RocksDBTest\
+ org.rocksdb.RocksDBExceptionTest\
+ org.rocksdb.DefaultEnvTest\
+ org.rocksdb.RocksIteratorTest\
+ org.rocksdb.RocksMemEnvTest\
+ org.rocksdb.util.SizeUnitTest\
+ org.rocksdb.SliceTest\
+ org.rocksdb.SnapshotTest\
+ org.rocksdb.SstFileManagerTest\
+ org.rocksdb.SstFileWriterTest\
+ org.rocksdb.SstFileReaderTest\
+ org.rocksdb.TableFilterTest\
+ org.rocksdb.TimedEnvTest\
+ org.rocksdb.TransactionTest\
+ org.rocksdb.TransactionDBTest\
+ org.rocksdb.TransactionOptionsTest\
+ org.rocksdb.TransactionDBOptionsTest\
+ org.rocksdb.TransactionLogIteratorTest\
+ org.rocksdb.TtlDBTest\
+ org.rocksdb.StatisticsTest\
+ org.rocksdb.StatisticsCollectorTest\
+ org.rocksdb.WalFilterTest\
+ org.rocksdb.WALRecoveryModeTest\
+ org.rocksdb.WriteBatchHandlerTest\
+ org.rocksdb.WriteBatchTest\
+ org.rocksdb.WriteBatchThreadedTest\
+ org.rocksdb.WriteOptionsTest\
+ org.rocksdb.WriteBatchWithIndexTest
+
+MAIN_SRC = src/main/java
+TEST_SRC = src/test/java
+OUTPUT = target
+MAIN_CLASSES = $(OUTPUT)/classes
+TEST_CLASSES = $(OUTPUT)/test-classes
+JAVADOC = $(OUTPUT)/apidocs
+
+BENCHMARK_MAIN_SRC = benchmark/src/main/java
+BENCHMARK_OUTPUT = benchmark/target
+BENCHMARK_MAIN_CLASSES = $(BENCHMARK_OUTPUT)/classes
+
+SAMPLES_MAIN_SRC = samples/src/main/java
+SAMPLES_OUTPUT = samples/target
+SAMPLES_MAIN_CLASSES = $(SAMPLES_OUTPUT)/classes
+
+JAVA_TEST_LIBDIR = test-libs
+JAVA_JUNIT_JAR = $(JAVA_TEST_LIBDIR)/junit-4.12.jar
+JAVA_HAMCR_JAR = $(JAVA_TEST_LIBDIR)/hamcrest-core-1.3.jar
+JAVA_MOCKITO_JAR = $(JAVA_TEST_LIBDIR)/mockito-all-1.10.19.jar
+JAVA_CGLIB_JAR = $(JAVA_TEST_LIBDIR)/cglib-2.2.2.jar
+JAVA_ASSERTJ_JAR = $(JAVA_TEST_LIBDIR)/assertj-core-1.7.1.jar
+JAVA_TESTCLASSPATH = $(JAVA_JUNIT_JAR):$(JAVA_HAMCR_JAR):$(JAVA_MOCKITO_JAR):$(JAVA_CGLIB_JAR):$(JAVA_ASSERTJ_JAR)
+
+MVN_LOCAL = ~/.m2/repository
+
+# Set the default JAVA_ARGS to "" for DEBUG_LEVEL=0
+JAVA_ARGS ?=
+
+JAVAC_ARGS ?=
+
+# When debugging add -Xcheck:jni to the java args
+ifneq ($(DEBUG_LEVEL),0)
+ JAVA_ARGS = -ea -Xcheck:jni
+ JAVAC_ARGS = -Xlint:deprecation -Xlint:unchecked
+endif
+
+# Using a Facebook AWS account for S3 storage. (maven.org has a history
+# of failing in Travis builds.)
+DEPS_URL?=https://rocksdb-deps.s3-us-west-2.amazonaws.com/jars
+
+clean: clean-not-downloaded clean-downloaded
+
+clean-not-downloaded:
+ $(AM_V_at)rm -rf $(NATIVE_INCLUDE)
+ $(AM_V_at)rm -rf $(OUTPUT)
+ $(AM_V_at)rm -rf $(BENCHMARK_OUTPUT)
+ $(AM_V_at)rm -rf $(SAMPLES_OUTPUT)
+
+clean-downloaded:
+ $(AM_V_at)rm -rf $(JAVA_TEST_LIBDIR)
+
+
+javadocs: java
+ $(AM_V_GEN)mkdir -p $(JAVADOC)
+ $(AM_V_at)javadoc -d $(JAVADOC) -sourcepath $(MAIN_SRC) -subpackages org
+
+javalib: java java_test javadocs
+
+java:
+ $(AM_V_GEN)mkdir -p $(MAIN_CLASSES)
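+# javac from JDK 8+ can emit the JNI headers directly via -h; on JDK 7 the
+# headers are generated in a separate javah step below.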
+ifeq ($(shell java -version 2>&1 | grep 1.7.0 > /dev/null; printf $$?), 0)
+ $(AM_V_at)javac $(JAVAC_ARGS) -d $(MAIN_CLASSES)\
+ $(MAIN_SRC)/org/rocksdb/util/*.java\
+ $(MAIN_SRC)/org/rocksdb/*.java
+else
+ $(AM_V_at)javac $(JAVAC_ARGS) -h $(NATIVE_INCLUDE) -d $(MAIN_CLASSES)\
+ $(MAIN_SRC)/org/rocksdb/util/*.java\
+ $(MAIN_SRC)/org/rocksdb/*.java
+endif
+ $(AM_V_at)@cp ../HISTORY.md ./HISTORY-CPP.md
+ $(AM_V_at)@rm -f ./HISTORY-CPP.md
+ifeq ($(shell java -version 2>&1 | grep 1.7.0 > /dev/null; printf $$?), 0)
+ $(AM_V_at)javah -cp $(MAIN_CLASSES) -d $(NATIVE_INCLUDE) -jni $(NATIVE_JAVA_CLASSES)
+endif
+
+sample: java
+ $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES)
+ $(AM_V_at)javac $(JAVAC_ARGS) -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBSample.java
+ $(AM_V_at)@rm -rf /tmp/rocksdbjni
+ $(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found
+ java $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBSample /tmp/rocksdbjni
+ $(AM_V_at)@rm -rf /tmp/rocksdbjni
+ $(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found
+
+column_family_sample: java
+ $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES)
+ $(AM_V_at)javac $(JAVAC_ARGS) -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBColumnFamilySample.java
+ $(AM_V_at)@rm -rf /tmp/rocksdbjni
+ java $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBColumnFamilySample /tmp/rocksdbjni
+ $(AM_V_at)@rm -rf /tmp/rocksdbjni
+
+transaction_sample: java
+ $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES)
+ $(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/TransactionSample.java
+ $(AM_V_at)@rm -rf /tmp/rocksdbjni
+ java -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) TransactionSample /tmp/rocksdbjni
+ $(AM_V_at)@rm -rf /tmp/rocksdbjni
+
+optimistic_transaction_sample: java
+ $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES)
+ $(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/OptimisticTransactionSample.java
+ $(AM_V_at)@rm -rf /tmp/rocksdbjni
+ java -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) OptimisticTransactionSample /tmp/rocksdbjni
+ $(AM_V_at)@rm -rf /tmp/rocksdbjni
+
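+# Each test dependency is copied from the local Maven repository when present,
+# otherwise it is downloaded from DEPS_URL.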
+resolve_test_deps:
+ test -d "$(JAVA_TEST_LIBDIR)" || mkdir -p "$(JAVA_TEST_LIBDIR)"
+ test -s "$(JAVA_JUNIT_JAR)" || cp $(MVN_LOCAL)/junit/junit/4.12/junit-4.12.jar $(JAVA_TEST_LIBDIR) || curl --fail --insecure --output $(JAVA_JUNIT_JAR) --location $(DEPS_URL)/junit-4.12.jar
+ test -s "$(JAVA_HAMCR_JAR)" || cp $(MVN_LOCAL)/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar $(JAVA_TEST_LIBDIR) || curl --fail --insecure --output $(JAVA_HAMCR_JAR) --location $(DEPS_URL)/hamcrest-core-1.3.jar
+ test -s "$(JAVA_MOCKITO_JAR)" || cp $(MVN_LOCAL)/org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar $(JAVA_TEST_LIBDIR) || curl --fail --insecure --output "$(JAVA_MOCKITO_JAR)" --location $(DEPS_URL)/mockito-all-1.10.19.jar
+ test -s "$(JAVA_CGLIB_JAR)" || cp $(MVN_LOCAL)/cglib/cglib/2.2.2/cglib-2.2.2.jar $(JAVA_TEST_LIBDIR) || curl --fail --insecure --output "$(JAVA_CGLIB_JAR)" --location $(DEPS_URL)/cglib-2.2.2.jar
+ test -s "$(JAVA_ASSERTJ_JAR)" || cp $(MVN_LOCAL)/org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar $(JAVA_TEST_LIBDIR) || curl --fail --insecure --output "$(JAVA_ASSERTJ_JAR)" --location $(DEPS_URL)/assertj-core-1.7.1.jar
+
+java_test: java resolve_test_deps
+ $(AM_V_GEN)mkdir -p $(TEST_CLASSES)
+ifeq ($(shell java -version 2>&1|grep 1.7.0 >/dev/null; printf $$?),0)
+ $(AM_V_at)javac $(JAVAC_ARGS) -cp $(MAIN_CLASSES):$(JAVA_TESTCLASSPATH) -d $(TEST_CLASSES)\
+ $(TEST_SRC)/org/rocksdb/test/*.java\
+ $(TEST_SRC)/org/rocksdb/util/*.java\
+ $(TEST_SRC)/org/rocksdb/*.java
+ $(AM_V_at)javah -cp $(MAIN_CLASSES):$(TEST_CLASSES) -d $(NATIVE_INCLUDE) -jni $(NATIVE_JAVA_TEST_CLASSES)
+else
+ $(AM_V_at)javac $(JAVAC_ARGS) -cp $(MAIN_CLASSES):$(JAVA_TESTCLASSPATH) -h $(NATIVE_INCLUDE) -d $(TEST_CLASSES)\
+ $(TEST_SRC)/org/rocksdb/test/*.java\
+ $(TEST_SRC)/org/rocksdb/util/*.java\
+ $(TEST_SRC)/org/rocksdb/*.java
+endif
+
+test: java java_test run_test
+
+run_test:
+ java $(JAVA_ARGS) -Djava.library.path=target -cp "$(MAIN_CLASSES):$(TEST_CLASSES):$(JAVA_TESTCLASSPATH):target/*" org.rocksdb.test.RocksJunitRunner $(JAVA_TESTS)
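+# RocksJunitRunner accepts a list of test classes, so a single test can be
+# run by passing only that class in place of $(JAVA_TESTS), e.g.:
+#   java -ea -Djava.library.path=target -cp "$(MAIN_CLASSES):$(TEST_CLASSES):$(JAVA_TESTCLASSPATH):target/*" org.rocksdb.test.RocksJunitRunner org.rocksdb.MergeTest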
+
+db_bench: java
+ $(AM_V_GEN)mkdir -p $(BENCHMARK_MAIN_CLASSES)
+ $(AM_V_at)javac $(JAVAC_ARGS) -cp $(MAIN_CLASSES) -d $(BENCHMARK_MAIN_CLASSES) $(BENCHMARK_MAIN_SRC)/org/rocksdb/benchmark/*.java
diff --git a/src/rocksdb/java/RELEASE.md b/src/rocksdb/java/RELEASE.md
new file mode 100644
index 000000000..dda19455f
--- /dev/null
+++ b/src/rocksdb/java/RELEASE.md
@@ -0,0 +1,59 @@
+## Cross-building
+
+RocksDB can be built as a single self-contained cross-platform JAR. The cross-platform JAR can be used on any 64-bit OSX system, 32-bit Linux system, or 64-bit Linux system.
+
+Building a cross-platform JAR requires:
+
+ * [Docker](https://www.docker.com/docker-community)
+ * A Mac OSX machine that can compile RocksDB.
+ * A Java 7 JDK, with `JAVA_HOME` pointing at it.
+
+Once you have these items, run this make command from RocksDB's root source directory:
+
+ make jclean clean rocksdbjavastaticreleasedocker
+
+This command will build RocksDB natively on OSX, and will then spin up Docker containers to build RocksDB for 32-bit and 64-bit Linux with glibc, and 32-bit and 64-bit Linux with musl libc.
+
+You can find all native binaries and JARs in the java/target directory upon completion:
+
+ librocksdbjni-linux32.so
+ librocksdbjni-linux64.so
+ librocksdbjni-linux64-musl.so
+ librocksdbjni-linux32-musl.so
+ librocksdbjni-osx.jnilib
+ rocksdbjni-x.y.z-javadoc.jar
+ rocksdbjni-x.y.z-linux32.jar
+ rocksdbjni-x.y.z-linux64.jar
+ rocksdbjni-x.y.z-linux64-musl.jar
+ rocksdbjni-x.y.z-linux32-musl.jar
+ rocksdbjni-x.y.z-osx.jar
+ rocksdbjni-x.y.z-sources.jar
+ rocksdbjni-x.y.z.jar
+
+Where x.y.z is the built version number of RocksDB.
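+
+As a quick smoke test of a freshly built JAR, a small program along the following lines (a minimal sketch; the class name and database path are illustrative) can be compiled and run against rocksdbjni-x.y.z.jar:
+
+    import org.rocksdb.Options;
+    import org.rocksdb.RocksDB;
+
+    public class SmokeTest {
+      public static void main(final String[] args) throws Exception {
+        RocksDB.loadLibrary(); // extracts and loads the bundled native library
+        try (final Options options = new Options().setCreateIfMissing(true);
+             final RocksDB db = RocksDB.open(options, "/tmp/rocksdbjni-smoke")) {
+          db.put("hello".getBytes(), "world".getBytes());
+          System.out.println(new String(db.get("hello".getBytes()))); // prints "world"
+        }
+      }
+    }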
+
+## Maven publication
+
+Set ~/.m2/settings.xml to contain:
+
+ <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+ <servers>
+ <server>
+ <id>sonatype-nexus-staging</id>
+ <username>your-sonatype-jira-username</username>
+ <password>your-sonatype-jira-password</password>
+ </server>
+ </servers>
+ </settings>
+
+From RocksDB's root directory, first build the Java static JARs:
+
+ make jclean clean rocksdbjavastaticpublish
+
+This command will [stage the JAR artifacts on the Sonatype staging repository](http://central.sonatype.org/pages/manual-staging-bundle-creation-and-deployment.html). To release the staged artifacts:
+
+1. Go to [https://oss.sonatype.org/#stagingRepositories](https://oss.sonatype.org/#stagingRepositories) and search for "rocksdb" in the upper right hand search box.
+2. Select the rocksdb staging repository, and inspect its contents.
+3. If all is well, follow [these steps](https://oss.sonatype.org/#stagingRepositories) to close the repository and release it.
+
+After the release has occurred, the artifacts will be synced to Maven central within 24-48 hours.
diff --git a/src/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java b/src/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
new file mode 100644
index 000000000..ff36c74a4
--- /dev/null
+++ b/src/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
@@ -0,0 +1,1653 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+/**
+ * Copyright (C) 2011 the original author or authors.
+ * See the notice.md file distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.rocksdb.benchmark;
+
+import java.io.IOException;
+import java.io.File;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.util.Collection;
+import java.util.Date;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import org.rocksdb.*;
+import org.rocksdb.RocksMemEnv;
+import org.rocksdb.util.SizeUnit;
+
+class Stats {
+ int id_;
+ long start_;
+ long finish_;
+ double seconds_;
+ long done_;
+ long found_;
+ long lastOpTime_;
+ long nextReport_;
+ long bytes_;
+ StringBuilder message_;
+ boolean excludeFromMerge_;
+
+ // TODO(yhchiang): use the following arguments:
+ // (Long)Flag.stats_interval
+ // (Integer)Flag.stats_per_interval
+
+ Stats(int id) {
+ id_ = id;
+ nextReport_ = 100;
+ done_ = 0;
+ bytes_ = 0;
+ seconds_ = 0;
+ start_ = System.nanoTime();
+ lastOpTime_ = start_;
+ finish_ = start_;
+ found_ = 0;
+ message_ = new StringBuilder("");
+ excludeFromMerge_ = false;
+ }
+
+ void merge(final Stats other) {
+ if (other.excludeFromMerge_) {
+ return;
+ }
+
+ done_ += other.done_;
+ found_ += other.found_;
+ bytes_ += other.bytes_;
+ seconds_ += other.seconds_;
+ if (other.start_ < start_) start_ = other.start_;
+ if (other.finish_ > finish_) finish_ = other.finish_;
+
+ // Just keep the messages from one thread
+ if (message_.length() == 0) {
+ message_ = other.message_;
+ }
+ }
+
+ void stop() {
+ finish_ = System.nanoTime();
+ seconds_ = (double) (finish_ - start_) * 1e-9;
+ }
+
+ void addMessage(String msg) {
+ if (message_.length() > 0) {
+ message_.append(" ");
+ }
+ message_.append(msg);
+ }
+
+ void setId(int id) { id_ = id; }
+ void setExcludeFromMerge() { excludeFromMerge_ = true; }
+
+ void finishedSingleOp(int bytes) {
+ done_++;
+ lastOpTime_ = System.nanoTime();
+ bytes_ += bytes;
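+ // Progress is reported at a coarsening cadence: every 100 ops at first,
+ // stepping up to every 100,000 ops for long runs.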
+ if (done_ >= nextReport_) {
+ if (nextReport_ < 1000) {
+ nextReport_ += 100;
+ } else if (nextReport_ < 5000) {
+ nextReport_ += 500;
+ } else if (nextReport_ < 10000) {
+ nextReport_ += 1000;
+ } else if (nextReport_ < 50000) {
+ nextReport_ += 5000;
+ } else if (nextReport_ < 100000) {
+ nextReport_ += 10000;
+ } else if (nextReport_ < 500000) {
+ nextReport_ += 50000;
+ } else {
+ nextReport_ += 100000;
+ }
+ System.err.printf("... Task %s finished %d ops%30s\r", id_, done_, "");
+ }
+ }
+
+ void report(String name) {
+ // Pretend at least one op was done in case we are running a benchmark
+ // that does not call FinishedSingleOp().
+ if (done_ < 1) done_ = 1;
+
+ StringBuilder extra = new StringBuilder("");
+ if (bytes_ > 0) {
+ // Rate is computed on actual elapsed time, not the sum of per-thread
+ // elapsed times.
+ double elapsed = (finish_ - start_) * 1e-9;
+ extra.append(String.format("%6.1f MB/s", (bytes_ / 1048576.0) / elapsed));
+ }
+ extra.append(message_.toString());
+ double elapsed = (finish_ - start_);
+ double throughput = (double) done_ / (elapsed * 1e-9);
+
+ System.out.format("%-12s : %11.3f micros/op %d ops/sec;%s%s\n",
+ name, (elapsed * 1e-6) / done_,
+ (long) throughput, (extra.length() == 0 ? "" : " "), extra.toString());
+ }
+}
+
+public class DbBenchmark {
+ enum Order {
+ SEQUENTIAL,
+ RANDOM
+ }
+
+ enum DBState {
+ FRESH,
+ EXISTING
+ }
+
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ abstract class BenchmarkTask implements Callable<Stats> {
+ // TODO(yhchiang): use (Integer)Flag.perf_level.
+ public BenchmarkTask(
+ int tid, long randSeed, long numEntries, long keyRange) {
+ tid_ = tid;
+ rand_ = new Random(randSeed + tid * 1000);
+ numEntries_ = numEntries;
+ keyRange_ = keyRange;
+ stats_ = new Stats(tid);
+ }
+
+ @Override public Stats call() throws RocksDBException {
+ stats_.start_ = System.nanoTime();
+ runTask();
+ stats_.finish_ = System.nanoTime();
+ return stats_;
+ }
+
+ abstract protected void runTask() throws RocksDBException;
+
+ protected int tid_;
+ protected Random rand_;
+ protected long numEntries_;
+ protected long keyRange_;
+ protected Stats stats_;
+
+ protected void getFixedKey(byte[] key, long sn) {
+ generateKeyFromLong(key, sn);
+ }
+
+ protected void getRandomKey(byte[] key, long range) {
+ generateKeyFromLong(key, Math.abs(rand_.nextLong() % range));
+ }
+ }
+
+ abstract class WriteTask extends BenchmarkTask {
+ public WriteTask(
+ int tid, long randSeed, long numEntries, long keyRange,
+ WriteOptions writeOpt, long entriesPerBatch) {
+ super(tid, randSeed, numEntries, keyRange);
+ writeOpt_ = writeOpt;
+ entriesPerBatch_ = entriesPerBatch;
+ maxWritesPerSecond_ = -1;
+ }
+
+ public WriteTask(
+ int tid, long randSeed, long numEntries, long keyRange,
+ WriteOptions writeOpt, long entriesPerBatch, long maxWritesPerSecond) {
+ super(tid, randSeed, numEntries, keyRange);
+ writeOpt_ = writeOpt;
+ entriesPerBatch_ = entriesPerBatch;
+ maxWritesPerSecond_ = maxWritesPerSecond;
+ }
+
+ @Override public void runTask() throws RocksDBException {
+ if (numEntries_ != DbBenchmark.this.num_) {
+ stats_.message_.append(String.format(" (%d ops)", numEntries_));
+ }
+ byte[] key = new byte[keySize_];
+ byte[] value = new byte[valueSize_];
+
+ try {
+ if (entriesPerBatch_ == 1) {
+ for (long i = 0; i < numEntries_; ++i) {
+ getKey(key, i, keyRange_);
+ DbBenchmark.this.gen_.generate(value);
+ db_.put(writeOpt_, key, value);
+ stats_.finishedSingleOp(keySize_ + valueSize_);
+ writeRateControl(i);
+ if (isFinished()) {
+ return;
+ }
+ }
+ } else {
+ for (long i = 0; i < numEntries_; i += entriesPerBatch_) {
+ WriteBatch batch = new WriteBatch();
+ for (long j = 0; j < entriesPerBatch_; j++) {
+ getKey(key, i + j, keyRange_);
+ DbBenchmark.this.gen_.generate(value);
+ batch.put(key, value);
+ stats_.finishedSingleOp(keySize_ + valueSize_);
+ }
+ db_.write(writeOpt_, batch);
+ batch.dispose();
+ writeRateControl(i);
+ if (isFinished()) {
+ return;
+ }
+ }
+ }
+ } catch (InterruptedException e) {
+ // thread has been terminated.
+ }
+ }
+
+ protected void writeRateControl(long writeCount)
+ throws InterruptedException {
+ if (maxWritesPerSecond_ <= 0) return;
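+ // After writeCount ops, the elapsed time should be at least
+ // writeCount / maxWritesPerSecond_ seconds; if the writer is ahead of
+ // that schedule by more than ~1ms, sleep until it catches up.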
+ long minInterval =
+ writeCount * TimeUnit.SECONDS.toNanos(1) / maxWritesPerSecond_;
+ long interval = System.nanoTime() - stats_.start_;
+ if (minInterval - interval > TimeUnit.MILLISECONDS.toNanos(1)) {
+ TimeUnit.NANOSECONDS.sleep(minInterval - interval);
+ }
+ }
+
+ abstract protected void getKey(byte[] key, long id, long range);
+ protected WriteOptions writeOpt_;
+ protected long entriesPerBatch_;
+ protected long maxWritesPerSecond_;
+ }
+
+ class WriteSequentialTask extends WriteTask {
+ public WriteSequentialTask(
+ int tid, long randSeed, long numEntries, long keyRange,
+ WriteOptions writeOpt, long entriesPerBatch) {
+ super(tid, randSeed, numEntries, keyRange,
+ writeOpt, entriesPerBatch);
+ }
+ public WriteSequentialTask(
+ int tid, long randSeed, long numEntries, long keyRange,
+ WriteOptions writeOpt, long entriesPerBatch,
+ long maxWritesPerSecond) {
+ super(tid, randSeed, numEntries, keyRange,
+ writeOpt, entriesPerBatch,
+ maxWritesPerSecond);
+ }
+ @Override protected void getKey(byte[] key, long id, long range) {
+ getFixedKey(key, id);
+ }
+ }
+
+ class WriteRandomTask extends WriteTask {
+ public WriteRandomTask(
+ int tid, long randSeed, long numEntries, long keyRange,
+ WriteOptions writeOpt, long entriesPerBatch) {
+ super(tid, randSeed, numEntries, keyRange,
+ writeOpt, entriesPerBatch);
+ }
+ public WriteRandomTask(
+ int tid, long randSeed, long numEntries, long keyRange,
+ WriteOptions writeOpt, long entriesPerBatch,
+ long maxWritesPerSecond) {
+ super(tid, randSeed, numEntries, keyRange,
+ writeOpt, entriesPerBatch,
+ maxWritesPerSecond);
+ }
+ @Override protected void getKey(byte[] key, long id, long range) {
+ getRandomKey(key, range);
+ }
+ }
+
+ class WriteUniqueRandomTask extends WriteTask {
+ static final int MAX_BUFFER_SIZE = 10000000;
+ public WriteUniqueRandomTask(
+ int tid, long randSeed, long numEntries, long keyRange,
+ WriteOptions writeOpt, long entriesPerBatch) {
+ super(tid, randSeed, numEntries, keyRange,
+ writeOpt, entriesPerBatch);
+ initRandomKeySequence();
+ }
+ public WriteUniqueRandomTask(
+ int tid, long randSeed, long numEntries, long keyRange,
+ WriteOptions writeOpt, long entriesPerBatch,
+ long maxWritesPerSecond) {
+ super(tid, randSeed, numEntries, keyRange,
+ writeOpt, entriesPerBatch,
+ maxWritesPerSecond);
+ initRandomKeySequence();
+ }
+ @Override protected void getKey(byte[] key, long id, long range) {
+ generateKeyFromLong(key, nextUniqueRandom());
+ }
+
+ protected void initRandomKeySequence() {
+ bufferSize_ = MAX_BUFFER_SIZE;
+ if (bufferSize_ > keyRange_) {
+ bufferSize_ = (int) keyRange_;
+ }
+ currentKeyCount_ = bufferSize_;
+ keyBuffer_ = new long[MAX_BUFFER_SIZE];
+ for (int k = 0; k < bufferSize_; ++k) {
+ keyBuffer_[k] = k;
+ }
+ }
+
+ /**
+ * Semi-randomly return the next unique key. It is guaranteed to be
+ * fully random if keyRange_ <= MAX_BUFFER_SIZE.
+ */
+ long nextUniqueRandom() {
+ if (bufferSize_ == 0) {
+ System.err.println("bufferSize_ == 0.");
+ return 0;
+ }
+ int r = rand_.nextInt(bufferSize_);
+ // randomly pick one from the keyBuffer
+ long randKey = keyBuffer_[r];
+ if (currentKeyCount_ < keyRange_) {
+ // if we have not yet inserted all keys, insert next new key to [r].
+ keyBuffer_[r] = currentKeyCount_++;
+ } else {
+ // move the last element to [r] and decrease the size by 1.
+ keyBuffer_[r] = keyBuffer_[--bufferSize_];
+ }
+ return randKey;
+ }
+
+ int bufferSize_;
+ long currentKeyCount_;
+ long[] keyBuffer_;
+ }
+
+ class ReadRandomTask extends BenchmarkTask {
+ public ReadRandomTask(
+ int tid, long randSeed, long numEntries, long keyRange) {
+ super(tid, randSeed, numEntries, keyRange);
+ }
+ @Override public void runTask() throws RocksDBException {
+ byte[] key = new byte[keySize_];
+ byte[] value = new byte[valueSize_];
+ for (long i = 0; i < numEntries_; i++) {
+ getRandomKey(key, keyRange_);
+ int len = db_.get(key, value);
+ if (len != RocksDB.NOT_FOUND) {
+ stats_.found_++;
+ stats_.finishedSingleOp(keySize_ + valueSize_);
+ } else {
+ stats_.finishedSingleOp(keySize_);
+ }
+ if (isFinished()) {
+ return;
+ }
+ }
+ }
+ }
+
+ class ReadSequentialTask extends BenchmarkTask {
+ public ReadSequentialTask(
+ int tid, long randSeed, long numEntries, long keyRange) {
+ super(tid, randSeed, numEntries, keyRange);
+ }
+ @Override public void runTask() throws RocksDBException {
+ RocksIterator iter = db_.newIterator();
+ long i;
+ for (iter.seekToFirst(), i = 0;
+ iter.isValid() && i < numEntries_;
+ iter.next(), ++i) {
+ stats_.found_++;
+ stats_.finishedSingleOp(iter.key().length + iter.value().length);
+ if (isFinished()) {
+ iter.dispose();
+ return;
+ }
+ }
+ iter.dispose();
+ }
+ }
+
+ public DbBenchmark(Map<Flag, Object> flags) throws Exception {
+ benchmarks_ = (List<String>) flags.get(Flag.benchmarks);
+ num_ = (Integer) flags.get(Flag.num);
+ threadNum_ = (Integer) flags.get(Flag.threads);
+ reads_ = (Integer) (flags.get(Flag.reads) == null ?
+ flags.get(Flag.num) : flags.get(Flag.reads));
+ keySize_ = (Integer) flags.get(Flag.key_size);
+ valueSize_ = (Integer) flags.get(Flag.value_size);
+ compressionRatio_ = (Double) flags.get(Flag.compression_ratio);
+ useExisting_ = (Boolean) flags.get(Flag.use_existing_db);
+ randSeed_ = (Long) flags.get(Flag.seed);
+ databaseDir_ = (String) flags.get(Flag.db);
+ writesPerSeconds_ = (Integer) flags.get(Flag.writes_per_second);
+ memtable_ = (String) flags.get(Flag.memtablerep);
+ maxWriteBufferNumber_ = (Integer) flags.get(Flag.max_write_buffer_number);
+ prefixSize_ = (Integer) flags.get(Flag.prefix_size);
+ keysPerPrefix_ = (Integer) flags.get(Flag.keys_per_prefix);
+ hashBucketCount_ = (Long) flags.get(Flag.hash_bucket_count);
+ usePlainTable_ = (Boolean) flags.get(Flag.use_plain_table);
+ useMemenv_ = (Boolean) flags.get(Flag.use_mem_env);
+ flags_ = flags;
+ finishLock_ = new Object();
+ // options.setPrefixSize((Integer)flags_.get(Flag.prefix_size));
+ // options.setKeysPerPrefix((Long)flags_.get(Flag.keys_per_prefix));
+ compressionType_ = (String) flags.get(Flag.compression_type);
+ compression_ = CompressionType.NO_COMPRESSION;
+ try {
+ if (compressionType_!=null) {
+ final CompressionType compressionType =
+ CompressionType.getCompressionType(compressionType_);
+ if (compressionType != null &&
+ compressionType != CompressionType.NO_COMPRESSION) {
+ System.loadLibrary(compressionType.getLibraryName());
+ }
+
+ }
+ } catch (UnsatisfiedLinkError e) {
+ System.err.format("Unable to load %s library:%s%n" +
+ "No compression is used.%n",
+ compressionType_, e.toString());
+ compressionType_ = "none";
+ }
+ gen_ = new RandomGenerator(randSeed_, compressionRatio_);
+ }
+
+ private void prepareReadOptions(ReadOptions options) {
+ options.setVerifyChecksums((Boolean)flags_.get(Flag.verify_checksum));
+ options.setTailing((Boolean)flags_.get(Flag.use_tailing_iterator));
+ }
+
+ private void prepareWriteOptions(WriteOptions options) {
+ options.setSync((Boolean)flags_.get(Flag.sync));
+ options.setDisableWAL((Boolean)flags_.get(Flag.disable_wal));
+ }
+
+ private void prepareOptions(Options options) throws RocksDBException {
+ if (!useExisting_) {
+ options.setCreateIfMissing(true);
+ } else {
+ options.setCreateIfMissing(false);
+ }
+ if (useMemenv_) {
+ options.setEnv(new RocksMemEnv(Env.getDefault()));
+ }
+ switch (memtable_) {
+ case "skip_list":
+ options.setMemTableConfig(new SkipListMemTableConfig());
+ break;
+ case "vector":
+ options.setMemTableConfig(new VectorMemTableConfig());
+ break;
+ case "hash_linkedlist":
+ options.setMemTableConfig(
+ new HashLinkedListMemTableConfig()
+ .setBucketCount(hashBucketCount_));
+ options.useFixedLengthPrefixExtractor(prefixSize_);
+ break;
+ case "hash_skiplist":
+ case "prefix_hash":
+ options.setMemTableConfig(
+ new HashSkipListMemTableConfig()
+ .setBucketCount(hashBucketCount_));
+ options.useFixedLengthPrefixExtractor(prefixSize_);
+ break;
+ default:
+ System.err.format(
+ "unable to detect the specified memtable, " +
+ "use the default memtable factory %s%n",
+ options.memTableFactoryName());
+ break;
+ }
+ if (usePlainTable_) {
+ options.setTableFormatConfig(
+ new PlainTableConfig().setKeySize(keySize_));
+ } else {
+ BlockBasedTableConfig table_options = new BlockBasedTableConfig();
+ table_options.setBlockSize((Long)flags_.get(Flag.block_size))
+ .setBlockCacheSize((Long)flags_.get(Flag.cache_size))
+ .setCacheNumShardBits(
+ (Integer)flags_.get(Flag.cache_numshardbits));
+ options.setTableFormatConfig(table_options);
+ }
+ options.setWriteBufferSize(
+ (Long)flags_.get(Flag.write_buffer_size));
+ options.setMaxWriteBufferNumber(
+ (Integer)flags_.get(Flag.max_write_buffer_number));
+ options.setMaxBackgroundCompactions(
+ (Integer)flags_.get(Flag.max_background_compactions));
+ options.getEnv().setBackgroundThreads(
+ (Integer)flags_.get(Flag.max_background_compactions));
+ options.setMaxBackgroundFlushes(
+ (Integer)flags_.get(Flag.max_background_flushes));
+ options.setMaxBackgroundJobs((Integer) flags_.get(Flag.max_background_jobs));
+ options.setMaxOpenFiles(
+ (Integer)flags_.get(Flag.open_files));
+ options.setUseFsync(
+ (Boolean)flags_.get(Flag.use_fsync));
+ options.setWalDir(
+ (String)flags_.get(Flag.wal_dir));
+ options.setDeleteObsoleteFilesPeriodMicros(
+ (Integer)flags_.get(Flag.delete_obsolete_files_period_micros));
+ options.setTableCacheNumshardbits(
+ (Integer)flags_.get(Flag.table_cache_numshardbits));
+ options.setAllowMmapReads(
+ (Boolean)flags_.get(Flag.mmap_read));
+ options.setAllowMmapWrites(
+ (Boolean)flags_.get(Flag.mmap_write));
+ options.setAdviseRandomOnOpen(
+ (Boolean)flags_.get(Flag.advise_random_on_open));
+ options.setUseAdaptiveMutex(
+ (Boolean)flags_.get(Flag.use_adaptive_mutex));
+ options.setBytesPerSync(
+ (Long)flags_.get(Flag.bytes_per_sync));
+ options.setBloomLocality(
+ (Integer)flags_.get(Flag.bloom_locality));
+ options.setMinWriteBufferNumberToMerge(
+ (Integer)flags_.get(Flag.min_write_buffer_number_to_merge));
+ options.setMemtablePrefixBloomSizeRatio((Double) flags_.get(Flag.memtable_bloom_size_ratio));
+ options.setNumLevels(
+ (Integer)flags_.get(Flag.num_levels));
+ options.setTargetFileSizeBase(
+ (Integer)flags_.get(Flag.target_file_size_base));
+ options.setTargetFileSizeMultiplier((Integer)flags_.get(Flag.target_file_size_multiplier));
+ options.setMaxBytesForLevelBase(
+ (Integer)flags_.get(Flag.max_bytes_for_level_base));
+ options.setMaxBytesForLevelMultiplier((Double) flags_.get(Flag.max_bytes_for_level_multiplier));
+ options.setLevelZeroStopWritesTrigger(
+ (Integer)flags_.get(Flag.level0_stop_writes_trigger));
+ options.setLevelZeroSlowdownWritesTrigger(
+ (Integer)flags_.get(Flag.level0_slowdown_writes_trigger));
+ options.setLevelZeroFileNumCompactionTrigger(
+ (Integer)flags_.get(Flag.level0_file_num_compaction_trigger));
+ options.setMaxCompactionBytes(
+ (Long) flags_.get(Flag.max_compaction_bytes));
+ options.setDisableAutoCompactions(
+ (Boolean)flags_.get(Flag.disable_auto_compactions));
+ options.setMaxSuccessiveMerges(
+ (Integer)flags_.get(Flag.max_successive_merges));
+ options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds));
+ options.setWalSizeLimitMB((Long)flags_.get(Flag.wal_size_limit_MB));
+ if(flags_.get(Flag.java_comparator) != null) {
+ options.setComparator(
+ (AbstractComparator)flags_.get(Flag.java_comparator));
+ }
+
+ /* TODO(yhchiang): enable the following parameters
+ options.setCompressionType((String)flags_.get(Flag.compression_type));
+ options.setCompressionLevel((Integer)flags_.get(Flag.compression_level));
+ options.setMinLevelToCompress((Integer)flags_.get(Flag.min_level_to_compress));
+ options.setHdfs((String)flags_.get(Flag.hdfs)); // env
+ options.setStatistics((Boolean)flags_.get(Flag.statistics));
+ options.setUniversalSizeRatio(
+ (Integer)flags_.get(Flag.universal_size_ratio));
+ options.setUniversalMinMergeWidth(
+ (Integer)flags_.get(Flag.universal_min_merge_width));
+ options.setUniversalMaxMergeWidth(
+ (Integer)flags_.get(Flag.universal_max_merge_width));
+ options.setUniversalMaxSizeAmplificationPercent(
+ (Integer)flags_.get(Flag.universal_max_size_amplification_percent));
+ options.setUniversalCompressionSizePercent(
+ (Integer)flags_.get(Flag.universal_compression_size_percent));
+ // TODO(yhchiang): add RocksDB.openForReadOnly() to enable Flag.readonly
+ // TODO(yhchiang): enable Flag.merge_operator by switch
+ options.setAccessHintOnCompactionStart(
+ (String)flags_.get(Flag.compaction_fadvice));
+ // available values of fadvice are "NONE", "NORMAL", "SEQUENTIAL", "WILLNEED" for fadvice
+ */
+ }
+
+ private void run() throws RocksDBException {
+ if (!useExisting_) {
+ destroyDb();
+ }
+ Options options = new Options();
+ prepareOptions(options);
+ open(options);
+
+ printHeader(options);
+
+ for (String benchmark : benchmarks_) {
+ List<Callable<Stats>> tasks = new ArrayList<Callable<Stats>>();
+ List<Callable<Stats>> bgTasks = new ArrayList<Callable<Stats>>();
+ WriteOptions writeOpt = new WriteOptions();
+ prepareWriteOptions(writeOpt);
+ ReadOptions readOpt = new ReadOptions();
+ prepareReadOptions(readOpt);
+ int currentTaskId = 0;
+ boolean known = true;
+
+ switch (benchmark) {
+ case "fillseq":
+ tasks.add(new WriteSequentialTask(
+ currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
+ break;
+ case "fillbatch":
+ tasks.add(
+ new WriteSequentialTask(currentTaskId++, randSeed_, num_, num_, writeOpt, 1000));
+ break;
+ case "fillrandom":
+ tasks.add(new WriteRandomTask(
+ currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
+ break;
+ case "filluniquerandom":
+ tasks.add(new WriteUniqueRandomTask(
+ currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
+ break;
+ case "fillsync":
+ writeOpt.setSync(true);
+ tasks.add(new WriteRandomTask(
+ currentTaskId++, randSeed_, num_ / 1000, num_ / 1000,
+ writeOpt, 1));
+ break;
+ case "readseq":
+ for (int t = 0; t < threadNum_; ++t) {
+ tasks.add(new ReadSequentialTask(
+ currentTaskId++, randSeed_, reads_ / threadNum_, num_));
+ }
+ break;
+ case "readrandom":
+ for (int t = 0; t < threadNum_; ++t) {
+ tasks.add(new ReadRandomTask(
+ currentTaskId++, randSeed_, reads_ / threadNum_, num_));
+ }
+ break;
+ case "readwhilewriting":
+ WriteTask writeTask = new WriteRandomTask(
+ -1, randSeed_, Long.MAX_VALUE, num_, writeOpt, 1, writesPerSeconds_);
+ writeTask.stats_.setExcludeFromMerge();
+ bgTasks.add(writeTask);
+ for (int t = 0; t < threadNum_; ++t) {
+ tasks.add(new ReadRandomTask(
+ currentTaskId++, randSeed_, reads_ / threadNum_, num_));
+ }
+ break;
+ case "readhot":
+ for (int t = 0; t < threadNum_; ++t) {
+ tasks.add(new ReadRandomTask(
+ currentTaskId++, randSeed_, reads_ / threadNum_, num_ / 100));
+ }
+ break;
+ case "delete":
+ destroyDb();
+ open(options);
+ break;
+ default:
+ known = false;
+ System.err.println("Unknown benchmark: " + benchmark);
+ break;
+ }
+ if (known) {
+ ExecutorService executor = Executors.newCachedThreadPool();
+ ExecutorService bgExecutor = Executors.newCachedThreadPool();
+ try {
+ // measure only the main executor time
+ List<Future<Stats>> bgResults = new ArrayList<Future<Stats>>();
+ for (Callable bgTask : bgTasks) {
+ bgResults.add(bgExecutor.submit(bgTask));
+ }
+ start();
+ List<Future<Stats>> results = executor.invokeAll(tasks);
+ executor.shutdown();
+ boolean finished = executor.awaitTermination(10, TimeUnit.SECONDS);
+ if (!finished) {
+ System.out.format(
+ "Benchmark %s was not finished before timeout.",
+ benchmark);
+ executor.shutdownNow();
+ }
+ setFinished(true);
+ bgExecutor.shutdown();
+ finished = bgExecutor.awaitTermination(10, TimeUnit.SECONDS);
+ if (!finished) {
+ System.out.format(
+ "Benchmark %s was not finished before timeout.",
+ benchmark);
+ bgExecutor.shutdownNow();
+ }
+
+ stop(benchmark, results, currentTaskId);
+ } catch (InterruptedException e) {
+ System.err.println(e);
+ }
+ }
+ writeOpt.dispose();
+ readOpt.dispose();
+ }
+ options.dispose();
+ db_.close();
+ }
+
+ private void printHeader(Options options) {
+ int kKeySize = keySize_;
+ System.out.printf("Keys: %d bytes each\n", kKeySize);
+ System.out.printf("Values: %d bytes each (%d bytes after compression)\n",
+ valueSize_,
+ (int) (valueSize_ * compressionRatio_ + 0.5));
+ System.out.printf("Entries: %d\n", num_);
+ System.out.printf("RawSize: %.1f MB (estimated)\n",
+ ((double)(kKeySize + valueSize_) * num_) / SizeUnit.MB);
+ System.out.printf("FileSize: %.1f MB (estimated)\n",
+ (((kKeySize + valueSize_ * compressionRatio_) * num_) / SizeUnit.MB));
+ System.out.format("Memtable Factory: %s%n", options.memTableFactoryName());
+ System.out.format("Prefix: %d bytes%n", prefixSize_);
+ System.out.format("Compression: %s%n", compressionType_);
+ printWarnings();
+ System.out.printf("------------------------------------------------\n");
+ }
+
+ void printWarnings() {
+ boolean assertsEnabled = false;
+ assert assertsEnabled = true; // Intentional side effect!!!
+ if (assertsEnabled) {
+ System.out.printf(
+ "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+ }
+ }
+
+ private void open(Options options) throws RocksDBException {
+ System.out.println("Using database directory: " + databaseDir_);
+ db_ = RocksDB.open(options, databaseDir_);
+ }
+
+ private void start() {
+ setFinished(false);
+ startTime_ = System.nanoTime();
+ }
+
+ private void stop(
+ String benchmark, List<Future<Stats>> results, int concurrentThreads) {
+ long endTime = System.nanoTime();
+ double elapsedSeconds =
+ 1.0d * (endTime - startTime_) / TimeUnit.SECONDS.toNanos(1);
+
+ Stats stats = new Stats(-1);
+ int taskFinishedCount = 0;
+ for (Future<Stats> result : results) {
+ if (result.isDone()) {
+ try {
+ Stats taskStats = result.get(3, TimeUnit.SECONDS);
+ if (!result.isCancelled()) {
+ taskFinishedCount++;
+ }
+ stats.merge(taskStats);
+ } catch (Exception e) {
+ // then it's not successful, the output will indicate this
+ }
+ }
+ }
+ String extra = "";
+ if (benchmark.indexOf("read") >= 0) {
+ extra = String.format(" %d / %d found; ", stats.found_, stats.done_);
+ } else {
+ extra = String.format(" %d ops done; ", stats.done_);
+ }
+
+ System.out.printf(
+ "%-16s : %11.5f micros/op; %6.1f MB/s;%s %d / %d task(s) finished.\n",
+ benchmark, elapsedSeconds / stats.done_ * 1e6,
+ (stats.bytes_ / 1048576.0) / elapsedSeconds, extra,
+ taskFinishedCount, concurrentThreads);
+ }
+
+ public void generateKeyFromLong(byte[] slice, long n) {
+ assert(n >= 0);
+ int startPos = 0;
+
+ if (keysPerPrefix_ > 0) {
+ long numPrefix = (num_ + keysPerPrefix_ - 1) / keysPerPrefix_;
+ long prefix = n % numPrefix;
+ int bytesToFill = Math.min(prefixSize_, 8);
+ for (int i = 0; i < bytesToFill; ++i) {
+ slice[i] = (byte) (prefix % 256);
+ prefix /= 256;
+ }
+ // Pad any prefix bytes beyond the 8 taken from `prefix` with '0'.
+ for (int i = bytesToFill; i < prefixSize_; ++i) {
+ slice[i] = '0';
+ }
+ startPos = prefixSize_;
+ }
+
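+ // Fill the remainder of the key with n rendered as zero-padded decimal
+ // digits, least-significant digit in the last byte.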
+ for (int i = slice.length - 1; i >= startPos; --i) {
+ slice[i] = (byte) ('0' + (n % 10));
+ n /= 10;
+ }
+ }
+
+ private void destroyDb() {
+ if (db_ != null) {
+ db_.close();
+ }
+ // TODO(yhchiang): develop our own FileUtil
+ // FileUtil.deleteDir(databaseDir_);
+ }
+
+ private void printStats() {
+ }
+
+ static void printHelp() {
+ System.out.println("usage:");
+ for (Flag flag : Flag.values()) {
+ System.out.format(" --%s%n\t%s%n",
+ flag.name(),
+ flag.desc());
+ if (flag.getDefaultValue() != null) {
+ System.out.format("\tDEFAULT: %s%n",
+ flag.getDefaultValue().toString());
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ Map<Flag, Object> flags = new EnumMap<Flag, Object>(Flag.class);
+ for (Flag flag : Flag.values()) {
+ if (flag.getDefaultValue() != null) {
+ flags.put(flag, flag.getDefaultValue());
+ }
+ }
+ for (String arg : args) {
+ boolean valid = false;
+ if (arg.equals("--help") || arg.equals("-h")) {
+ printHelp();
+ System.exit(0);
+ }
+ if (arg.startsWith("--")) {
+ try {
+ String[] parts = arg.substring(2).split("=");
+ if (parts.length >= 1) {
+ Flag key = Flag.valueOf(parts[0]);
+ if (key != null) {
+ Object value = null;
+ if (parts.length >= 2) {
+ value = key.parseValue(parts[1]);
+ }
+ flags.put(key, value);
+ valid = true;
+ }
+ }
+ }
+ catch (Exception e) {
+ }
+ }
+ if (!valid) {
+ System.err.println("Invalid argument " + arg);
+ System.exit(1);
+ }
+ }
+ new DbBenchmark(flags).run();
+ }
+
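+ /**
+ * Command-line flags. Each constant carries a default value and a
+ * description, and overrides parseValue() to convert the string given
+ * on the command line (e.g. --num=100000) into the right type.
+ */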
+ private enum Flag {
+ benchmarks(Arrays.asList("fillseq", "readrandom", "fillrandom"),
+ "Comma-separated list of operations to run in the specified order\n"
+ + "\tActual benchmarks:\n"
+ + "\t\tfillseq -- write N values in sequential key order in async mode.\n"
+ + "\t\tfillrandom -- write N values in random key order in async mode.\n"
+ + "\t\tfillbatch -- write N/1000 batch where each batch has 1000 values\n"
+ + "\t\t in sequential key order in sync mode.\n"
+ + "\t\tfillsync -- write N/100 values in random key order in sync mode.\n"
+ + "\t\tfill100K -- write N/1000 100K values in random order in async mode.\n"
+ + "\t\treadseq -- read N times sequentially.\n"
+ + "\t\treadrandom -- read N times in random order.\n"
+ + "\t\treadhot -- read N times in random order from 1% section of DB.\n"
+ + "\t\treadwhilewriting -- measure the read performance of multiple readers\n"
+ + "\t\t with a bg single writer. The write rate of the bg\n"
+ + "\t\t is capped by --writes_per_second.\n"
+ + "\tMeta Operations:\n"
+ + "\t\tdelete -- delete DB") {
+ @Override public Object parseValue(String value) {
+ return new ArrayList<String>(Arrays.asList(value.split(",")));
+ }
+ },
+ compression_ratio(0.5d,
+ "Arrange to generate values that shrink to this fraction of\n" +
+ "\ttheir original size after compression.") {
+ @Override public Object parseValue(String value) {
+ return Double.parseDouble(value);
+ }
+ },
+ use_existing_db(false,
+ "If true, do not destroy the existing database. If you set this\n" +
+ "\tflag and also specify a benchmark that wants a fresh database,\n" +
+ "\tthat benchmark will fail.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ num(1000000,
+ "Number of key/values to place in database.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ threads(1,
+ "Number of concurrent threads to run.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ reads(null,
+ "Number of read operations to do. If negative, do --nums reads.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ key_size(16,
+ "The size of each key in bytes.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ value_size(100,
+ "The size of each value in bytes.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ write_buffer_size(4L * SizeUnit.MB,
+ "Number of bytes to buffer in memtable before compacting\n" +
+ "\t(initialized to default value by 'main'.)") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ max_write_buffer_number(2,
+ "The number of in-memory memtables. Each memtable is of size\n" +
+ "\twrite_buffer_size.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ prefix_size(0, "Controls the prefix size for HashSkipList, HashLinkedList,\n" +
+ "\tand plain table.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ keys_per_prefix(0, "Controls the average number of keys generated\n" +
+ "\tper prefix, 0 means no special handling of the prefix,\n" +
+ "\ti.e. use the prefix comes with the generated random number.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ memtablerep("skip_list",
+ "The memtable format. Available options are\n" +
+ "\tskip_list,\n" +
+ "\tvector,\n" +
+ "\thash_linkedlist,\n" +
+ "\thash_skiplist (prefix_hash.)") {
+ @Override public Object parseValue(String value) {
+ return value;
+ }
+ },
+ hash_bucket_count(SizeUnit.MB,
+ "The number of hash buckets used in the hash-bucket-based\n" +
+ "\tmemtables. Memtables that currently support this argument are\n" +
+ "\thash_linkedlist and hash_skiplist.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ writes_per_second(10000,
+ "The write-rate of the background writer used in the\n" +
+ "\t`readwhilewriting` benchmark. Non-positive number indicates\n" +
+ "\tusing an unbounded write-rate in `readwhilewriting` benchmark.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ use_plain_table(false,
+ "Use plain-table sst format.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ cache_size(-1L,
+ "Number of bytes to use as a cache of uncompressed data.\n" +
+ "\tNegative means use default settings.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ seed(0L,
+ "Seed base for random number generators.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ num_levels(7,
+ "The total number of levels.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ numdistinct(1000L,
+ "Number of distinct keys to use. Used in RandomWithVerify to\n" +
+ "\tread/write on fewer keys so that gets are more likely to find the\n" +
+ "\tkey and puts are more likely to update the same key.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ merge_keys(-1L,
+ "Number of distinct keys to use for MergeRandom and\n" +
+ "\tReadRandomMergeRandom.\n" +
+ "\tIf negative, there will be FLAGS_num keys.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ bloom_locality(0,"Control bloom filter probes locality.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ duration(0,"Time in seconds for the random-ops tests to run.\n" +
+ "\tWhen 0 then num & reads determine the test duration.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ num_multi_db(0,
+ "Number of DBs used in the benchmark. 0 means single DB.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ histogram(false,"Print histogram of operation timings.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ min_write_buffer_number_to_merge(
+ defaultOptions_.minWriteBufferNumberToMerge(),
+ "The minimum number of write buffers that will be merged together\n" +
+ "\tbefore writing to storage. This is cheap because it is an\n" +
+ "\tin-memory merge. If this feature is not enabled, then all these\n" +
+ "\twrite buffers are flushed to L0 as separate files and this\n" +
+ "\tincreases read amplification because a get request has to check\n" +
+ "\tin all of these files. Also, an in-memory merge may result in\n" +
+ "\twriting less data to storage if there are duplicate records\n" +
+ "\tin each of these individual write buffers.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ max_background_compactions(
+ defaultOptions_.maxBackgroundCompactions(),
+ "The maximum number of concurrent background compactions\n" +
+ "\tthat can occur in parallel.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ max_background_flushes(
+ defaultOptions_.maxBackgroundFlushes(),
+ "The maximum number of concurrent background flushes\n" +
+ "\tthat can occur in parallel.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ max_background_jobs(defaultOptions_.maxBackgroundJobs(),
+ "The maximum number of concurrent background jobs\n"
+ + "\tthat can occur in parallel.") {
+ @Override
+ public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ /* TODO(yhchiang): enable the following
+ compaction_style((int32_t) defaultOptions_.compactionStyle(),
+ "style of compaction: level-based vs universal.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },*/
+ universal_size_ratio(0,
+ "Percentage flexibility while comparing file size\n" +
+ "\t(for universal compaction only).") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ universal_min_merge_width(0,"The minimum number of files in a\n" +
+ "\tsingle compaction run (for universal compaction only).") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ universal_max_merge_width(0,"The max number of files to compact\n" +
+ "\tin universal style compaction.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ universal_max_size_amplification_percent(0,
+ "The max size amplification for universal style compaction.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ universal_compression_size_percent(-1,
+ "The percentage of the database to compress for universal\n" +
+ "\tcompaction. -1 means compress everything.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ block_size(defaultBlockBasedTableOptions_.blockSize(),
+ "Number of bytes in a block.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ compressed_cache_size(-1L,
+ "Number of bytes to use as a cache of compressed data.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ open_files(defaultOptions_.maxOpenFiles(),
+ "Maximum number of files to keep open at the same time\n" +
+ "\t(use default if == 0)") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ bloom_bits(-1,"Bloom filter bits per key. Negative means\n" +
+ "\tuse default settings.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ memtable_bloom_size_ratio(0.0d, "Ratio of memtable used by the bloom filter.\n"
+ + "\t0 means no bloom filter.") {
+ @Override public Object parseValue(String value) {
+ return Double.parseDouble(value);
+ }
+ },
+ cache_numshardbits(-1,"Number of shards for the block cache\n" +
+ "\tis 2 ** cache_numshardbits. Negative means use default settings.\n" +
+ "\tThis is applied only if FLAGS_cache_size is non-negative.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ verify_checksum(false,"Verify checksum for every block read\n" +
+ "\tfrom storage.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ statistics(false,"Database statistics.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ writes(-1L, "Number of write operations to do. If negative, do\n" +
+ "\t--num reads.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ sync(false,"Sync all writes to disk.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ use_fsync(false,"If true, issue fsync instead of fdatasync.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ disable_wal(false,"If true, do not write WAL for write.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ wal_dir("", "If not empty, use the given dir for WAL.") {
+ @Override public Object parseValue(String value) {
+ return value;
+ }
+ },
+ target_file_size_base(2 * 1048576,"Target file size at level-1") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ target_file_size_multiplier(1,
+ "A multiplier to compute target level-N file size (N >= 2)") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ max_bytes_for_level_base(10 * 1048576,
+ "Max bytes for level-1") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ max_bytes_for_level_multiplier(10.0d,
+ "A multiplier to compute max bytes for level-N (N >= 2)") {
+ @Override public Object parseValue(String value) {
+ return Double.parseDouble(value);
+ }
+ },
+ level0_stop_writes_trigger(12,"Number of files in level-0\n" +
+ "\tthat will trigger put stop.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ level0_slowdown_writes_trigger(8,"Number of files in level-0\n" +
+ "\tthat will slow down writes.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ level0_file_num_compaction_trigger(4,"Number of files in level-0\n" +
+ "\twhen compactions start.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ readwritepercent(90,"Ratio of reads to reads/writes (expressed\n" +
+ "\tas percentage) for the ReadRandomWriteRandom workload. The\n" +
+ "\tdefault value 90 means 90% operations out of all reads and writes\n" +
+ "\toperations are reads. In other words, 9 gets for every 1 put.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ mergereadpercent(70,"Ratio of merges to merges&reads (expressed\n" +
+ "\tas percentage) for the ReadRandomMergeRandom workload. The\n" +
+ "\tdefault value 70 means 70% out of all read and merge operations\n" +
+ "\tare merges. In other words, 7 merges for every 3 gets.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ deletepercent(2,"Percentage of deletes out of reads/writes/\n" +
+ "\tdeletes (used in RandomWithVerify only). RandomWithVerify\n" +
+ "\tcalculates writepercent as (100 - FLAGS_readwritepercent -\n" +
+ "\tdeletepercent), so deletepercent must be smaller than (100 -\n" +
+ "\tFLAGS_readwritepercent)") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ delete_obsolete_files_period_micros(0,"Option to delete\n" +
+ "\tobsolete files periodically. 0 means that obsolete files are\n" +
+ "\tdeleted after every compaction run.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ compression_type("snappy",
+ "Algorithm used to compress the database.") {
+ @Override public Object parseValue(String value) {
+ return value;
+ }
+ },
+ compression_level(-1,
+ "Compression level. For zlib this should be -1 for the\n" +
+ "\tdefault level, or between 0 and 9.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ min_level_to_compress(-1,"If non-negative, compression starts\n" +
+ "\tfrom this level. Levels with number < min_level_to_compress are\n" +
+ "\tnot compressed. Otherwise, apply compression_type to\n" +
+ "\tall levels.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ table_cache_numshardbits(4,"") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ stats_interval(0L, "Stats are reported every N operations when\n" +
+ "\tthis is greater than zero. When 0 the interval grows over time.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ stats_per_interval(0,"Reports additional stats per interval when\n" +
+ "\tthis is greater than 0.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ perf_level(0,"Level of perf collection.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ soft_rate_limit(0.0d,"") {
+ @Override public Object parseValue(String value) {
+ return Double.parseDouble(value);
+ }
+ },
+ hard_rate_limit(0.0d,"When not equal to 0 this make threads\n" +
+ "\tsleep at each stats reporting interval until the compaction\n" +
+ "\tscore for all levels is less than or equal to this value.") {
+ @Override public Object parseValue(String value) {
+ return Double.parseDouble(value);
+ }
+ },
+ rate_limit_delay_max_milliseconds(1000,
+ "When hard_rate_limit is set then this is the max time a put will\n" +
+ "\tbe stalled.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ max_compaction_bytes(0L, "Limit number of bytes in one compaction to be lower than this\n" +
+ "\threshold. But it's not guaranteed.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ readonly(false,"Run read only benchmarks.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ disable_auto_compactions(false,"Do not auto trigger compactions.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ wal_ttl_seconds(0L,"Set the TTL for the WAL Files in seconds.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ wal_size_limit_MB(0L,"Set the size limit for the WAL Files\n" +
+ "\tin MB.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ /* TODO(yhchiang): enable the following
+ direct_reads(rocksdb::EnvOptions().use_direct_reads,
+ "Allow direct I/O reads.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ direct_writes(rocksdb::EnvOptions().use_direct_writes,
+ "Allow direct I/O writes.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ */
+ mmap_read(false,
+ "Allow reads to occur via mmap-ing files.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ mmap_write(false,
+ "Allow writes to occur via mmap-ing files.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ advise_random_on_open(defaultOptions_.adviseRandomOnOpen(),
+ "Advise random access on table file open.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ compaction_fadvice("NORMAL",
+ "Access pattern advice when a file is compacted.") {
+ @Override public Object parseValue(String value) {
+ return value;
+ }
+ },
+ use_tailing_iterator(false,
+ "Use tailing iterator to access a series of keys instead of get.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ use_adaptive_mutex(defaultOptions_.useAdaptiveMutex(),
+ "Use adaptive mutex.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ bytes_per_sync(defaultOptions_.bytesPerSync(),
+ "Allows OS to incrementally sync files to disk while they are\n" +
+ "\tbeing written, in the background. Issue one request for every\n" +
+ "\tbytes_per_sync written. 0 turns it off.") {
+ @Override public Object parseValue(String value) {
+ return Long.parseLong(value);
+ }
+ },
+ filter_deletes(false," On true, deletes use bloom-filter and drop\n" +
+ "\tthe delete if key not present.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ max_successive_merges(0,"Maximum number of successive merge\n" +
+ "\toperations on a key in the memtable.") {
+ @Override public Object parseValue(String value) {
+ return Integer.parseInt(value);
+ }
+ },
+ db(getTempDir("rocksdb-jni"),
+ "Use the db with the following name.") {
+ @Override public Object parseValue(String value) {
+ return value;
+ }
+ },
+ use_mem_env(false, "Use RocksMemEnv instead of default filesystem based\n" +
+ "environment.") {
+ @Override public Object parseValue(String value) {
+ return parseBoolean(value);
+ }
+ },
+ java_comparator(null, "Class name of a Java Comparator to use instead\n" +
+ "\tof the default C++ ByteWiseComparatorImpl. Must be available on\n" +
+ "\tthe classpath") {
+ @Override
+ protected Object parseValue(final String value) {
+ try {
+ final ComparatorOptions copt = new ComparatorOptions();
+ final Class<AbstractComparator> clsComparator =
+ (Class<AbstractComparator>)Class.forName(value);
+ final Constructor cstr =
+ clsComparator.getConstructor(ComparatorOptions.class);
+ return cstr.newInstance(copt);
+ } catch(final ClassNotFoundException cnfe) {
+ throw new IllegalArgumentException("Java Comparator '" + value + "'" +
+ " not found on the classpath", cnfe);
+ } catch(final NoSuchMethodException nsme) {
+ throw new IllegalArgumentException("Java Comparator '" + value + "'" +
+ " does not have a public ComparatorOptions constructor", nsme);
+ } catch(final IllegalAccessException | InstantiationException
+ | InvocationTargetException ie) {
+ throw new IllegalArgumentException("Unable to construct Java" +
+ " Comparator '" + value + "'", ie);
+ }
+ }
+ };
+
+ private Flag(Object defaultValue, String desc) {
+ defaultValue_ = defaultValue;
+ desc_ = desc;
+ }
+
+ public Object getDefaultValue() {
+ return defaultValue_;
+ }
+
+ public String desc() {
+ return desc_;
+ }
+
+ public boolean parseBoolean(String value) {
+ if (value.equals("1")) {
+ return true;
+ } else if (value.equals("0")) {
+ return false;
+ }
+ return Boolean.parseBoolean(value);
+ }
+
+ protected abstract Object parseValue(String value);
+
+ private final Object defaultValue_;
+ private final String desc_;
+ }
+
+ private static final String DEFAULT_TEMP_DIR = "/tmp";
+
+ private static String getTempDir(final String dirName) {
+ try {
+ return Files.createTempDirectory(dirName).toAbsolutePath().toString();
+ } catch(final IOException ioe) {
+ System.err.println("Unable to create temp directory, defaulting to: " +
+ DEFAULT_TEMP_DIR);
+ return DEFAULT_TEMP_DIR + File.separator + dirName;
+ }
+ }
+
+ private static class RandomGenerator {
+ private final byte[] data_;
+ private int dataLength_;
+ private int position_;
+ private double compressionRatio_;
+ Random rand_;
+
+ private RandomGenerator(long seed, double compressionRatio) {
+ // We use a limited amount of data over and over again and ensure
+ // that it is larger than the compression window (32KB), and also
+ // large enough to serve all typical value sizes we want to write.
+ byte[] value = new byte[100];
+ rand_ = new Random(seed);
+ dataLength_ = value.length * 10000;
+ data_ = new byte[dataLength_];
+ compressionRatio_ = compressionRatio;
+ int pos = 0;
+ while (pos < dataLength_) {
+ compressibleBytes(value);
+ System.arraycopy(value, 0, data_, pos,
+ Math.min(value.length, dataLength_ - pos));
+ pos += value.length;
+ }
+ }
+
+ private void compressibleBytes(byte[] value) {
+ int baseLength = value.length;
+ if (compressionRatio_ < 1.0d) {
+ baseLength = (int) (compressionRatio_ * value.length + 0.5);
+ }
+ if (baseLength <= 0) {
+ baseLength = 1;
+ }
+ int pos;
+ for (pos = 0; pos < baseLength; ++pos) {
+ value[pos] = (byte) (' ' + rand_.nextInt(95)); // ' ' .. '~'
+ }
+ while (pos < value.length) {
+ System.arraycopy(value, 0, value, pos,
+ Math.min(baseLength, value.length - pos));
+ pos += baseLength;
+ }
+ }
+
+ private void generate(byte[] value) {
+ if (position_ + value.length > data_.length) {
+ position_ = 0;
+ assert(value.length <= data_.length);
+ }
+ position_ += value.length;
+ System.arraycopy(data_, position_ - value.length,
+ value, 0, value.length);
+ }
+ }
+
+ boolean isFinished() {
+ synchronized(finishLock_) {
+ return isFinished_;
+ }
+ }
+
+ void setFinished(boolean flag) {
+ synchronized(finishLock_) {
+ isFinished_ = flag;
+ }
+ }
+
+ RocksDB db_;
+ final List<String> benchmarks_;
+ final int num_;
+ final int reads_;
+ final int keySize_;
+ final int valueSize_;
+ final int threadNum_;
+ final int writesPerSeconds_;
+ final long randSeed_;
+ final boolean useExisting_;
+ final String databaseDir_;
+ double compressionRatio_;
+ RandomGenerator gen_;
+ long startTime_;
+
+ // env
+ boolean useMemenv_;
+
+ // memtable related
+ final int maxWriteBufferNumber_;
+ final int prefixSize_;
+ final int keysPerPrefix_;
+ final String memtable_;
+ final long hashBucketCount_;
+
+ // sst format related
+ boolean usePlainTable_;
+
+ Object finishLock_;
+ boolean isFinished_;
+ Map<Flag, Object> flags_;
+ // As the lifetime of a static member matches the lifetime of the program,
+ // we let its C++ pointer be disposed of in its finalizer.
+ static Options defaultOptions_ = new Options();
+ static BlockBasedTableConfig defaultBlockBasedTableOptions_ =
+ new BlockBasedTableConfig();
+ String compressionType_;
+ CompressionType compression_;
+}
diff --git a/src/rocksdb/java/crossbuild/Vagrantfile b/src/rocksdb/java/crossbuild/Vagrantfile
new file mode 100644
index 000000000..0ee50de2c
--- /dev/null
+++ b/src/rocksdb/java/crossbuild/Vagrantfile
@@ -0,0 +1,51 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
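+
+# These VMs cross-build the RocksJava native library; artifacts are collected
+# in ../target via the /rocksdb-build synced folder configured below.
+# A typical (assumed) invocation for a single target, e.g. 64-bit glibc Linux:
+#
+#   vagrant up linux64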
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+
+ config.vm.define "linux32" do |linux32|
+ linux32.vm.box = "bento/centos-6.10-i386"
+ linux32.vm.provision :shell, path: "build-linux-centos.sh"
+ end
+
+ config.vm.define "linux64" do |linux64|
+ linux64.vm.box = "bento/centos-6.10"
+ linux64.vm.provision :shell, path: "build-linux-centos.sh"
+ end
+
+ config.vm.define "linux32-musl" do |musl32|
+ musl32.vm.box = "alpine/alpine32"
+ musl32.vm.box_version = "3.6.0"
+ musl32.vm.provision :shell, path: "build-linux-alpine.sh"
+ end
+
+ config.vm.define "linux64-musl" do |musl64|
+ musl64.vm.box = "generic/alpine36"
+
+ ## Should use the alpine/alpine64 box, but this issue needs to be fixed first - https://github.com/hashicorp/vagrant/issues/11218
+ # musl64.vm.box = "alpine/alpine64"
+ # musl64.vm.box_version = "3.6.0"
+
+ musl64.vm.provision :shell, path: "build-linux-alpine.sh"
+ end
+
+ config.vm.provider "virtualbox" do |v|
+ v.memory = 2048
+ v.cpus = 4
+ v.customize ["modifyvm", :id, "--nictype1", "virtio" ]
+ end
+
+ if Vagrant.has_plugin?("vagrant-cachier")
+ config.cache.scope = :box
+ end
+ if Vagrant.has_plugin?("vagrant-vbguest")
+ config.vbguest.no_install = true
+ end
+
+ config.vm.synced_folder "../target", "/rocksdb-build"
+ config.vm.synced_folder "../..", "/rocksdb", type: "rsync"
+ config.vm.boot_timeout = 1200
+end
diff --git a/src/rocksdb/java/crossbuild/build-linux-alpine.sh b/src/rocksdb/java/crossbuild/build-linux-alpine.sh
new file mode 100755
index 000000000..561d34141
--- /dev/null
+++ b/src/rocksdb/java/crossbuild/build-linux-alpine.sh
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+
+set -e
+
+# update Alpine with latest versions
+echo '@edge http://nl.alpinelinux.org/alpine/edge/main' >> /etc/apk/repositories
+echo '@community http://nl.alpinelinux.org/alpine/edge/community' >> /etc/apk/repositories
+apk update
+apk upgrade
+
+# install CA certificates
+apk add ca-certificates
+
+# install build tools
+apk add \
+ build-base \
+ coreutils \
+ file \
+ git \
+ perl \
+ automake \
+ autoconf \
+ cmake
+
+# install tool dependencies for building RocksDB static library
+apk add \
+ curl \
+ bash \
+ wget \
+ tar \
+ openssl
+
+# install RocksDB dependencies
+apk add \
+ snappy snappy-dev \
+ zlib zlib-dev \
+ bzip2 bzip2-dev \
+ lz4 lz4-dev \
+ zstd zstd-dev \
+ linux-headers \
+ jemalloc jemalloc-dev
+
+# install OpenJDK7
+apk add openjdk7 \
+ && apk add java-cacerts \
+ && rm /usr/lib/jvm/java-1.7-openjdk/jre/lib/security/cacerts \
+ && ln -s /etc/ssl/certs/java/cacerts /usr/lib/jvm/java-1.7-openjdk/jre/lib/security/cacerts
+
+# cleanup
+rm -rf /var/cache/apk/*
+
+# puts javac in the PATH
+export JAVA_HOME=/usr/lib/jvm/java-1.7-openjdk
+export PATH=/usr/lib/jvm/java-1.7-openjdk/bin:$PATH
+
+# build and install gflags v2.0 from source
+cd /tmp &&\
+ git clone -b v2.0 --single-branch https://github.com/gflags/gflags.git &&\
+ cd gflags &&\
+ ./configure --prefix=/usr && make && make install &&\
+ rm -rf /tmp/*
+
+
+# build rocksdb
+cd /rocksdb
+make jclean clean
+PORTABLE=1 make -j8 rocksdbjavastatic
+cp /rocksdb/java/target/librocksdbjni-* /rocksdb-build
+cp /rocksdb/java/target/rocksdbjni-* /rocksdb-build
diff --git a/src/rocksdb/java/crossbuild/build-linux-centos.sh b/src/rocksdb/java/crossbuild/build-linux-centos.sh
new file mode 100755
index 000000000..176e3456c
--- /dev/null
+++ b/src/rocksdb/java/crossbuild/build-linux-centos.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+
+set -e
+
+# remove the fixed releasever variable present in the base boxes
+sudo rm -f /etc/yum/vars/releasever
+
+# enable EPEL
+sudo yum -y install epel-release
+
+# install all required packages for rocksdb that are available through yum
+sudo yum -y install openssl java-1.7.0-openjdk-devel zlib-devel bzip2-devel lz4-devel snappy-devel libzstd-devel jemalloc-devel cmake3
+
+# set up cmake3 as cmake binary
+sudo alternatives --install /usr/local/bin/cmake cmake /usr/bin/cmake 10 --slave /usr/local/bin/ctest ctest /usr/bin/ctest --slave /usr/local/bin/cpack cpack /usr/bin/cpack --slave /usr/local/bin/ccmake ccmake /usr/bin/ccmake
+sudo alternatives --install /usr/local/bin/cmake cmake /usr/bin/cmake3 20 --slave /usr/local/bin/ctest ctest /usr/bin/ctest3 --slave /usr/local/bin/cpack cpack /usr/bin/cpack3 --slave /usr/local/bin/ccmake ccmake /usr/bin/ccmake3
+
+# install gcc/g++ 4.8.2 from tru/devtools-2
+sudo wget -O /etc/yum.repos.d/devtools-2.repo https://people.centos.org/tru/devtools-2/devtools-2.repo
+sudo yum -y install devtoolset-2-binutils devtoolset-2-gcc devtoolset-2-gcc-c++
+
+# install gflags
+wget https://github.com/gflags/gflags/archive/v2.0.tar.gz -O gflags-2.0.tar.gz
+tar xvfz gflags-2.0.tar.gz; cd gflags-2.0; scl enable devtoolset-2 ./configure; scl enable devtoolset-2 make; sudo make install
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
+
+# set java home so we can build rocksdb jars
+export JAVA_HOME=/usr/lib/jvm/java-1.7.0
+
+export PATH=$JAVA_HOME:/usr/local/bin:$PATH
+
+# build rocksdb
+cd /rocksdb
+scl enable devtoolset-2 'make clean-not-downloaded'
+scl enable devtoolset-2 'PORTABLE=1 make -j8 rocksdbjavastatic'
+cp /rocksdb/java/target/librocksdbjni-* /rocksdb-build
+cp /rocksdb/java/target/rocksdbjni-* /rocksdb-build
diff --git a/src/rocksdb/java/crossbuild/build-linux.sh b/src/rocksdb/java/crossbuild/build-linux.sh
new file mode 100755
index 000000000..74178adb5
--- /dev/null
+++ b/src/rocksdb/java/crossbuild/build-linux.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+# install all required packages for rocksdb
+sudo apt-get update
+sudo apt-get -y install git make gcc g++ libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev default-jdk
+
+# set java home so we can build rocksdb jars
+export JAVA_HOME=$(echo /usr/lib/jvm/java-7-openjdk*)
+cd /rocksdb
+make jclean clean
+make -j 4 rocksdbjavastatic
+cp /rocksdb/java/target/librocksdbjni-* /rocksdb-build
+cp /rocksdb/java/target/rocksdbjni-* /rocksdb-build
+sudo shutdown -h now
+
diff --git a/src/rocksdb/java/crossbuild/docker-build-linux-alpine.sh b/src/rocksdb/java/crossbuild/docker-build-linux-alpine.sh
new file mode 100755
index 000000000..e605c7716
--- /dev/null
+++ b/src/rocksdb/java/crossbuild/docker-build-linux-alpine.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
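+#
+# Builds rocksdbjavastatic inside an Alpine container. Based on the paths
+# used below, the container is assumed to be started with the RocksDB
+# sources mounted at /rocksdb-host and an output directory mounted at
+# /rocksdb-java-target, e.g. (image name is illustrative):
+#
+#   docker run --rm -v "$PWD":/rocksdb-host -v "$PWD/java/target":/rocksdb-java-target rocksdb-alpine-build ./docker-build-linux-alpine.sh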
+
+set -e
+#set -x
+
+# just in case this is run outside Docker
+mkdir -p /rocksdb-local-build
+
+rm -rf /rocksdb-local-build/*
+cp -r /rocksdb-host/* /rocksdb-local-build
+cd /rocksdb-local-build
+
+make clean-not-downloaded
+PORTABLE=1 make rocksdbjavastatic
+
+cp java/target/librocksdbjni-linux*.so java/target/rocksdbjni-*-linux*.jar /rocksdb-java-target
+
diff --git a/src/rocksdb/java/crossbuild/docker-build-linux-centos.sh b/src/rocksdb/java/crossbuild/docker-build-linux-centos.sh
new file mode 100755
index 000000000..c4217785f
--- /dev/null
+++ b/src/rocksdb/java/crossbuild/docker-build-linux-centos.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+
+set -e
+#set -x
+
+# just in case this is run outside Docker
+mkdir -p /rocksdb-local-build
+
+rm -rf /rocksdb-local-build/*
+cp -r /rocksdb-host/* /rocksdb-local-build
+cd /rocksdb-local-build
+
+# Use scl devtoolset if available
+if hash scl 2>/dev/null; then
+ if scl --list | grep -q 'devtoolset-7'; then
+ # CentOS 7+
+ scl enable devtoolset-7 'make clean-not-downloaded'
+ scl enable devtoolset-7 'PORTABLE=1 make -j2 rocksdbjavastatic'
+ elif scl --list | grep -q 'devtoolset-2'; then
+ # CentOS 5 or 6
+ scl enable devtoolset-2 'make clean-not-downloaded'
+ scl enable devtoolset-2 'PORTABLE=1 make -j2 rocksdbjavastatic'
+ else
+ echo "Could not find devtoolset"
+ exit 1;
+ fi
+else
+ make clean-not-downloaded
+ PORTABLE=1 make -j2 rocksdbjavastatic
+fi
+
+cp java/target/librocksdbjni-linux*.so java/target/rocksdbjni-*-linux*.jar /rocksdb-java-target
+
diff --git a/src/rocksdb/java/jdb_bench.sh b/src/rocksdb/java/jdb_bench.sh
new file mode 100755
index 000000000..5dfc385e3
--- /dev/null
+++ b/src/rocksdb/java/jdb_bench.sh
@@ -0,0 +1,13 @@
+# shellcheck disable=SC2148
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
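+#
+# Runs org.rocksdb.benchmark.DbBenchmark against the jar built in ./target.
+# All arguments are passed straight through to DbBenchmark; for example
+# (flag names assumed from DbBenchmark's Flag enum):
+#
+#   ./jdb_bench.sh --benchmarks=fillseq,readrandom --num=1000000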
+PLATFORM=64
+if [ `getconf LONG_BIT` != "64" ]
+then
+ PLATFORM=32
+fi
+
+ROCKS_JAR=$(find target -name "rocksdbjni*.jar")
+
+echo "Running benchmark in $PLATFORM-Bit mode."
+# shellcheck disable=SC2068
+java -server -d$PLATFORM -XX:NewSize=4m -XX:+AggressiveOpts -Djava.library.path=target -cp "${ROCKS_JAR}:benchmark/target/classes" org.rocksdb.benchmark.DbBenchmark $@
diff --git a/src/rocksdb/java/jmh/LICENSE-HEADER.txt b/src/rocksdb/java/jmh/LICENSE-HEADER.txt
new file mode 100644
index 000000000..365ee653b
--- /dev/null
+++ b/src/rocksdb/java/jmh/LICENSE-HEADER.txt
@@ -0,0 +1,5 @@
+Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+ This source code is licensed under both the GPLv2 (found in the
+ COPYING file in the root directory) and Apache 2.0 License
+ (found in the LICENSE.Apache file in the root directory).
+
diff --git a/src/rocksdb/java/jmh/README.md b/src/rocksdb/java/jmh/README.md
new file mode 100644
index 000000000..f1ed0c686
--- /dev/null
+++ b/src/rocksdb/java/jmh/README.md
@@ -0,0 +1,18 @@
+# JMH Benchmarks for RocksJava
+
+These are micro-benchmarks for RocksJava functionality, using [JMH (Java Microbenchmark Harness)](https://openjdk.java.net/projects/code-tools/jmh/).
+
+## Compiling
+
+**Note**: This uses a specific build of RocksDB that is set in the `<version>` element of the `dependencies` section of the `pom.xml` file. If you are testing local changes, you should build and install a SNAPSHOT version of rocksdbjni, and update the `<version>` in this `pom.xml` to match.
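+
+For example, a locally built rocksdbjni SNAPSHOT jar can be installed into
+the local Maven repository with something like the following (the path and
+version are illustrative):
+
+```bash
+$ mvn install:install-file -Dfile=../target/rocksdbjni-6.6.0-SNAPSHOT.jar \
+    -DgroupId=org.rocksdb -DartifactId=rocksdbjni \
+    -Dversion=6.6.0-SNAPSHOT -Dpackaging=jar
+```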
+
+```bash
+$ mvn package
+```
+
+## Running
+```bash
+$ java -jar target/rocksdbjni-jmh-1.0-SNAPSHOT-benchmarks.jar
+```
+
+NOTE: you can append `-help` to the command above to see all of the JMH runtime options.
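+
+For example, to run only the `GetBenchmarks` with a single parameter value
+(standard JMH options, shown here as an illustration):
+
+```bash
+$ java -jar target/rocksdbjni-jmh-1.0-SNAPSHOT-benchmarks.jar \
+    -p columnFamilyTestType=no_column_family GetBenchmarks
+```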
diff --git a/src/rocksdb/java/jmh/pom.xml b/src/rocksdb/java/jmh/pom.xml
new file mode 100644
index 000000000..62671091c
--- /dev/null
+++ b/src/rocksdb/java/jmh/pom.xml
@@ -0,0 +1,138 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>org.rocksdb</groupId>
+ <artifactId>rocksdbjni-jmh</artifactId>
+ <version>1.0-SNAPSHOT</version>
+
+ <url>http://rocksdb.org/</url>
+
+ <name>rocksdbjni-jmh</name>
+ <description>JMH Benchmarks for RocksDB Java API</description>
+
+ <organization>
+ <name>Facebook, Inc.</name>
+ <url>https://www.facebook.com</url>
+ </organization>
+
+ <licenses>
+ <license>
+ <name>Apache License 2.0</name>
+ <url>http://www.apache.org/licenses/LICENSE-2.0.html</url>
+ <distribution>repo</distribution>
+ </license>
+ <license>
+ <name>GNU General Public License, version 2</name>
+ <url>http://www.gnu.org/licenses/gpl-2.0.html</url>
+ <distribution>repo</distribution>
+ </license>
+ </licenses>
+
+ <scm>
+ <connection>scm:git:git://github.com/facebook/rocksdb.git</connection>
+ <developerConnection>scm:git:git@github.com:facebook/rocksdb.git</developerConnection>
+ <url>http://github.com/facebook/rocksdb/</url>
+ </scm>
+
+ <properties>
+ <project.build.source>1.7</project.build.source>
+ <project.build.target>1.7</project.build.target>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+
+ <jmh.version>1.22</jmh.version>
+ <uberjar.name>benchmarks</uberjar.name>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.rocksdb</groupId>
+ <artifactId>rocksdbjni</artifactId>
+ <version>6.6.0-SNAPSHOT</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-core</artifactId>
+ <version>${jmh.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-generator-annprocess</artifactId>
+ <version>${jmh.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.8.1</version>
+ <configuration>
+ <source>${project.build.source}</source>
+ <target>${project.build.target}</target>
+ <encoding>${project.build.sourceEncoding}</encoding>
+ </configuration>
+ </plugin>
+
+ <plugin>
+ <groupId>com.mycila</groupId>
+ <artifactId>license-maven-plugin</artifactId>
+ <version>3.0</version>
+ <inherited>true</inherited>
+ <configuration>
+ <header>LICENSE-HEADER.txt</header>
+ <failIfMissing>true</failIfMissing>
+ <aggregate>true</aggregate>
+ <strictCheck>true</strictCheck>
+ <excludes>
+ <exclude>pom.xml</exclude>
+ </excludes>
+ <encoding>${project.build.sourceEncoding}</encoding>
+ </configuration>
+ </plugin>
+
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <version>3.2.1</version>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <configuration>
+ <finalName>${project.artifactId}-${project.version}-${uberjar.name}</finalName>
+ <transformers>
+ <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+ <mainClass>org.openjdk.jmh.Main</mainClass>
+ </transformer>
+ </transformers>
+ <filters>
+ <filter>
+ <!--
+ Shading signed JARs will fail without this.
+ http://stackoverflow.com/questions/999489/invalid-signature-file-when-attempting-to-run-a-jar
+ -->
+ <artifact>*:*</artifact>
+ <excludes>
+ <exclude>META-INF/*.SF</exclude>
+ <exclude>META-INF/*.DSA</exclude>
+ <exclude>META-INF/*.RSA</exclude>
+ </excludes>
+ </filter>
+ </filters>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
+ </plugins>
+ </build>
+
+</project> \ No newline at end of file
diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java
new file mode 100644
index 000000000..1973b5487
--- /dev/null
+++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java
@@ -0,0 +1,139 @@
+/**
+ * Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+ * This source code is licensed under both the GPLv2 (found in the
+ * COPYING file in the root directory) and Apache 2.0 License
+ * (found in the LICENSE.Apache file in the root directory).
+ */
+package org.rocksdb.jmh;
+
+import org.openjdk.jmh.annotations.*;
+import org.rocksdb.*;
+import org.rocksdb.util.BytewiseComparator;
+import org.rocksdb.util.FileUtils;
+import org.rocksdb.util.ReverseBytewiseComparator;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.rocksdb.util.KVUtils.ba;
+
+@State(Scope.Benchmark)
+public class ComparatorBenchmarks {
+
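+ // Each parameter name below encodes a configuration that setup() parses
+ // back out of the string: native vs Java comparator, bytewise vs reverse
+ // ordering, direct vs non-direct buffers, the reused buffer size (or
+ // no-reuse), and the synchronisation type guarding reused buffers.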
+ @Param({
+ "native_bytewise",
+ "native_reverse_bytewise",
+
+ "java_bytewise_non-direct_reused-64_adaptive-mutex",
+ "java_bytewise_non-direct_reused-64_non-adaptive-mutex",
+ "java_bytewise_non-direct_reused-64_thread-local",
+ "java_bytewise_direct_reused-64_adaptive-mutex",
+ "java_bytewise_direct_reused-64_non-adaptive-mutex",
+ "java_bytewise_direct_reused-64_thread-local",
+ "java_bytewise_non-direct_no-reuse",
+ "java_bytewise_direct_no-reuse",
+
+ "java_reverse_bytewise_non-direct_reused-64_adaptive-mutex",
+ "java_reverse_bytewise_non-direct_reused-64_non-adaptive-mutex",
+ "java_reverse_bytewise_non-direct_reused-64_thread-local",
+ "java_reverse_bytewise_direct_reused-64_adaptive-mutex",
+ "java_reverse_bytewise_direct_reused-64_non-adaptive-mutex",
+ "java_reverse_bytewise_direct_reused-64_thread-local",
+ "java_reverse_bytewise_non-direct_no-reuse",
+ "java_reverse_bytewise_direct_no-reuse"
+ })
+ public String comparatorName;
+
+ Path dbDir;
+ ComparatorOptions comparatorOptions;
+ AbstractComparator comparator;
+ Options options;
+ RocksDB db;
+
+ @Setup(Level.Trial)
+ public void setup() throws IOException, RocksDBException {
+ RocksDB.loadLibrary();
+
+ dbDir = Files.createTempDirectory("rocksjava-comparator-benchmarks");
+
+ options = new Options()
+ .setCreateIfMissing(true);
+
+ if ("native_bytewise".equals(comparatorName)) {
+ options.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
+
+ } else if ("native_reverse_bytewise".equals(comparatorName)) {
+ options.setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR);
+
+ } else if (comparatorName.startsWith("java_")) {
+ comparatorOptions = new ComparatorOptions();
+
+ if (comparatorName.indexOf("non-direct") > -1) {
+ comparatorOptions.setUseDirectBuffer(false);
+ } else if (comparatorName.indexOf("direct") > -1) {
+ comparatorOptions.setUseDirectBuffer(true);
+ }
+
+ if (comparatorName.indexOf("no-reuse") > -1) {
+ comparatorOptions.setMaxReusedBufferSize(-1);
+ } else if (comparatorName.indexOf("_reused-") > -1) {
+ final int idx = comparatorName.indexOf("_reused-");
+ String s = comparatorName.substring(idx + 8);
+ s = s.substring(0, s.indexOf('_'));
+ comparatorOptions.setMaxReusedBufferSize(Integer.parseInt(s));
+ }
+
+ if (comparatorName.indexOf("non-adaptive-mutex") > -1) {
+ comparatorOptions.setReusedSynchronisationType(ReusedSynchronisationType.MUTEX);
+ } else if (comparatorName.indexOf("adaptive-mutex") > -1) {
+ comparatorOptions.setReusedSynchronisationType(ReusedSynchronisationType.ADAPTIVE_MUTEX);
+ } else if (comparatorName.indexOf("thread-local") > -1) {
+ comparatorOptions.setReusedSynchronisationType(ReusedSynchronisationType.THREAD_LOCAL);
+ }
+
+ if (comparatorName.startsWith("java_bytewise")) {
+ comparator = new BytewiseComparator(comparatorOptions);
+ } else if (comparatorName.startsWith("java_reverse_bytewise")) {
+ comparator = new ReverseBytewiseComparator(comparatorOptions);
+ }
+
+ options.setComparator(comparator);
+
+ } else {
+ throw new IllegalArgumentException("Unknown comparatorName: " + comparatorName);
+ }
+
+ db = RocksDB.open(options, dbDir.toAbsolutePath().toString());
+ }
+
+ @TearDown(Level.Trial)
+ public void cleanup() throws IOException {
+ db.close();
+ if (comparator != null) {
+ comparator.close();
+ }
+ if (comparatorOptions != null) {
+ comparatorOptions.close();
+ }
+ options.close();
+ FileUtils.delete(dbDir);
+ }
+
+ @State(Scope.Benchmark)
+ public static class Counter {
+ private final AtomicInteger count = new AtomicInteger();
+
+ public int next() {
+ return count.getAndIncrement();
+ }
+ }
+
+
+ @Benchmark
+ public void put(final Counter counter) throws RocksDBException {
+ final int i = counter.next();
+ db.put(ba("key" + i), ba("value" + i));
+ }
+}
diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java
new file mode 100644
index 000000000..e34005c2f
--- /dev/null
+++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java
@@ -0,0 +1,139 @@
+/**
+ * Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+ * This source code is licensed under both the GPLv2 (found in the
+ * COPYING file in the root directory) and Apache 2.0 License
+ * (found in the LICENSE.Apache file in the root directory).
+ */
+package org.rocksdb.jmh;
+
+import org.openjdk.jmh.annotations.*;
+import org.rocksdb.*;
+import org.rocksdb.util.FileUtils;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.rocksdb.util.KVUtils.ba;
+
+@State(Scope.Benchmark)
+public class GetBenchmarks {
+
+ @Param({
+ "no_column_family",
+ "1_column_family",
+ "20_column_families",
+ "100_column_families"
+ })
+ String columnFamilyTestType;
+
+ @Param("100000")
+ int keyCount;
+
+ Path dbDir;
+ DBOptions options;
+ int cfs = 0; // number of column families
+ private AtomicInteger cfHandlesIdx;
+ ColumnFamilyHandle[] cfHandles;
+ RocksDB db;
+ private final AtomicInteger keyIndex = new AtomicInteger();
+
+ @Setup(Level.Trial)
+ public void setup() throws IOException, RocksDBException {
+ RocksDB.loadLibrary();
+
+ dbDir = Files.createTempDirectory("rocksjava-get-benchmarks");
+
+ options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+
+ final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
+ cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+
+ if ("1_column_family".equals(columnFamilyTestType)) {
+ cfs = 1;
+ } else if ("20_column_families".equals(columnFamilyTestType)) {
+ cfs = 20;
+ } else if ("100_column_families".equals(columnFamilyTestType)) {
+ cfs = 100;
+ }
+
+ if (cfs > 0) {
+ cfHandlesIdx = new AtomicInteger(1);
+ for (int i = 1; i <= cfs; i++) {
+ cfDescriptors.add(new ColumnFamilyDescriptor(ba("cf" + i)));
+ }
+ }
+
+ final List<ColumnFamilyHandle> cfHandlesList = new ArrayList<>(cfDescriptors.size());
+ db = RocksDB.open(options, dbDir.toAbsolutePath().toString(), cfDescriptors, cfHandlesList);
+ cfHandles = cfHandlesList.toArray(new ColumnFamilyHandle[0]);
+
+ // store initial data for retrieving via get
+ for (int i = 0; i < cfs; i++) {
+ for (int j = 0; j < keyCount; j++) {
+ db.put(cfHandles[i], ba("key" + j), ba("value" + j));
+ }
+ }
+
+ try (final FlushOptions flushOptions = new FlushOptions()
+ .setWaitForFlush(true)) {
+ db.flush(flushOptions);
+ }
+ }
+
+ @TearDown(Level.Trial)
+ public void cleanup() throws IOException {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ db.close();
+ options.close();
+ FileUtils.delete(dbDir);
+ }
+
+ private ColumnFamilyHandle getColumnFamily() {
+ if (cfs == 0) {
+ return cfHandles[0];
+ } else if (cfs == 1) {
+ return cfHandles[1];
+ } else {
+ int idx = cfHandlesIdx.getAndIncrement();
+ if (idx > cfs) {
+ cfHandlesIdx.set(1); // doesn't ensure a perfect distribution, but it's ok
+ idx = 0;
+ }
+ return cfHandles[idx];
+ }
+ }
+
+ /**
+ * Takes the next position in the index.
+ */
+ private int next() {
+ int idx;
+ int nextIdx;
+ while (true) {
+ idx = keyIndex.get();
+ nextIdx = idx + 1;
+ if (nextIdx >= keyCount) {
+ nextIdx = 0;
+ }
+
+ if (keyIndex.compareAndSet(idx, nextIdx)) {
+ break;
+ }
+ }
+ return idx;
+ }
+
+ @Benchmark
+ public byte[] get() throws RocksDBException {
+ final int keyIdx = next();
+ return db.get(getColumnFamily(), ba("key" + keyIdx));
+ }
+}
diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java
new file mode 100644
index 000000000..60a0de87f
--- /dev/null
+++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java
@@ -0,0 +1,158 @@
+/**
+ * Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+ * This source code is licensed under both the GPLv2 (found in the
+ * COPYING file in the root directory) and Apache 2.0 License
+ * (found in the LICENSE.Apache file in the root directory).
+ */
+package org.rocksdb.jmh;
+
+import org.openjdk.jmh.annotations.*;
+import org.rocksdb.*;
+import org.rocksdb.util.FileUtils;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.rocksdb.util.KVUtils.ba;
+import static org.rocksdb.util.KVUtils.keys;
+
+@State(Scope.Benchmark)
+public class MultiGetBenchmarks {
+
+ @Param({
+ "no_column_family",
+ "1_column_family",
+ "20_column_families",
+ "100_column_families"
+ })
+ String columnFamilyTestType;
+
+ @Param("100000")
+ int keyCount;
+
+ @Param({
+ "10",
+ "100",
+ "1000",
+ "10000",
+ })
+ int multiGetSize;
+
+ Path dbDir;
+ DBOptions options;
+ int cfs = 0; // number of column families
+ private AtomicInteger cfHandlesIdx;
+ ColumnFamilyHandle[] cfHandles;
+ RocksDB db;
+ private final AtomicInteger keyIndex = new AtomicInteger();
+
+ @Setup(Level.Trial)
+ public void setup() throws IOException, RocksDBException {
+ RocksDB.loadLibrary();
+
+ dbDir = Files.createTempDirectory("rocksjava-multiget-benchmarks");
+
+ options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+
+ final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
+ cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+
+ if ("1_column_family".equals(columnFamilyTestType)) {
+ cfs = 1;
+ } else if ("20_column_families".equals(columnFamilyTestType)) {
+ cfs = 20;
+ } else if ("100_column_families".equals(columnFamilyTestType)) {
+ cfs = 100;
+ }
+
+ if (cfs > 0) {
+ cfHandlesIdx = new AtomicInteger(1);
+ for (int i = 1; i <= cfs; i++) {
+ cfDescriptors.add(new ColumnFamilyDescriptor(ba("cf" + i)));
+ }
+ }
+
+ final List<ColumnFamilyHandle> cfHandlesList = new ArrayList<>(cfDescriptors.size());
+ db = RocksDB.open(options, dbDir.toAbsolutePath().toString(), cfDescriptors, cfHandlesList);
+ cfHandles = cfHandlesList.toArray(new ColumnFamilyHandle[0]);
+
+ // store initial data for retrieving via get
+ for (int i = 0; i < cfs; i++) {
+ for (int j = 0; j < keyCount; j++) {
+ db.put(cfHandles[i], ba("key" + j), ba("value" + j));
+ }
+ }
+
+ try (final FlushOptions flushOptions = new FlushOptions()
+ .setWaitForFlush(true)) {
+ db.flush(flushOptions);
+ }
+ }
+
+ @TearDown(Level.Trial)
+ public void cleanup() throws IOException {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ db.close();
+ options.close();
+ FileUtils.delete(dbDir);
+ }
+
+ private ColumnFamilyHandle getColumnFamily() {
+ if (cfs == 0) {
+ return cfHandles[0];
+ } else if (cfs == 1) {
+ return cfHandles[1];
+ } else {
+ int idx = cfHandlesIdx.getAndIncrement();
+ if (idx > cfs) {
+ cfHandlesIdx.set(1); // doesn't ensure a perfect distribution, but it's ok
+ idx = 0;
+ }
+ return cfHandles[idx];
+ }
+ }
+
+ /**
+ * Reserves the next {@code inc} positions in the index.
+ *
+ * @param inc the number by which to increment the index
+ * @param limit the limit for the index
+ * @return the index before {@code inc} is added
+ */
+ private int next(final int inc, final int limit) {
+ int idx;
+ int nextIdx;
+ while (true) {
+ idx = keyIndex.get();
+ nextIdx = idx + inc;
+ if (nextIdx >= limit) {
+ nextIdx = inc;
+ }
+
+ if (keyIndex.compareAndSet(idx, nextIdx)) {
+ break;
+ }
+ }
+
+ if (nextIdx >= limit) {
+ return -1;
+ } else {
+ return idx;
+ }
+ }
+
+ @Benchmark
+ public List<byte[]> multiGet10() throws RocksDBException {
+ final int fromKeyIdx = next(multiGetSize, keyCount);
+ final List<byte[]> keys = keys(fromKeyIdx, fromKeyIdx + multiGetSize);
+ return db.multiGetAsList(keys);
+ }
+}
diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java
new file mode 100644
index 000000000..5aae21cb9
--- /dev/null
+++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java
@@ -0,0 +1,112 @@
+/**
+ * Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+ * This source code is licensed under both the GPLv2 (found in the
+ * COPYING file in the root directory) and Apache 2.0 License
+ * (found in the LICENSE.Apache file in the root directory).
+ */
+package org.rocksdb.jmh;
+
+import org.openjdk.jmh.annotations.*;
+import org.rocksdb.*;
+import org.rocksdb.util.FileUtils;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.rocksdb.util.KVUtils.ba;
+
+@State(Scope.Benchmark)
+public class PutBenchmarks {
+
+ @Param({
+ "no_column_family",
+ "1_column_family",
+ "20_column_families",
+ "100_column_families"
+ })
+ String columnFamilyTestType;
+
+ Path dbDir;
+ DBOptions options;
+ int cfs = 0; // number of column families
+ private AtomicInteger cfHandlesIdx;
+ ColumnFamilyHandle[] cfHandles;
+ RocksDB db;
+
+ @Setup(Level.Trial)
+ public void setup() throws IOException, RocksDBException {
+ RocksDB.loadLibrary();
+
+ dbDir = Files.createTempDirectory("rocksjava-put-benchmarks");
+
+ options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+
+ final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
+ cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+
+ if ("1_column_family".equals(columnFamilyTestType)) {
+ cfs = 1;
+ } else if ("20_column_families".equals(columnFamilyTestType)) {
+ cfs = 20;
+ } else if ("100_column_families".equals(columnFamilyTestType)) {
+ cfs = 100;
+ }
+
+ if (cfs > 0) {
+ cfHandlesIdx = new AtomicInteger(1);
+ for (int i = 1; i <= cfs; i++) {
+ cfDescriptors.add(new ColumnFamilyDescriptor(ba("cf" + i)));
+ }
+ }
+
+ final List<ColumnFamilyHandle> cfHandlesList = new ArrayList<>(cfDescriptors.size());
+ db = RocksDB.open(options, dbDir.toAbsolutePath().toString(), cfDescriptors, cfHandlesList);
+ cfHandles = cfHandlesList.toArray(new ColumnFamilyHandle[0]);
+ }
+
+ @TearDown(Level.Trial)
+ public void cleanup() throws IOException {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ db.close();
+ options.close();
+ FileUtils.delete(dbDir);
+ }
+
+ private ColumnFamilyHandle getColumnFamily() {
+ if (cfs == 0) {
+ return cfHandles[0];
+ } else if (cfs == 1) {
+ return cfHandles[1];
+ } else {
+ int idx = cfHandlesIdx.getAndIncrement();
+ if (idx > cfs) {
+ cfHandlesIdx.set(1); // doesn't ensure a perfect distribution, but it's ok
+ idx = 0;
+ }
+ return cfHandles[idx];
+ }
+ }
+
+ @State(Scope.Benchmark)
+ public static class Counter {
+ private final AtomicInteger count = new AtomicInteger();
+
+ public int next() {
+ return count.getAndIncrement();
+ }
+ }
+
+ @Benchmark
+ public void put(final ComparatorBenchmarks.Counter counter) throws RocksDBException {
+ final int i = counter.next();
+ db.put(getColumnFamily(), ba("key" + i), ba("value" + i));
+ }
+}
diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java
new file mode 100644
index 000000000..63744a14f
--- /dev/null
+++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java
@@ -0,0 +1,59 @@
+/**
+ * Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+ * This source code is licensed under both the GPLv2 (found in the
+ * COPYING file in the root directory) and Apache 2.0 License
+ * (found in the LICENSE.Apache file in the root directory).
+ */
+package org.rocksdb.util;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+
+public final class FileUtils {
+ private static final SimpleFileVisitor<Path> DELETE_DIR_VISITOR = new DeleteDirVisitor();
+
+ /**
+ * Deletes a path from the filesystem
+ *
+ * If the path is a directory its contents
+ * will be recursively deleted before it itself
+ * is deleted.
+ *
+ * Note that removal of a directory is not an atomic operation,
+ * so if an error occurs during removal, some of the directory's
+ * descendants may have already been removed.
+ *
+ * @param path the path to delete.
+ *
+ * @throws IOException if an error occurs whilst removing a file or directory
+ */
+ public static void delete(final Path path) throws IOException {
+ if (!Files.isDirectory(path)) {
+ Files.deleteIfExists(path);
+ } else {
+ Files.walkFileTree(path, DELETE_DIR_VISITOR);
+ }
+ }
+
+ private static class DeleteDirVisitor extends SimpleFileVisitor<Path> {
+ @Override
+ public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
+ Files.deleteIfExists(file);
+ return FileVisitResult.CONTINUE;
+ }
+
+ @Override
+ public FileVisitResult postVisitDirectory(final Path dir, final IOException exc) throws IOException {
+ if (exc != null) {
+ throw exc;
+ }
+
+ Files.deleteIfExists(dir);
+ return FileVisitResult.CONTINUE;
+ }
+ }
+}
diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java
new file mode 100644
index 000000000..848de5d82
--- /dev/null
+++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java
@@ -0,0 +1,58 @@
+/**
+ * Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+ * This source code is licensed under both the GPLv2 (found in the
+ * COPYING file in the root directory) and Apache 2.0 License
+ * (found in the LICENSE.Apache file in the root directory).
+ */
+package org.rocksdb.util;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+public final class KVUtils {
+
+ /**
+ * Get a byte array from a string.
+ *
+ * Assumes UTF-8 encoding
+ *
+ * @param string the string
+ *
+ * @return the bytes.
+ */
+ public static byte[] ba(final String string) {
+ return string.getBytes(UTF_8);
+ }
+
+ /**
+ * Get a string from a byte array.
+ *
+ * Assumes UTF-8 encoding
+ *
+ * @param bytes the bytes
+ *
+ * @return the string.
+ */
+ public static String str(final byte[] bytes) {
+ return new String(bytes, UTF_8);
+ }
+
+ /**
+ * Get a list of keys named "key&lt;i&gt;" for each {@code i} in the
+ * range {@code from} (inclusive) to {@code to} (exclusive).
+ *
+ * @param from the first key
+ * @param to the last key
+ *
+ * @return the array of keys
+ */
+ public static List<byte[]> keys(final int from, final int to) {
+ final List<byte[]> keys = new ArrayList<>(to - from);
+ for (int i = from; i < to; i++) {
+ keys.add(ba("key" + i));
+ }
+ return keys;
+ }
+}
diff --git a/src/rocksdb/java/rocksjni.pom b/src/rocksdb/java/rocksjni.pom
new file mode 100644
index 000000000..5defdca7d
--- /dev/null
+++ b/src/rocksdb/java/rocksjni.pom
@@ -0,0 +1,150 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+ xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <modelVersion>4.0.0</modelVersion>
+ <name>RocksDB JNI</name>
+ <url>http://rocksdb.org/</url>
+ <groupId>org.rocksdb</groupId>
+ <artifactId>rocksdbjni</artifactId>
+ <!-- Version will be automatically replaced -->
+ <version>-</version>
+ <description>RocksDB fat jar that contains .so files for linux32 and linux64 (glibc and musl-libc), jnilib files
+ for Mac OSX, and a .dll for Windows x64.
+ </description>
+ <licenses>
+ <license>
+ <name>Apache License 2.0</name>
+ <url>http://www.apache.org/licenses/LICENSE-2.0.html</url>
+ <distribution>repo</distribution>
+ </license>
+ <license>
+ <name>GNU General Public License, version 2</name>
+ <url>http://www.gnu.org/licenses/gpl-2.0.html</url>
+ <distribution>repo</distribution>
+ </license>
+ </licenses>
+ <scm>
+ <connection>scm:git:git://github.com/facebook/rocksdb.git</connection>
+ <developerConnection>scm:git:git@github.com:facebook/rocksdb.git</developerConnection>
+ <url>http://github.com/facebook/rocksdb/</url>
+ <tag>HEAD</tag>
+ </scm>
+ <developers>
+ <developer>
+ <name>Facebook</name>
+ <email>help@facebook.com</email>
+ <timezone>America/New_York</timezone>
+ <roles>
+ <role>architect</role>
+ </roles>
+ </developer>
+ </developers>
+
+ <properties>
+ <project.build.source>1.7</project.build.source>
+ <project.build.target>1.7</project.build.target>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ </properties>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.2</version>
+ <configuration>
+ <source>${project.build.source}</source>
+ <target>${project.build.target}</target>
+ <encoding>${project.build.sourceEncoding}</encoding>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.18.1</version>
+ <configuration>
+ <argLine>${argLine} -ea -Xcheck:jni -Djava.library.path=${project.build.directory}</argLine>
+ <useManifestOnlyJar>false</useManifestOnlyJar>
+ <useSystemClassLoader>false</useSystemClassLoader>
+ <additionalClasspathElements>
+ <additionalClasspathElement>${project.build.directory}/*</additionalClasspathElement>
+ </additionalClasspathElements>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.jacoco</groupId>
+ <artifactId>jacoco-maven-plugin</artifactId>
+ <version>0.7.2.201409121644</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>report</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>report</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.gmaven</groupId>
+ <artifactId>groovy-maven-plugin</artifactId>
+ <version>2.0</version>
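+ <!-- The Groovy snippet below derives the project version from the
+ ROCKSDB_MAJOR/MINOR/PATCH macros in ../include/rocksdb/version.h. -->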
+ <executions>
+ <execution>
+ <phase>process-classes</phase>
+ <goals>
+ <goal>execute</goal>
+ </goals>
+ <configuration>
+ <defaults>
+ <name>Xenu</name>
+ </defaults>
+ <source>
+ String fileContents = new File(project.basedir.absolutePath + '/../include/rocksdb/version.h').getText('UTF-8')
+ matcher = (fileContents =~ /(?s).*ROCKSDB_MAJOR ([0-9]+).*?/)
+ String major_version = matcher.getAt(0).getAt(1)
+ matcher = (fileContents =~ /(?s).*ROCKSDB_MINOR ([0-9]+).*?/)
+ String minor_version = matcher.getAt(0).getAt(1)
+ matcher = (fileContents =~ /(?s).*ROCKSDB_PATCH ([0-9]+).*?/)
+ String patch_version = matcher.getAt(0).getAt(1)
+ String version = String.format('%s.%s.%s', major_version, minor_version, patch_version)
+ // Set version to be used in pom.properties
+ project.version = version
+ // Set version to be set as jar name
+ project.build.finalName = project.artifactId + "-" + version
+ </source>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.12</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.assertj</groupId>
+ <artifactId>assertj-core</artifactId>
+ <version>1.7.1</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <version>1.10.19</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+</project>
diff --git a/src/rocksdb/java/rocksjni/backupablejni.cc b/src/rocksdb/java/rocksjni/backupablejni.cc
new file mode 100644
index 000000000..3e52305d2
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/backupablejni.cc
@@ -0,0 +1,363 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::BackupEngine and
+// ROCKSDB_NAMESPACE::BackupableDBOptions methods from Java side.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+#include <vector>
+
+#include "include/org_rocksdb_BackupableDBOptions.h"
+#include "rocksdb/utilities/backupable_db.h"
+#include "rocksjni/portal.h"
+
+///////////////////////////////////////////////////////////////////////////
+// BackupDBOptions
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: newBackupableDBOptions
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions(
+ JNIEnv* env, jclass /*jcls*/, jstring jpath) {
+ const char* cpath = env->GetStringUTFChars(jpath, nullptr);
+ if (cpath == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+ auto* bopt = new ROCKSDB_NAMESPACE::BackupableDBOptions(cpath);
+ env->ReleaseStringUTFChars(jpath, cpath);
+ return reinterpret_cast<jlong>(bopt);
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: backupDir
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_BackupableDBOptions_backupDir(JNIEnv* env,
+ jobject /*jopt*/,
+ jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ return env->NewStringUTF(bopt->backup_dir.c_str());
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setBackupEnv
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setBackupEnv(
+ JNIEnv* /*env*/, jobject /*jopt*/, jlong jhandle, jlong jrocks_env_handle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ auto* rocks_env =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jrocks_env_handle);
+ bopt->backup_env = rocks_env;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setShareTableFiles
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setShareTableFiles(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jboolean flag) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ bopt->share_table_files = flag;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: shareTableFiles
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_BackupableDBOptions_shareTableFiles(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ return bopt->share_table_files;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setInfoLog
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setInfoLog(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jlong jlogger_handle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ // cast the logger handle (not the options handle) to obtain the logger
+ auto* sptr_logger =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*>(
+ jlogger_handle);
+ bopt->info_log = sptr_logger->get();
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setSync
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setSync(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jboolean flag) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ bopt->sync = flag;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: sync
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_BackupableDBOptions_sync(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ return bopt->sync;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setDestroyOldData
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setDestroyOldData(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jboolean flag) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ bopt->destroy_old_data = flag;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: destroyOldData
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_BackupableDBOptions_destroyOldData(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ return bopt->destroy_old_data;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setBackupLogFiles
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setBackupLogFiles(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jboolean flag) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ bopt->backup_log_files = flag;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: backupLogFiles
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_BackupableDBOptions_backupLogFiles(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ return bopt->backup_log_files;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setBackupRateLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimit(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jbackup_rate_limit) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ bopt->backup_rate_limit = jbackup_rate_limit;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: backupRateLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_BackupableDBOptions_backupRateLimit(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ return bopt->backup_rate_limit;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setBackupRateLimiter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimiter(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jrate_limiter_handle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ auto* sptr_rate_limiter =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(
+ jrate_limiter_handle);
+ bopt->backup_rate_limiter = *sptr_rate_limiter;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setRestoreRateLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimit(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jrestore_rate_limit) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ bopt->restore_rate_limit = jrestore_rate_limit;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: restoreRateLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_BackupableDBOptions_restoreRateLimit(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ return bopt->restore_rate_limit;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setRestoreRateLimiter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimiter(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jrate_limiter_handle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ auto* sptr_rate_limiter =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(
+ jrate_limiter_handle);
+ bopt->restore_rate_limiter = *sptr_rate_limiter;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setShareFilesWithChecksum
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setShareFilesWithChecksum(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean flag) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ bopt->share_files_with_checksum = flag;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: shareFilesWithChecksum
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_BackupableDBOptions_shareFilesWithChecksum(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ return bopt->share_files_with_checksum;
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setMaxBackgroundOperations
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setMaxBackgroundOperations(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jint max_background_operations) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ bopt->max_background_operations = static_cast<int>(max_background_operations);
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: maxBackgroundOperations
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_BackupableDBOptions_maxBackgroundOperations(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ return static_cast<jint>(bopt->max_background_operations);
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: setCallbackTriggerIntervalSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_setCallbackTriggerIntervalSize(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jcallback_trigger_interval_size) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ bopt->callback_trigger_interval_size =
+ static_cast<uint64_t>(jcallback_trigger_interval_size);
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: callbackTriggerIntervalSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_BackupableDBOptions_callbackTriggerIntervalSize(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ return static_cast<jlong>(bopt->callback_trigger_interval_size);
+}
+
+/*
+ * Class: org_rocksdb_BackupableDBOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_BackupableDBOptions_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jopt*/,
+ jlong jhandle) {
+ auto* bopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(jhandle);
+ assert(bopt != nullptr);
+ delete bopt;
+}
diff --git a/src/rocksdb/java/rocksjni/backupenginejni.cc b/src/rocksdb/java/rocksjni/backupenginejni.cc
new file mode 100644
index 000000000..76889fa80
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/backupenginejni.cc
@@ -0,0 +1,277 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::BackupEngine methods from the Java side.
+
+#include <jni.h>
+#include <vector>
+
+#include "include/org_rocksdb_BackupEngine.h"
+#include "rocksdb/utilities/backupable_db.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: open
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_BackupEngine_open(JNIEnv* env, jclass /*jcls*/,
+ jlong env_handle,
+ jlong backupable_db_options_handle) {
+ auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(env_handle);
+ auto* backupable_db_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupableDBOptions*>(
+ backupable_db_options_handle);
+ ROCKSDB_NAMESPACE::BackupEngine* backup_engine;
+ auto status = ROCKSDB_NAMESPACE::BackupEngine::Open(
+ rocks_env, *backupable_db_options, &backup_engine);
+
+ if (status.ok()) {
+ return reinterpret_cast<jlong>(backup_engine);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ return 0;
+ }
+}
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: createNewBackup
+ * Signature: (JJZ)V
+ */
+void Java_org_rocksdb_BackupEngine_createNewBackup(
+ JNIEnv* env, jobject /*jbe*/, jlong jbe_handle, jlong db_handle,
+ jboolean jflush_before_backup) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(db_handle);
+ auto* backup_engine =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngine*>(jbe_handle);
+ auto status = backup_engine->CreateNewBackup(
+ db, static_cast<bool>(jflush_before_backup));
+
+ if (status.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+}
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: createNewBackupWithMetadata
+ * Signature: (JJLjava/lang/String;Z)V
+ */
+void Java_org_rocksdb_BackupEngine_createNewBackupWithMetadata(
+ JNIEnv* env, jobject /*jbe*/, jlong jbe_handle, jlong db_handle,
+ jstring japp_metadata, jboolean jflush_before_backup) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(db_handle);
+ auto* backup_engine =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngine*>(jbe_handle);
+
+ jboolean has_exception = JNI_FALSE;
+ std::string app_metadata = ROCKSDB_NAMESPACE::JniUtil::copyStdString(
+ env, japp_metadata, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "Could not copy jstring to std::string");
+ return;
+ }
+
+ auto status = backup_engine->CreateNewBackupWithMetadata(
+ db, app_metadata, static_cast<bool>(jflush_before_backup));
+
+ if (status.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+}
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: getBackupInfo
+ * Signature: (J)Ljava/util/List;
+ */
+jobject Java_org_rocksdb_BackupEngine_getBackupInfo(JNIEnv* env,
+ jobject /*jbe*/,
+ jlong jbe_handle) {
+ auto* backup_engine =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngine*>(jbe_handle);
+ std::vector<ROCKSDB_NAMESPACE::BackupInfo> backup_infos;
+ backup_engine->GetBackupInfo(&backup_infos);
+ return ROCKSDB_NAMESPACE::BackupInfoListJni::getBackupInfo(env, backup_infos);
+}
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: getCorruptedBackups
+ * Signature: (J)[I
+ */
+jintArray Java_org_rocksdb_BackupEngine_getCorruptedBackups(JNIEnv* env,
+ jobject /*jbe*/,
+ jlong jbe_handle) {
+ auto* backup_engine =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngine*>(jbe_handle);
+ std::vector<ROCKSDB_NAMESPACE::BackupID> backup_ids;
+ backup_engine->GetCorruptedBackups(&backup_ids);
+  // store the backup IDs in an int array
+ std::vector<jint> int_backup_ids(backup_ids.begin(), backup_ids.end());
+
+  // Store the ints in a Java array.
+  // It's OK to lose precision here (64 -> 32).
+ jsize ret_backup_ids_size = static_cast<jsize>(backup_ids.size());
+ jintArray ret_backup_ids = env->NewIntArray(ret_backup_ids_size);
+ if (ret_backup_ids == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetIntArrayRegion(ret_backup_ids, 0, ret_backup_ids_size,
+ int_backup_ids.data());
+ return ret_backup_ids;
+}
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: garbageCollect
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_BackupEngine_garbageCollect(JNIEnv* env, jobject /*jbe*/,
+ jlong jbe_handle) {
+ auto* backup_engine =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngine*>(jbe_handle);
+ auto status = backup_engine->GarbageCollect();
+
+ if (status.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+}
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: purgeOldBackups
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_BackupEngine_purgeOldBackups(JNIEnv* env, jobject /*jbe*/,
+ jlong jbe_handle,
+ jint jnum_backups_to_keep) {
+ auto* backup_engine =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngine*>(jbe_handle);
+ auto status = backup_engine->PurgeOldBackups(
+ static_cast<uint32_t>(jnum_backups_to_keep));
+
+ if (status.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+}
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: deleteBackup
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_BackupEngine_deleteBackup(JNIEnv* env, jobject /*jbe*/,
+ jlong jbe_handle,
+ jint jbackup_id) {
+ auto* backup_engine =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngine*>(jbe_handle);
+ auto status = backup_engine->DeleteBackup(
+ static_cast<ROCKSDB_NAMESPACE::BackupID>(jbackup_id));
+
+ if (status.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+}
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: restoreDbFromBackup
+ * Signature: (JILjava/lang/String;Ljava/lang/String;J)V
+ */
+void Java_org_rocksdb_BackupEngine_restoreDbFromBackup(
+ JNIEnv* env, jobject /*jbe*/, jlong jbe_handle, jint jbackup_id,
+ jstring jdb_dir, jstring jwal_dir, jlong jrestore_options_handle) {
+ auto* backup_engine =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngine*>(jbe_handle);
+ const char* db_dir = env->GetStringUTFChars(jdb_dir, nullptr);
+ if (db_dir == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
+ if (wal_dir == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseStringUTFChars(jdb_dir, db_dir);
+ return;
+ }
+ auto* restore_options = reinterpret_cast<ROCKSDB_NAMESPACE::RestoreOptions*>(
+ jrestore_options_handle);
+ auto status = backup_engine->RestoreDBFromBackup(
+ static_cast<ROCKSDB_NAMESPACE::BackupID>(jbackup_id), db_dir, wal_dir,
+ *restore_options);
+
+ env->ReleaseStringUTFChars(jwal_dir, wal_dir);
+ env->ReleaseStringUTFChars(jdb_dir, db_dir);
+
+ if (status.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+}
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: restoreDbFromLatestBackup
+ * Signature: (JLjava/lang/String;Ljava/lang/String;J)V
+ */
+void Java_org_rocksdb_BackupEngine_restoreDbFromLatestBackup(
+ JNIEnv* env, jobject /*jbe*/, jlong jbe_handle, jstring jdb_dir,
+ jstring jwal_dir, jlong jrestore_options_handle) {
+ auto* backup_engine =
+ reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngine*>(jbe_handle);
+ const char* db_dir = env->GetStringUTFChars(jdb_dir, nullptr);
+ if (db_dir == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
+ if (wal_dir == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseStringUTFChars(jdb_dir, db_dir);
+ return;
+ }
+ auto* restore_options = reinterpret_cast<ROCKSDB_NAMESPACE::RestoreOptions*>(
+ jrestore_options_handle);
+ auto status = backup_engine->RestoreDBFromLatestBackup(db_dir, wal_dir,
+ *restore_options);
+
+ env->ReleaseStringUTFChars(jwal_dir, wal_dir);
+ env->ReleaseStringUTFChars(jdb_dir, db_dir);
+
+ if (status.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+}
+
+/*
+ * Class: org_rocksdb_BackupEngine
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_BackupEngine_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jbe*/,
+ jlong jbe_handle) {
+ auto* be = reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngine*>(jbe_handle);
+ assert(be != nullptr);
+ delete be;
+}
diff --git a/src/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc b/src/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc
new file mode 100644
index 000000000..10d482baf
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CassandraCompactionFilter.h"
+#include "utilities/cassandra/cassandra_compaction_filter.h"
+
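+// A minimal Java-side sketch, assuming the org.rocksdb wrapper class that
+// declares this native (illustration only):
+//
+//   columnFamilyOptions.setCompactionFilter(
+//       new CassandraCompactionFilter(false /* purgeTtlOnExpiration */,
+//                                     864000 /* gcGracePeriodInSeconds */));
+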
+/*
+ * Class: org_rocksdb_CassandraCompactionFilter
+ * Method: createNewCassandraCompactionFilter0
+ * Signature: (ZI)J
+ */
+jlong Java_org_rocksdb_CassandraCompactionFilter_createNewCassandraCompactionFilter0(
+ JNIEnv* /*env*/, jclass /*jcls*/, jboolean purge_ttl_on_expiration,
+ jint gc_grace_period_in_seconds) {
+ auto* compaction_filter =
+ new ROCKSDB_NAMESPACE::cassandra::CassandraCompactionFilter(
+ purge_ttl_on_expiration, gc_grace_period_in_seconds);
+  // return the native handle of the compaction filter
+ return reinterpret_cast<jlong>(compaction_filter);
+}
diff --git a/src/rocksdb/java/rocksjni/cassandra_value_operator.cc b/src/rocksdb/java/rocksjni/cassandra_value_operator.cc
new file mode 100644
index 000000000..9bd31b9fb
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/cassandra_value_operator.cc
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory>
+#include <string>
+
+#include "include/org_rocksdb_CassandraValueMergeOperator.h"
+#include "rocksdb/db.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/merge_operator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/table.h"
+#include "rocksjni/portal.h"
+#include "utilities/cassandra/merge_operator.h"
+
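+// A minimal Java-side sketch, assuming the org.rocksdb wrapper class that
+// declares this native (illustration only):
+//
+//   options.setMergeOperator(new CassandraValueMergeOperator(
+//       864000 /* gcGracePeriodInSeconds */, 32 /* operandsLimit */));
+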
+/*
+ * Class: org_rocksdb_CassandraValueMergeOperator
+ * Method: newSharedCassandraValueMergeOperator
+ * Signature: (II)J
+ */
+jlong Java_org_rocksdb_CassandraValueMergeOperator_newSharedCassandraValueMergeOperator(
+ JNIEnv* /*env*/, jclass /*jclazz*/, jint gcGracePeriodInSeconds,
+ jint operands_limit) {
+ auto* op = new std::shared_ptr<ROCKSDB_NAMESPACE::MergeOperator>(
+ new ROCKSDB_NAMESPACE::cassandra::CassandraValueMergeOperator(
+ gcGracePeriodInSeconds, operands_limit));
+ return reinterpret_cast<jlong>(op);
+}
+
+/*
+ * Class: org_rocksdb_CassandraValueMergeOperator
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CassandraValueMergeOperator_disposeInternal(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* op =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::MergeOperator>*>(
+ jhandle);
+ delete op;
+}
diff --git a/src/rocksdb/java/rocksjni/checkpoint.cc b/src/rocksdb/java/rocksjni/checkpoint.cc
new file mode 100644
index 000000000..b04846e87
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/checkpoint.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::Checkpoint methods from the Java side.
+
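+// A minimal Java-side sketch (org.rocksdb.Checkpoint is the wrapper that
+// declares these natives):
+//
+//   try (final Checkpoint checkpoint = Checkpoint.create(db)) {
+//     checkpoint.createCheckpoint("/path/to/checkpoint");
+//   }
+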
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+
+#include "include/org_rocksdb_Checkpoint.h"
+#include "rocksdb/db.h"
+#include "rocksdb/utilities/checkpoint.h"
+#include "rocksjni/portal.h"
+/*
+ * Class: org_rocksdb_Checkpoint
+ * Method: newCheckpoint
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Checkpoint_newCheckpoint(JNIEnv* /*env*/,
+ jclass /*jclazz*/,
+ jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::Checkpoint* checkpoint;
+ ROCKSDB_NAMESPACE::Checkpoint::Create(db, &checkpoint);
+ return reinterpret_cast<jlong>(checkpoint);
+}
+
+/*
+ * Class: org_rocksdb_Checkpoint
+ * Method: dispose
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Checkpoint_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* checkpoint = reinterpret_cast<ROCKSDB_NAMESPACE::Checkpoint*>(jhandle);
+ assert(checkpoint != nullptr);
+ delete checkpoint;
+}
+
+/*
+ * Class: org_rocksdb_Checkpoint
+ * Method: createCheckpoint
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_Checkpoint_createCheckpoint(JNIEnv* env, jobject /*jobj*/,
+ jlong jcheckpoint_handle,
+ jstring jcheckpoint_path) {
+  const char* checkpoint_path = env->GetStringUTFChars(jcheckpoint_path, nullptr);
+ if (checkpoint_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ auto* checkpoint =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Checkpoint*>(jcheckpoint_handle);
+ ROCKSDB_NAMESPACE::Status s = checkpoint->CreateCheckpoint(checkpoint_path);
+
+ env->ReleaseStringUTFChars(jcheckpoint_path, checkpoint_path);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
diff --git a/src/rocksdb/java/rocksjni/clock_cache.cc b/src/rocksdb/java/rocksjni/clock_cache.cc
new file mode 100644
index 000000000..56ddcfce5
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/clock_cache.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::ClockCache.
+
+#include <jni.h>
+
+#include "cache/clock_cache.h"
+#include "include/org_rocksdb_ClockCache.h"
+
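+// Ownership note: the handle returned to Java points at a heap-allocated
+// std::shared_ptr<Cache> rather than at the cache itself, so other C++
+// holders (e.g. a table factory) can share ownership; disposeInternal()
+// deletes only the shared_ptr, and the cache is freed once the last
+// reference is dropped.
+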
+/*
+ * Class: org_rocksdb_ClockCache
+ * Method: newClockCache
+ * Signature: (JIZ)J
+ */
+jlong Java_org_rocksdb_ClockCache_newClockCache(
+ JNIEnv* /*env*/, jclass /*jcls*/, jlong jcapacity, jint jnum_shard_bits,
+ jboolean jstrict_capacity_limit) {
+ auto* sptr_clock_cache = new std::shared_ptr<ROCKSDB_NAMESPACE::Cache>(
+ ROCKSDB_NAMESPACE::NewClockCache(
+ static_cast<size_t>(jcapacity), static_cast<int>(jnum_shard_bits),
+ static_cast<bool>(jstrict_capacity_limit)));
+ return reinterpret_cast<jlong>(sptr_clock_cache);
+}
+
+/*
+ * Class: org_rocksdb_ClockCache
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_ClockCache_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* sptr_clock_cache =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache>*>(jhandle);
+ delete sptr_clock_cache; // delete std::shared_ptr
+}
diff --git a/src/rocksdb/java/rocksjni/columnfamilyhandle.cc b/src/rocksdb/java/rocksjni/columnfamilyhandle.cc
new file mode 100644
index 000000000..4140580f0
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/columnfamilyhandle.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::ColumnFamilyHandle.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "include/org_rocksdb_ColumnFamilyHandle.h"
+#include "rocksjni/portal.h"
+
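+// A minimal Java-side sketch of these natives, as seen through the
+// org.rocksdb.ColumnFamilyHandle wrapper (illustration only):
+//
+//   final byte[] name = columnFamilyHandle.getName();
+//   final int id = columnFamilyHandle.getID();
+//   final ColumnFamilyDescriptor desc = columnFamilyHandle.getDescriptor();
+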
+/*
+ * Class: org_rocksdb_ColumnFamilyHandle
+ * Method: getName
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_ColumnFamilyHandle_getName(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* cfh = reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jhandle);
+ std::string cf_name = cfh->GetName();
+ return ROCKSDB_NAMESPACE::JniUtil::copyBytes(env, cf_name);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyHandle
+ * Method: getID
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyHandle_getID(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* cfh = reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jhandle);
+ const int32_t id = cfh->GetID();
+ return static_cast<jint>(id);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyHandle
+ * Method: getDescriptor
+ * Signature: (J)Lorg/rocksdb/ColumnFamilyDescriptor;
+ */
+jobject Java_org_rocksdb_ColumnFamilyHandle_getDescriptor(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* cfh = reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jhandle);
+ ROCKSDB_NAMESPACE::ColumnFamilyDescriptor desc;
+ ROCKSDB_NAMESPACE::Status s = cfh->GetDescriptor(&desc);
+ if (s.ok()) {
+ return ROCKSDB_NAMESPACE::ColumnFamilyDescriptorJni::construct(env, &desc);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyHandle
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_ColumnFamilyHandle_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* cfh = reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jhandle);
+ assert(cfh != nullptr);
+ delete cfh;
+}
diff --git a/src/rocksdb/java/rocksjni/compact_range_options.cc b/src/rocksdb/java/rocksjni/compact_range_options.cc
new file mode 100644
index 000000000..af5a200e7
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compact_range_options.cc
@@ -0,0 +1,211 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactRangeOptions.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactRangeOptions.h"
+#include "rocksdb/options.h"
+#include "rocksjni/portal.h"
+
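+// A minimal Java-side sketch of driving these natives through the
+// org.rocksdb.CompactRangeOptions wrapper (illustration only):
+//
+//   try (final CompactRangeOptions cro = new CompactRangeOptions()
+//            .setChangeLevel(true)
+//            .setTargetLevel(1)) {
+//     db.compactRange(columnFamilyHandle, null, null, cro);
+//   }
+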
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: newCompactRangeOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactRangeOptions_newCompactRangeOptions(
+ JNIEnv* /*env*/, jclass /*jclazz*/) {
+ auto* options = new ROCKSDB_NAMESPACE::CompactRangeOptions();
+ return reinterpret_cast<jlong>(options);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: exclusiveManualCompaction
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompactRangeOptions_exclusiveManualCompaction(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ return static_cast<jboolean>(options->exclusive_manual_compaction);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: setExclusiveManualCompaction
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompactRangeOptions_setExclusiveManualCompaction(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    jboolean exclusive_manual_compaction) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+  options->exclusive_manual_compaction =
+      static_cast<bool>(exclusive_manual_compaction);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: bottommostLevelCompaction
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactRangeOptions_bottommostLevelCompaction(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ return ROCKSDB_NAMESPACE::BottommostLevelCompactionJni::
+ toJavaBottommostLevelCompaction(options->bottommost_level_compaction);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: setBottommostLevelCompaction
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactRangeOptions_setBottommostLevelCompaction(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jint bottommost_level_compaction) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ options->bottommost_level_compaction =
+ ROCKSDB_NAMESPACE::BottommostLevelCompactionJni::
+ toCppBottommostLevelCompaction(bottommost_level_compaction);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: changeLevel
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompactRangeOptions_changeLevel(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ return static_cast<jboolean>(options->change_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: setChangeLevel
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompactRangeOptions_setChangeLevel(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean change_level) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ options->change_level = static_cast<bool>(change_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: targetLevel
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactRangeOptions_targetLevel(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ return static_cast<jint>(options->target_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: setTargetLevel
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactRangeOptions_setTargetLevel(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint target_level) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ options->target_level = static_cast<int>(target_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: targetPathId
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactRangeOptions_targetPathId(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ return static_cast<jint>(options->target_path_id);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: setTargetPathId
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactRangeOptions_setTargetPathId(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint target_path_id) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ options->target_path_id = static_cast<uint32_t>(target_path_id);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: allowWriteStall
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompactRangeOptions_allowWriteStall(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ return static_cast<jboolean>(options->allow_write_stall);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: setAllowWriteStall
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompactRangeOptions_setAllowWriteStall(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    jboolean allow_write_stall) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ options->allow_write_stall = static_cast<bool>(allow_write_stall);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: maxSubcompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactRangeOptions_maxSubcompactions(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ return static_cast<jint>(options->max_subcompactions);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: setMaxSubcompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactRangeOptions_setMaxSubcompactions(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    jint max_subcompactions) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ options->max_subcompactions = static_cast<uint32_t>(max_subcompactions);
+}
+
+/*
+ * Class: org_rocksdb_CompactRangeOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactRangeOptions_disposeInternal(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(jhandle);
+ delete options;
+}
diff --git a/src/rocksdb/java/rocksjni/compaction_filter.cc b/src/rocksdb/java/rocksjni/compaction_filter.cc
new file mode 100644
index 000000000..c3a68cdf2
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compaction_filter.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactionFilter.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_AbstractCompactionFilter.h"
+#include "rocksdb/compaction_filter.h"
+
+// <editor-fold desc="org.rocksdb.AbstractCompactionFilter">
+
+/*
+ * Class: org_rocksdb_AbstractCompactionFilter
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_AbstractCompactionFilter_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ auto* cf = reinterpret_cast<ROCKSDB_NAMESPACE::CompactionFilter*>(handle);
+ assert(cf != nullptr);
+ delete cf;
+}
+// </editor-fold>
diff --git a/src/rocksdb/java/rocksjni/compaction_filter_factory.cc b/src/rocksdb/java/rocksjni/compaction_filter_factory.cc
new file mode 100644
index 000000000..3b7c462c4
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compaction_filter_factory.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactionFilterFactory.
+
+#include <jni.h>
+#include <memory>
+
+#include "include/org_rocksdb_AbstractCompactionFilterFactory.h"
+#include "rocksjni/compaction_filter_factory_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractCompactionFilterFactory
+ * Method: createNewCompactionFilterFactory0
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractCompactionFilterFactory_createNewCompactionFilterFactory0(
+ JNIEnv* env, jobject jobj) {
+ auto* cff =
+ new ROCKSDB_NAMESPACE::CompactionFilterFactoryJniCallback(env, jobj);
+ auto* ptr_sptr_cff = new std::shared_ptr<
+ ROCKSDB_NAMESPACE::CompactionFilterFactoryJniCallback>(cff);
+ return reinterpret_cast<jlong>(ptr_sptr_cff);
+}
+
+/*
+ * Class: org_rocksdb_AbstractCompactionFilterFactory
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_AbstractCompactionFilterFactory_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* ptr_sptr_cff = reinterpret_cast<
+ std::shared_ptr<ROCKSDB_NAMESPACE::CompactionFilterFactoryJniCallback>*>(
+ jhandle);
+ delete ptr_sptr_cff;
+}
diff --git a/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.cc b/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.cc
new file mode 100644
index 000000000..cacbf02c1
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactionFilterFactory.
+
+#include "rocksjni/compaction_filter_factory_jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+CompactionFilterFactoryJniCallback::CompactionFilterFactoryJniCallback(
+ JNIEnv* env, jobject jcompaction_filter_factory)
+ : JniCallback(env, jcompaction_filter_factory) {
+
+  // Note: The name of a CompactionFilterFactory will not change during
+  // its lifetime, so we cache it in a member variable
+  jmethodID jname_method_id =
+      AbstractCompactionFilterFactoryJni::getNameMethodId(env);
+  if (jname_method_id == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ jstring jname =
+ (jstring)env->CallObjectMethod(m_jcallback_obj, jname_method_id);
+  if (env->ExceptionCheck()) {
+ // exception thrown
+ return;
+ }
+ jboolean has_exception = JNI_FALSE;
+ m_name = JniUtil::copyString(env, jname, &has_exception); // also releases jname
+ if (has_exception == JNI_TRUE) {
+ // exception thrown
+ return;
+ }
+
+ m_jcreate_compaction_filter_methodid =
+ AbstractCompactionFilterFactoryJni::getCreateCompactionFilterMethodId(env);
+  if (m_jcreate_compaction_filter_methodid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+}
+
+const char* CompactionFilterFactoryJniCallback::Name() const {
+ return m_name.get();
+}
+
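+// Note: CreateCompactionFilter() is invoked from RocksDB's background
+// compaction threads, so getJniEnv() may need to attach the calling thread
+// to the JVM, and releaseJniEnv() detaches it again if it was attached here.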
+std::unique_ptr<CompactionFilter> CompactionFilterFactoryJniCallback::CreateCompactionFilter(
+ const CompactionFilter::Context& context) {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ assert(env != nullptr);
+
+ jlong addr_compaction_filter = env->CallLongMethod(m_jcallback_obj,
+ m_jcreate_compaction_filter_methodid,
+ static_cast<jboolean>(context.is_full_compaction),
+ static_cast<jboolean>(context.is_manual_compaction));
+
+  if (env->ExceptionCheck()) {
+ // exception thrown from CallLongMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return nullptr;
+ }
+
+ auto* cff = reinterpret_cast<CompactionFilter*>(addr_compaction_filter);
+
+ releaseJniEnv(attached_thread);
+
+ return std::unique_ptr<CompactionFilter>(cff);
+}
+
+} // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.h b/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.h
new file mode 100644
index 000000000..eb2d5111d
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactionFilterFactory.
+
+#ifndef JAVA_ROCKSJNI_COMPACTION_FILTER_FACTORY_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_COMPACTION_FILTER_FACTORY_JNICALLBACK_H_
+
+#include <jni.h>
+#include <memory>
+
+#include "rocksdb/compaction_filter.h"
+#include "rocksjni/jnicallback.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class CompactionFilterFactoryJniCallback
+    : public JniCallback, public CompactionFilterFactory {
+ public:
+ CompactionFilterFactoryJniCallback(
+ JNIEnv* env, jobject jcompaction_filter_factory);
+ virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+ const CompactionFilter::Context& context);
+ virtual const char* Name() const;
+
+ private:
+ std::unique_ptr<const char[]> m_name;
+ jmethodID m_jcreate_compaction_filter_methodid;
+};
+
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // JAVA_ROCKSJNI_COMPACTION_FILTER_FACTORY_JNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/compaction_job_info.cc b/src/rocksdb/java/rocksjni/compaction_job_info.cc
new file mode 100644
index 000000000..245ff7e61
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compaction_job_info.cc
@@ -0,0 +1,231 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactionJobInfo.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionJobInfo.h"
+#include "rocksdb/listener.h"
+#include "rocksjni/portal.h"
+
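+// A minimal Java-side sketch; compactFiles() and its parameter order below
+// are assumed from the org.rocksdb.RocksDB wrapper and are shown only as an
+// illustration:
+//
+//   try (final CompactionJobInfo info = new CompactionJobInfo()) {
+//     db.compactFiles(compactionOptions, columnFamilyHandle, inputFileNames,
+//         outputLevel, -1 /* any output path */, info);
+//     System.out.println(info.status() + " " + info.compactionReason());
+//   }
+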
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: newCompactionJobInfo
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_newCompactionJobInfo(
+ JNIEnv*, jclass) {
+ auto* compact_job_info = new ROCKSDB_NAMESPACE::CompactionJobInfo();
+ return reinterpret_cast<jlong>(compact_job_info);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionJobInfo_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ delete compact_job_info;
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: columnFamilyName
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_CompactionJobInfo_columnFamilyName(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ return ROCKSDB_NAMESPACE::JniUtil::copyBytes(env, compact_job_info->cf_name);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: status
+ * Signature: (J)Lorg/rocksdb/Status;
+ */
+jobject Java_org_rocksdb_CompactionJobInfo_status(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ return ROCKSDB_NAMESPACE::StatusJni::construct(env, compact_job_info->status);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: threadId
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_threadId(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ return static_cast<jlong>(compact_job_info->thread_id);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: jobId
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionJobInfo_jobId(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ return static_cast<jint>(compact_job_info->job_id);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: baseInputLevel
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionJobInfo_baseInputLevel(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ return static_cast<jint>(compact_job_info->base_input_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: outputLevel
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionJobInfo_outputLevel(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ return static_cast<jint>(compact_job_info->output_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: inputFiles
+ * Signature: (J)[Ljava/lang/String;
+ */
+jobjectArray Java_org_rocksdb_CompactionJobInfo_inputFiles(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaStrings(
+ env, &compact_job_info->input_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: outputFiles
+ * Signature: (J)[Ljava/lang/String;
+ */
+jobjectArray Java_org_rocksdb_CompactionJobInfo_outputFiles(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaStrings(
+ env, &compact_job_info->output_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: tableProperties
+ * Signature: (J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_CompactionJobInfo_tableProperties(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ auto* map = &compact_job_info->table_properties;
+
+ jobject jhash_map = ROCKSDB_NAMESPACE::HashMapJni::construct(
+ env, static_cast<uint32_t>(map->size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+ const std::string,
+ std::shared_ptr<const ROCKSDB_NAMESPACE::TableProperties>, jobject,
+ jobject>
+ fn_map_kv =
+ [env](const std::pair<
+ const std::string,
+ std::shared_ptr<const ROCKSDB_NAMESPACE::TableProperties>>&
+ kv) {
+ jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &(kv.first), false);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jobject jtable_properties =
+ ROCKSDB_NAMESPACE::TablePropertiesJni::fromCppTableProperties(
+ env, *(kv.second.get()));
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(
+ new std::pair<jobject, jobject>(static_cast<jobject>(jkey),
+ jtable_properties));
+ };
+
+ if (!ROCKSDB_NAMESPACE::HashMapJni::putAll(env, jhash_map, map->begin(),
+ map->end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: compactionReason
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionJobInfo_compactionReason(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompactionReasonJni::toJavaCompactionReason(
+ compact_job_info->compaction_reason);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: compression
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionJobInfo_compression(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType(
+ compact_job_info->compression);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: stats
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_stats(
+    JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+ auto* stats = new ROCKSDB_NAMESPACE::CompactionJobStats();
+ stats->Add(compact_job_info->stats);
+ return reinterpret_cast<jlong>(stats);
+}
diff --git a/src/rocksdb/java/rocksjni/compaction_job_stats.cc b/src/rocksdb/java/rocksjni/compaction_job_stats.cc
new file mode 100644
index 000000000..efaec69ee
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compaction_job_stats.cc
@@ -0,0 +1,361 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactionJobStats.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionJobStats.h"
+#include "rocksdb/compaction_job_stats.h"
+#include "rocksjni/portal.h"
+
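+// A minimal Java-side sketch, assuming CompactionJobInfo#stats() from the
+// org.rocksdb wrapper API (illustration only):
+//
+//   try (final CompactionJobStats stats = compactionJobInfo.stats()) {
+//     final long micros = stats.elapsedMicros();
+//     final long read = stats.numInputRecords();
+//     final long written = stats.numOutputRecords();
+//   }
+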
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: newCompactionJobStats
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_newCompactionJobStats(
+ JNIEnv*, jclass) {
+ auto* compact_job_stats = new ROCKSDB_NAMESPACE::CompactionJobStats();
+ return reinterpret_cast<jlong>(compact_job_stats);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionJobStats_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ delete compact_job_stats;
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: reset
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionJobStats_reset(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ compact_job_stats->Reset();
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: add
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_CompactionJobStats_add(
+ JNIEnv*, jclass, jlong jhandle, jlong jother_handle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ auto* other_compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jother_handle);
+ compact_job_stats->Add(*other_compact_job_stats);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: elapsedMicros
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_elapsedMicros(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(compact_job_stats->elapsed_micros);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputRecords(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(compact_job_stats->num_input_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputFiles
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputFiles(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(compact_job_stats->num_input_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputFilesAtOutputLevel
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputFilesAtOutputLevel(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_input_files_at_output_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numOutputRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numOutputRecords(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_output_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numOutputFiles
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numOutputFiles(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_output_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: isManualCompaction
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompactionJobStats_isManualCompaction(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ if (compact_job_stats->is_manual_compaction) {
+ return JNI_TRUE;
+ } else {
+ return JNI_FALSE;
+ }
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalInputBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalInputBytes(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->total_input_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalOutputBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalOutputBytes(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->total_output_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numRecordsReplaced
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numRecordsReplaced(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_records_replaced);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalInputRawKeyBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalInputRawKeyBytes(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->total_input_raw_key_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalInputRawValueBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalInputRawValueBytes(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->total_input_raw_value_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputDeletionRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputDeletionRecords(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_input_deletion_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numExpiredDeletionRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numExpiredDeletionRecords(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_expired_deletion_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numCorruptKeys
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numCorruptKeys(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_corrupt_keys);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: fileWriteNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_fileWriteNanos(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->file_write_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: fileRangeSyncNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_fileRangeSyncNanos(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->file_range_sync_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: fileFsyncNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_fileFsyncNanos(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->file_fsync_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: filePrepareWriteNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_filePrepareWriteNanos(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->file_prepare_write_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: smallestOutputKeyPrefix
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_CompactionJobStats_smallestOutputKeyPrefix(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return ROCKSDB_NAMESPACE::JniUtil::copyBytes(
+ env, compact_job_stats->smallest_output_key_prefix);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: largestOutputKeyPrefix
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_CompactionJobStats_largestOutputKeyPrefix(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return ROCKSDB_NAMESPACE::JniUtil::copyBytes(
+ env, compact_job_stats->largest_output_key_prefix);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numSingleDelFallthru
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numSingleDelFallthru(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_single_del_fallthru);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numSingleDelMismatch
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numSingleDelMismatch(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_single_del_mismatch);
+}
diff --git a/src/rocksdb/java/rocksjni/compaction_options.cc b/src/rocksdb/java/rocksjni/compaction_options.cc
new file mode 100644
index 000000000..e904d4abc
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compaction_options.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactionOptions.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionOptions.h"
+#include "rocksdb/options.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: newCompactionOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionOptions_newCompactionOptions(
+ JNIEnv*, jclass) {
+ auto* compact_opts = new ROCKSDB_NAMESPACE::CompactionOptions();
+ return reinterpret_cast<jlong>(compact_opts);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionOptions_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+ auto* compact_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptions*>(jhandle);
+ delete compact_opts;
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: compression
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionOptions_compression(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptions*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType(
+ compact_opts->compression);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: setCompression
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_CompactionOptions_setCompression(
+ JNIEnv*, jclass, jlong jhandle, jbyte jcompression_type_value) {
+ auto* compact_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptions*>(jhandle);
+ compact_opts->compression =
+ ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType(
+ jcompression_type_value);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: outputFileSizeLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionOptions_outputFileSizeLimit(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptions*>(jhandle);
+ return static_cast<jlong>(
+ compact_opts->output_file_size_limit);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: setOutputFileSizeLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_CompactionOptions_setOutputFileSizeLimit(
+ JNIEnv*, jclass, jlong jhandle, jlong joutput_file_size_limit) {
+ auto* compact_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptions*>(jhandle);
+ compact_opts->output_file_size_limit =
+ static_cast<uint64_t>(joutput_file_size_limit);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: maxSubcompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptions_maxSubcompactions(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptions*>(jhandle);
+ return static_cast<jint>(
+ compact_opts->max_subcompactions);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: setMaxSubcompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptions_setMaxSubcompactions(
+ JNIEnv*, jclass, jlong jhandle, jint jmax_subcompactions) {
+ auto* compact_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptions*>(jhandle);
+ compact_opts->max_subcompactions =
+ static_cast<uint32_t>(jmax_subcompactions);
+}
diff --git a/src/rocksdb/java/rocksjni/compaction_options_fifo.cc b/src/rocksdb/java/rocksjni/compaction_options_fifo.cc
new file mode 100644
index 000000000..08993524b
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compaction_options_fifo.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactionOptionsFIFO.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionOptionsFIFO.h"
+#include "rocksdb/advanced_options.h"
+
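+// A minimal Java-side sketch of wiring these options into a column family
+// (names follow the org.rocksdb wrappers, illustration only):
+//
+//   try (final CompactionOptionsFIFO fifo = new CompactionOptionsFIFO()
+//            .setMaxTableFilesSize(1024 * 1024 * 1024)
+//            .setAllowCompaction(true)) {
+//     columnFamilyOptions.setCompactionOptionsFIFO(fifo);
+//   }
+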
+/*
+ * Class: org_rocksdb_CompactionOptionsFIFO
+ * Method: newCompactionOptionsFIFO
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(
+ JNIEnv*, jclass) {
+ const auto* opt = new ROCKSDB_NAMESPACE::CompactionOptionsFIFO();
+ return reinterpret_cast<jlong>(opt);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsFIFO
+ * Method: setMaxTableFilesSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsFIFO*>(jhandle);
+ opt->max_table_files_size = static_cast<uint64_t>(jmax_table_files_size);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsFIFO
+ * Method: maxTableFilesSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsFIFO*>(jhandle);
+ return static_cast<jlong>(opt->max_table_files_size);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsFIFO
+ * Method: setAllowCompaction
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompactionOptionsFIFO_setAllowCompaction(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_compaction) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsFIFO*>(jhandle);
+ opt->allow_compaction = static_cast<bool>(allow_compaction);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsFIFO
+ * Method: allowCompaction
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompactionOptionsFIFO_allowCompaction(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsFIFO*>(jhandle);
+ return static_cast<jboolean>(opt->allow_compaction);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsFIFO
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsFIFO*>(jhandle);
+}
diff --git a/src/rocksdb/java/rocksjni/compaction_options_universal.cc b/src/rocksdb/java/rocksjni/compaction_options_universal.cc
new file mode 100644
index 000000000..899ee6d1a
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compaction_options_universal.cc
@@ -0,0 +1,209 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactionOptionsUniversal.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionOptionsUniversal.h"
+#include "rocksdb/advanced_options.h"
+#include "rocksjni/portal.h"
+
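+// Likewise, a minimal Java-side sketch of wiring these options into a
+// column family (names follow the org.rocksdb wrappers, illustration only):
+//
+//   try (final CompactionOptionsUniversal uni =
+//            new CompactionOptionsUniversal()
+//                .setSizeRatio(1)
+//                .setMaxSizeAmplificationPercent(200)) {
+//     columnFamilyOptions.setCompactionOptionsUniversal(uni);
+//   }
+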
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: newCompactionOptionsUniversal
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal(
+ JNIEnv*, jclass) {
+ const auto* opt = new ROCKSDB_NAMESPACE::CompactionOptionsUniversal();
+ return reinterpret_cast<jlong>(opt);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setSizeRatio
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio(
+ JNIEnv*, jobject, jlong jhandle, jint jsize_ratio) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ opt->size_ratio = static_cast<unsigned int>(jsize_ratio);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: sizeRatio
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ return static_cast<jint>(opt->size_ratio);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setMinMergeWidth
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth(
+ JNIEnv*, jobject, jlong jhandle, jint jmin_merge_width) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ opt->min_merge_width = static_cast<unsigned int>(jmin_merge_width);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: minMergeWidth
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ return static_cast<jint>(opt->min_merge_width);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setMaxMergeWidth
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth(
+ JNIEnv*, jobject, jlong jhandle, jint jmax_merge_width) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ opt->max_merge_width = static_cast<unsigned int>(jmax_merge_width);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: maxMergeWidth
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ return static_cast<jint>(opt->max_merge_width);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setMaxSizeAmplificationPercent
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent(
+ JNIEnv*, jobject, jlong jhandle, jint jmax_size_amplification_percent) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ opt->max_size_amplification_percent =
+ static_cast<unsigned int>(jmax_size_amplification_percent);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: maxSizeAmplificationPercent
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ return static_cast<jint>(opt->max_size_amplification_percent);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setCompressionSizePercent
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent(
+ JNIEnv*, jobject, jlong jhandle,
+ jint jcompression_size_percent) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ opt->compression_size_percent =
+ static_cast<unsigned int>(jcompression_size_percent);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: compressionSizePercent
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ return static_cast<jint>(opt->compression_size_percent);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setStopStyle
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle(
+ JNIEnv*, jobject, jlong jhandle, jbyte jstop_style_value) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ opt->stop_style =
+ ROCKSDB_NAMESPACE::CompactionStopStyleJni::toCppCompactionStopStyle(
+ jstop_style_value);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: stopStyle
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompactionStopStyleJni::toJavaCompactionStopStyle(
+ opt->stop_style);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: setAllowTrivialMove
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_trivial_move) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ opt->allow_trivial_move = static_cast<bool>(jallow_trivial_move);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: allowTrivialMove
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(jhandle);
+ return opt->allow_trivial_move;
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptionsUniversal
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionOptionsUniversal_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(
+ jhandle);
+}
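
As above, a hedged Java-side sketch (the CompactionStopStyle constant name follows the RocksJava enum; UniversalExample is hypothetical):

```java
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.CompactionOptionsUniversal;
import org.rocksdb.CompactionStopStyle;
import org.rocksdb.CompactionStyle;
import org.rocksdb.RocksDB;

public class UniversalExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final CompactionOptionsUniversal uniOpts = new CompactionOptionsUniversal();
         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      uniOpts.setSizeRatio(1)  // each setter calls the matching native above
             .setMinMergeWidth(2)
             .setMaxSizeAmplificationPercent(200)
             .setStopStyle(CompactionStopStyle.CompactionStopStyleTotalSize)
             .setAllowTrivialMove(true);
      cfOpts.setCompactionStyle(CompactionStyle.UNIVERSAL)
            .setCompactionOptionsUniversal(uniOpts);
    }
  }
}
```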
diff --git a/src/rocksdb/java/rocksjni/comparator.cc b/src/rocksdb/java/rocksjni/comparator.cc
new file mode 100644
index 000000000..485d18f0b
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/comparator.cc
@@ -0,0 +1,57 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Comparator.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <functional>
+#include <string>
+
+#include "include/org_rocksdb_AbstractComparator.h"
+#include "include/org_rocksdb_NativeComparatorWrapper.h"
+#include "rocksjni/comparatorjnicallback.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_AbstractComparator
+ * Method: createNewComparator
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_AbstractComparator_createNewComparator(
+ JNIEnv* env, jobject jcomparator, jlong copt_handle) {
+ auto* copt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*>(
+ copt_handle);
+ auto* c =
+ new ROCKSDB_NAMESPACE::ComparatorJniCallback(env, jcomparator, copt);
+ return reinterpret_cast<jlong>(c);
+}
+
+/*
+ * Class: org_rocksdb_AbstractComparator
+ * Method: usingDirectBuffers
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_AbstractComparator_usingDirectBuffers(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* c =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallback*>(jhandle);
+ return static_cast<jboolean>(c->m_options->direct_buffer);
+}
+
+/*
+ * Class: org_rocksdb_NativeComparatorWrapper
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_NativeComparatorWrapper_disposeInternal(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jcomparator_handle) {
+ auto* comparator =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Comparator*>(jcomparator_handle);
+ delete comparator;
+}
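
createNewComparator() is reached from the constructor of a user-defined comparator on the Java side. A minimal sketch, assuming the ByteBuffer-based AbstractComparator API that this bridge targets (ReverseComparator is a hypothetical example class):

```java
import java.nio.ByteBuffer;
import org.rocksdb.AbstractComparator;
import org.rocksdb.ComparatorOptions;

// Illustrative only: ByteBuffer.compareTo() compares bytes as signed values,
// whereas RocksDB's built-in bytewise comparator is unsigned.
public class ReverseComparator extends AbstractComparator {
  public ReverseComparator(final ComparatorOptions copt) {
    super(copt);  // ultimately invokes the createNewComparator() native above
  }

  @Override
  public String name() {
    return "example.ReverseComparator";  // cached once by ComparatorJniCallback
  }

  @Override
  public int compare(final ByteBuffer a, final ByteBuffer b) {
    return -a.compareTo(b);
  }
}
```

An instance would then be installed with Options#setComparator(...), and the wrapped C++ object is freed via disposeInternal() above.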
diff --git a/src/rocksdb/java/rocksjni/comparatorjnicallback.cc b/src/rocksdb/java/rocksjni/comparatorjnicallback.cc
new file mode 100644
index 000000000..248b15d3a
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/comparatorjnicallback.cc
@@ -0,0 +1,638 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Comparator.
+
+#include "rocksjni/comparatorjnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+ComparatorJniCallback::ComparatorJniCallback(
+ JNIEnv* env, jobject jcomparator,
+ const ComparatorJniCallbackOptions* options)
+ : JniCallback(env, jcomparator),
+ m_options(options) {
+  // cache the AbstractComparatorJniBridge class as we will reuse it
+  // many times for each callback
+  m_abstract_comparator_jni_bridge_clazz = static_cast<jclass>(
+      env->NewGlobalRef(AbstractComparatorJniBridge::getJClass(env)));
+
+  // Note: The name of a Comparator will not change during its lifetime,
+  // so we cache it in a member variable
+ jmethodID jname_mid = AbstractComparatorJni::getNameMethodId(env);
+ if (jname_mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+ jstring js_name = (jstring)env->CallObjectMethod(m_jcallback_obj, jname_mid);
+ if (env->ExceptionCheck()) {
+ // exception thrown
+ return;
+ }
+ jboolean has_exception = JNI_FALSE;
+  m_name = JniUtil::copyString(env, js_name,
+                               &has_exception);  // also releases js_name
+ if (has_exception == JNI_TRUE) {
+ // exception thrown
+ return;
+ }
+
+  // cache the ByteBuffer class as we will reuse it many times
+  // for each callback
+ m_jbytebuffer_clazz =
+ static_cast<jclass>(env->NewGlobalRef(ByteBufferJni::getJClass(env)));
+
+ m_jcompare_mid = AbstractComparatorJniBridge::getCompareInternalMethodId(
+ env, m_abstract_comparator_jni_bridge_clazz);
+ if (m_jcompare_mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ m_jshortest_mid =
+ AbstractComparatorJniBridge::getFindShortestSeparatorInternalMethodId(
+ env, m_abstract_comparator_jni_bridge_clazz);
+ if (m_jshortest_mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+  m_jshort_mid =
+      AbstractComparatorJniBridge::getFindShortSuccessorInternalMethodId(
+          env, m_abstract_comparator_jni_bridge_clazz);
+ if (m_jshort_mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ // do we need reusable buffers?
+  if (m_options->max_reused_buffer_size > -1) {
+ if (m_options->reused_synchronisation_type
+ == ReusedSynchronisationType::THREAD_LOCAL) {
+ // buffers reused per thread
+ UnrefHandler unref = [](void* ptr) {
+ ThreadLocalBuf* tlb = reinterpret_cast<ThreadLocalBuf*>(ptr);
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* _env = JniUtil::getJniEnv(tlb->jvm, &attached_thread);
+ if (_env != nullptr) {
+ if (tlb->direct_buffer) {
+ void* buf = _env->GetDirectBufferAddress(tlb->jbuf);
+ delete[] static_cast<char*>(buf);
+ }
+ _env->DeleteGlobalRef(tlb->jbuf);
+ JniUtil::releaseJniEnv(tlb->jvm, attached_thread);
+ }
+ };
+
+ m_tl_buf_a = new ThreadLocalPtr(unref);
+ m_tl_buf_b = new ThreadLocalPtr(unref);
+
+ m_jcompare_buf_a = nullptr;
+ m_jcompare_buf_b = nullptr;
+ m_jshortest_buf_start = nullptr;
+ m_jshortest_buf_limit = nullptr;
+ m_jshort_buf_key = nullptr;
+
+ } else {
+      // buffers reused and shared across threads
+      const bool adaptive = m_options->reused_synchronisation_type ==
+                            ReusedSynchronisationType::ADAPTIVE_MUTEX;
+ mtx_compare = std::unique_ptr<port::Mutex>(new port::Mutex(adaptive));
+ mtx_shortest = std::unique_ptr<port::Mutex>(new port::Mutex(adaptive));
+ mtx_short = std::unique_ptr<port::Mutex>(new port::Mutex(adaptive));
+
+ m_jcompare_buf_a = env->NewGlobalRef(ByteBufferJni::construct(
+ env, m_options->direct_buffer, m_options->max_reused_buffer_size,
+ m_jbytebuffer_clazz));
+ if (m_jcompare_buf_a == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ m_jcompare_buf_b = env->NewGlobalRef(ByteBufferJni::construct(
+ env, m_options->direct_buffer, m_options->max_reused_buffer_size,
+ m_jbytebuffer_clazz));
+ if (m_jcompare_buf_b == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ m_jshortest_buf_start = env->NewGlobalRef(ByteBufferJni::construct(
+ env, m_options->direct_buffer, m_options->max_reused_buffer_size,
+ m_jbytebuffer_clazz));
+ if (m_jshortest_buf_start == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ m_jshortest_buf_limit = env->NewGlobalRef(ByteBufferJni::construct(
+ env, m_options->direct_buffer, m_options->max_reused_buffer_size,
+ m_jbytebuffer_clazz));
+ if (m_jshortest_buf_limit == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ m_jshort_buf_key = env->NewGlobalRef(ByteBufferJni::construct(
+ env, m_options->direct_buffer, m_options->max_reused_buffer_size,
+ m_jbytebuffer_clazz));
+ if (m_jshort_buf_key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ m_tl_buf_a = nullptr;
+ m_tl_buf_b = nullptr;
+ }
+
+ } else {
+ m_jcompare_buf_a = nullptr;
+ m_jcompare_buf_b = nullptr;
+ m_jshortest_buf_start = nullptr;
+ m_jshortest_buf_limit = nullptr;
+ m_jshort_buf_key = nullptr;
+
+ m_tl_buf_a = nullptr;
+ m_tl_buf_b = nullptr;
+ }
+}
+
+ComparatorJniCallback::~ComparatorJniCallback() {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ assert(env != nullptr);
+
+ env->DeleteGlobalRef(m_abstract_comparator_jni_bridge_clazz);
+
+ env->DeleteGlobalRef(m_jbytebuffer_clazz);
+
+ if (m_jcompare_buf_a != nullptr) {
+ if (m_options->direct_buffer) {
+ void* buf = env->GetDirectBufferAddress(m_jcompare_buf_a);
+ delete[] static_cast<char*>(buf);
+ }
+ env->DeleteGlobalRef(m_jcompare_buf_a);
+ }
+
+ if (m_jcompare_buf_b != nullptr) {
+ if (m_options->direct_buffer) {
+ void* buf = env->GetDirectBufferAddress(m_jcompare_buf_b);
+ delete[] static_cast<char*>(buf);
+ }
+ env->DeleteGlobalRef(m_jcompare_buf_b);
+ }
+
+ if (m_jshortest_buf_start != nullptr) {
+ if (m_options->direct_buffer) {
+ void* buf = env->GetDirectBufferAddress(m_jshortest_buf_start);
+ delete[] static_cast<char*>(buf);
+ }
+ env->DeleteGlobalRef(m_jshortest_buf_start);
+ }
+
+ if (m_jshortest_buf_limit != nullptr) {
+ if (m_options->direct_buffer) {
+ void* buf = env->GetDirectBufferAddress(m_jshortest_buf_limit);
+ delete[] static_cast<char*>(buf);
+ }
+ env->DeleteGlobalRef(m_jshortest_buf_limit);
+ }
+
+ if (m_jshort_buf_key != nullptr) {
+ if (m_options->direct_buffer) {
+ void* buf = env->GetDirectBufferAddress(m_jshort_buf_key);
+ delete[] static_cast<char*>(buf);
+ }
+ env->DeleteGlobalRef(m_jshort_buf_key);
+ }
+
+ if (m_tl_buf_a != nullptr) {
+ delete m_tl_buf_a;
+ }
+
+ if (m_tl_buf_b != nullptr) {
+ delete m_tl_buf_b;
+ }
+
+ releaseJniEnv(attached_thread);
+}
+
+const char* ComparatorJniCallback::Name() const {
+ return m_name.get();
+}
+
+int ComparatorJniCallback::Compare(const Slice& a, const Slice& b) const {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ assert(env != nullptr);
+
+ const bool reuse_jbuf_a =
+ static_cast<int64_t>(a.size()) <= m_options->max_reused_buffer_size;
+ const bool reuse_jbuf_b =
+ static_cast<int64_t>(b.size()) <= m_options->max_reused_buffer_size;
+
+ MaybeLockForReuse(mtx_compare, reuse_jbuf_a || reuse_jbuf_b);
+
+ jobject jcompare_buf_a = GetBuffer(env, a, reuse_jbuf_a, m_tl_buf_a, m_jcompare_buf_a);
+ if (jcompare_buf_a == nullptr) {
+ // exception occurred
+ MaybeUnlockForReuse(mtx_compare, reuse_jbuf_a || reuse_jbuf_b);
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return 0;
+ }
+
+ jobject jcompare_buf_b = GetBuffer(env, b, reuse_jbuf_b, m_tl_buf_b, m_jcompare_buf_b);
+ if (jcompare_buf_b == nullptr) {
+ // exception occurred
+ if (!reuse_jbuf_a) {
+ DeleteBuffer(env, jcompare_buf_a);
+ }
+ MaybeUnlockForReuse(mtx_compare, reuse_jbuf_a || reuse_jbuf_b);
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return 0;
+ }
+
+ jint result =
+ env->CallStaticIntMethod(
+ m_abstract_comparator_jni_bridge_clazz, m_jcompare_mid,
+ m_jcallback_obj,
+ jcompare_buf_a, reuse_jbuf_a ? a.size() : -1,
+ jcompare_buf_b, reuse_jbuf_b ? b.size() : -1);
+
+ if (env->ExceptionCheck()) {
+    // exception thrown from CallStaticIntMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ result = 0; // we could not get a result from java callback so use 0
+ }
+
+ if (!reuse_jbuf_a) {
+ DeleteBuffer(env, jcompare_buf_a);
+ }
+ if (!reuse_jbuf_b) {
+ DeleteBuffer(env, jcompare_buf_b);
+ }
+
+ MaybeUnlockForReuse(mtx_compare, reuse_jbuf_a || reuse_jbuf_b);
+
+ releaseJniEnv(attached_thread);
+
+ return result;
+}
+
+void ComparatorJniCallback::FindShortestSeparator(
+ std::string* start, const Slice& limit) const {
+ if (start == nullptr) {
+ return;
+ }
+
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ assert(env != nullptr);
+
+ const bool reuse_jbuf_start =
+ static_cast<int64_t>(start->length()) <= m_options->max_reused_buffer_size;
+ const bool reuse_jbuf_limit =
+ static_cast<int64_t>(limit.size()) <= m_options->max_reused_buffer_size;
+
+ MaybeLockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit);
+
+ Slice sstart(start->data(), start->length());
+ jobject j_start_buf = GetBuffer(env, sstart, reuse_jbuf_start, m_tl_buf_a, m_jshortest_buf_start);
+ if (j_start_buf == nullptr) {
+ // exception occurred
+ MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit);
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+
+ jobject j_limit_buf = GetBuffer(env, limit, reuse_jbuf_limit, m_tl_buf_b, m_jshortest_buf_limit);
+ if (j_limit_buf == nullptr) {
+ // exception occurred
+ if (!reuse_jbuf_start) {
+ DeleteBuffer(env, j_start_buf);
+ }
+ MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit);
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+
+ jint jstart_len = env->CallStaticIntMethod(
+ m_abstract_comparator_jni_bridge_clazz, m_jshortest_mid,
+ m_jcallback_obj,
+ j_start_buf, reuse_jbuf_start ? start->length() : -1,
+ j_limit_buf, reuse_jbuf_limit ? limit.size() : -1);
+
+ if (env->ExceptionCheck()) {
+    // exception thrown from CallStaticIntMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+
+ } else if (static_cast<size_t>(jstart_len) != start->length()) {
+ // start buffer has changed in Java, so update `start` with the result
+ bool copy_from_non_direct = false;
+ if (reuse_jbuf_start) {
+ // reused a buffer
+ if (m_options->direct_buffer) {
+ // reused direct buffer
+ void* start_buf = env->GetDirectBufferAddress(j_start_buf);
+ if (start_buf == nullptr) {
+ if (!reuse_jbuf_start) {
+ DeleteBuffer(env, j_start_buf);
+ }
+ if (!reuse_jbuf_limit) {
+ DeleteBuffer(env, j_limit_buf);
+ }
+ MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit);
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "Unable to get Direct Buffer Address");
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+ start->assign(static_cast<const char*>(start_buf), jstart_len);
+
+      } else {
+        // reused non-direct buffer
+ copy_from_non_direct = true;
+ }
+ } else {
+ // there was a new buffer
+ if (m_options->direct_buffer) {
+ // it was direct... don't forget to potentially truncate the `start` string
+ start->resize(jstart_len);
+ } else {
+ // it was non-direct
+ copy_from_non_direct = true;
+ }
+ }
+
+ if (copy_from_non_direct) {
+ jbyteArray jarray = ByteBufferJni::array(env, j_start_buf,
+ m_jbytebuffer_clazz);
+ if (jarray == nullptr) {
+ if (!reuse_jbuf_start) {
+ DeleteBuffer(env, j_start_buf);
+ }
+ if (!reuse_jbuf_limit) {
+ DeleteBuffer(env, j_limit_buf);
+ }
+ MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit);
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+ jboolean has_exception = JNI_FALSE;
+      JniUtil::byteString<std::string>(
+          env, jarray,
+          [start, jstart_len](const char* data, const size_t) {
+            return start->assign(data, static_cast<size_t>(jstart_len));
+          },
+          &has_exception);
+ env->DeleteLocalRef(jarray);
+ if (has_exception == JNI_TRUE) {
+ if (!reuse_jbuf_start) {
+ DeleteBuffer(env, j_start_buf);
+ }
+ if (!reuse_jbuf_limit) {
+ DeleteBuffer(env, j_limit_buf);
+ }
+ env->ExceptionDescribe(); // print out exception to stderr
+ MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit);
+ releaseJniEnv(attached_thread);
+ return;
+ }
+ }
+ }
+
+ if (!reuse_jbuf_start) {
+ DeleteBuffer(env, j_start_buf);
+ }
+ if (!reuse_jbuf_limit) {
+ DeleteBuffer(env, j_limit_buf);
+ }
+
+ MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit);
+
+ releaseJniEnv(attached_thread);
+}
+
+void ComparatorJniCallback::FindShortSuccessor(
+ std::string* key) const {
+ if (key == nullptr) {
+ return;
+ }
+
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ assert(env != nullptr);
+
+ const bool reuse_jbuf_key =
+ static_cast<int64_t>(key->length()) <= m_options->max_reused_buffer_size;
+
+ MaybeLockForReuse(mtx_short, reuse_jbuf_key);
+
+ Slice skey(key->data(), key->length());
+ jobject j_key_buf = GetBuffer(env, skey, reuse_jbuf_key, m_tl_buf_a, m_jshort_buf_key);
+ if (j_key_buf == nullptr) {
+ // exception occurred
+ MaybeUnlockForReuse(mtx_short, reuse_jbuf_key);
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+
+ jint jkey_len = env->CallStaticIntMethod(
+ m_abstract_comparator_jni_bridge_clazz, m_jshort_mid,
+ m_jcallback_obj,
+ j_key_buf, reuse_jbuf_key ? key->length() : -1);
+
+ if (env->ExceptionCheck()) {
+    // exception thrown from CallStaticIntMethod
+ if (!reuse_jbuf_key) {
+ DeleteBuffer(env, j_key_buf);
+ }
+ MaybeUnlockForReuse(mtx_short, reuse_jbuf_key);
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+  }
+
+ if (static_cast<size_t>(jkey_len) != key->length()) {
+ // key buffer has changed in Java, so update `key` with the result
+ bool copy_from_non_direct = false;
+ if (reuse_jbuf_key) {
+ // reused a buffer
+ if (m_options->direct_buffer) {
+ // reused direct buffer
+ void* key_buf = env->GetDirectBufferAddress(j_key_buf);
+ if (key_buf == nullptr) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "Unable to get Direct Buffer Address");
+ if (!reuse_jbuf_key) {
+ DeleteBuffer(env, j_key_buf);
+ }
+ MaybeUnlockForReuse(mtx_short, reuse_jbuf_key);
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+ key->assign(static_cast<const char*>(key_buf), jkey_len);
+ } else {
+ // reused non-direct buffer
+ copy_from_non_direct = true;
+ }
+ } else {
+ // there was a new buffer
+ if (m_options->direct_buffer) {
+ // it was direct... don't forget to potentially truncate the `key` string
+ key->resize(jkey_len);
+ } else {
+ // it was non-direct
+ copy_from_non_direct = true;
+ }
+ }
+
+ if (copy_from_non_direct) {
+ jbyteArray jarray = ByteBufferJni::array(env, j_key_buf,
+ m_jbytebuffer_clazz);
+      if (jarray == nullptr) {
+        if (!reuse_jbuf_key) {
+ DeleteBuffer(env, j_key_buf);
+ }
+ MaybeUnlockForReuse(mtx_short, reuse_jbuf_key);
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+ jboolean has_exception = JNI_FALSE;
+      JniUtil::byteString<std::string>(
+          env, jarray,
+          [key, jkey_len](const char* data, const size_t) {
+            return key->assign(data, static_cast<size_t>(jkey_len));
+          },
+          &has_exception);
+ env->DeleteLocalRef(jarray);
+ if (has_exception == JNI_TRUE) {
+ if (!reuse_jbuf_key) {
+ DeleteBuffer(env, j_key_buf);
+ }
+ MaybeUnlockForReuse(mtx_short, reuse_jbuf_key);
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+ }
+ }
+
+ if (!reuse_jbuf_key) {
+ DeleteBuffer(env, j_key_buf);
+ }
+
+ MaybeUnlockForReuse(mtx_short, reuse_jbuf_key);
+
+ releaseJniEnv(attached_thread);
+}
+
+inline void ComparatorJniCallback::MaybeLockForReuse(
+ const std::unique_ptr<port::Mutex>& mutex, const bool cond) const {
+ // no need to lock if using thread_local
+ if (m_options->reused_synchronisation_type != ReusedSynchronisationType::THREAD_LOCAL
+ && cond) {
+ mutex.get()->Lock();
+ }
+}
+
+inline void ComparatorJniCallback::MaybeUnlockForReuse(
+ const std::unique_ptr<port::Mutex>& mutex, const bool cond) const {
+ // no need to unlock if using thread_local
+ if (m_options->reused_synchronisation_type != ReusedSynchronisationType::THREAD_LOCAL
+ && cond) {
+ mutex.get()->Unlock();
+ }
+}
+
+jobject ComparatorJniCallback::GetBuffer(JNIEnv* env, const Slice& src,
+ bool reuse_buffer, ThreadLocalPtr* tl_buf, jobject jreuse_buffer) const {
+ if (reuse_buffer) {
+ if (m_options->reused_synchronisation_type
+ == ReusedSynchronisationType::THREAD_LOCAL) {
+      // reuse thread-local buffer
+ ThreadLocalBuf* tlb = reinterpret_cast<ThreadLocalBuf*>(tl_buf->Get());
+ if (tlb == nullptr) {
+ // thread-local buffer has not yet been created, so create it
+ jobject jtl_buf = env->NewGlobalRef(ByteBufferJni::construct(
+ env, m_options->direct_buffer, m_options->max_reused_buffer_size,
+ m_jbytebuffer_clazz));
+ if (jtl_buf == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ tlb = new ThreadLocalBuf(m_jvm, m_options->direct_buffer, jtl_buf);
+ tl_buf->Reset(tlb);
+ }
+ return ReuseBuffer(env, src, tlb->jbuf);
+    } else {
+      // reuse class member buffer
+ return ReuseBuffer(env, src, jreuse_buffer);
+ }
+  } else {
+    // new buffer
+ return NewBuffer(env, src);
+ }
+}
+
+jobject ComparatorJniCallback::ReuseBuffer(
+ JNIEnv* env, const Slice& src, jobject jreuse_buffer) const {
+ // we can reuse the buffer
+ if (m_options->direct_buffer) {
+ // copy into direct buffer
+ void* buf = env->GetDirectBufferAddress(jreuse_buffer);
+ if (buf == nullptr) {
+      // either the memory region is undefined, the given object is not a
+      // direct java.nio.Buffer, or JNI access to direct buffers is not
+      // supported by this virtual machine
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "Unable to get Direct Buffer Address");
+ return nullptr;
+ }
+ memcpy(buf, src.data(), src.size());
+ } else {
+ // copy into non-direct buffer
+ const jbyteArray jarray = ByteBufferJni::array(env, jreuse_buffer,
+ m_jbytebuffer_clazz);
+ if (jarray == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+ env->SetByteArrayRegion(jarray, 0, static_cast<jsize>(src.size()),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(src.data())));
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(jarray);
+ return nullptr;
+ }
+ env->DeleteLocalRef(jarray);
+ }
+ return jreuse_buffer;
+}
+
+jobject ComparatorJniCallback::NewBuffer(JNIEnv* env, const Slice& src) const {
+ // we need a new buffer
+ jobject jbuf = ByteBufferJni::constructWith(env, m_options->direct_buffer,
+ src.data(), src.size(), m_jbytebuffer_clazz);
+ if (jbuf == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+ return jbuf;
+}
+
+void ComparatorJniCallback::DeleteBuffer(JNIEnv* env, jobject jbuffer) const {
+ env->DeleteLocalRef(jbuffer);
+}
+
+} // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/comparatorjnicallback.h b/src/rocksdb/java/rocksjni/comparatorjnicallback.h
new file mode 100644
index 000000000..2e27de008
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/comparatorjnicallback.h
@@ -0,0 +1,137 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Comparator
+
+#ifndef JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_
+#define JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_
+
+#include <jni.h>
+#include <memory>
+#include <string>
+#include "rocksjni/jnicallback.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/slice.h"
+#include "port/port.h"
+#include "util/thread_local.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+enum ReusedSynchronisationType {
+ /**
+ * Standard mutex.
+ */
+ MUTEX,
+
+ /**
+ * Use adaptive mutex, which spins in the user space before resorting
+ * to kernel. This could reduce context switch when the mutex is not
+ * heavily contended. However, if the mutex is hot, we could end up
+ * wasting spin time.
+ */
+ ADAPTIVE_MUTEX,
+
+ /**
+ * There is a reused buffer per-thread.
+ */
+ THREAD_LOCAL
+};
+
+struct ComparatorJniCallbackOptions {
+ // Set the synchronisation type used to guard the reused buffers.
+ // Only used if max_reused_buffer_size > 0.
+ // Default: ADAPTIVE_MUTEX
+ ReusedSynchronisationType reused_synchronisation_type =
+ ReusedSynchronisationType::ADAPTIVE_MUTEX;
+
+ // Indicates if a direct byte buffer (i.e. outside of the normal
+ // garbage-collected heap) is used for the callbacks to Java,
+ // as opposed to a non-direct byte buffer which is a wrapper around
+ // an on-heap byte[].
+ // Default: true
+ bool direct_buffer = true;
+
+ // Maximum size of a buffer (in bytes) that will be reused.
+ // Comparators will use 5 of these buffers,
+ // so the retained memory size will be 5 * max_reused_buffer_size.
+ // When a buffer is needed for transferring data to a callback,
+ // if it requires less than max_reused_buffer_size, then an
+ // existing buffer will be reused, else a new buffer will be
+ // allocated just for that callback. -1 to disable.
+ // Default: 64 bytes
+ int32_t max_reused_buffer_size = 64;
+};
+
+/**
+ * This class acts as a bridge between C++
+ * and Java. The methods in this class will be
+ * called back from the RocksDB storage engine (C++);
+ * we then call back to the appropriate Java method.
+ * This enables Comparators to be implemented in Java.
+ *
+ * The design of this Comparator caches the Java ByteBuffer
+ * objects that are used in the compare, findShortestSeparator
+ * and findShortSuccessor method callbacks. Instead of creating
+ * new objects for each callback we reuse them, which makes us
+ * a lot faster; unfortunately this means that we have to
+ * introduce independent locking in regions of each of those
+ * methods via the mutexes mtx_compare, mtx_shortest and
+ * mtx_short respectively.
+ */
+class ComparatorJniCallback : public JniCallback, public Comparator {
+ public:
+ ComparatorJniCallback(
+ JNIEnv* env, jobject jcomparator,
+ const ComparatorJniCallbackOptions* options);
+ ~ComparatorJniCallback();
+ virtual const char* Name() const;
+ virtual int Compare(const Slice& a, const Slice& b) const;
+ virtual void FindShortestSeparator(
+ std::string* start, const Slice& limit) const;
+ virtual void FindShortSuccessor(std::string* key) const;
+ const ComparatorJniCallbackOptions* m_options;
+
+ private:
+ struct ThreadLocalBuf {
+ ThreadLocalBuf(JavaVM* _jvm, bool _direct_buffer, jobject _jbuf) :
+ jvm(_jvm), direct_buffer(_direct_buffer), jbuf(_jbuf) {}
+ JavaVM* jvm;
+ bool direct_buffer;
+ jobject jbuf;
+ };
+ inline void MaybeLockForReuse(const std::unique_ptr<port::Mutex>& mutex,
+ const bool cond) const;
+ inline void MaybeUnlockForReuse(const std::unique_ptr<port::Mutex>& mutex,
+ const bool cond) const;
+ jobject GetBuffer(JNIEnv* env, const Slice& src, bool reuse_buffer,
+ ThreadLocalPtr* tl_buf, jobject jreuse_buffer) const;
+ jobject ReuseBuffer(JNIEnv* env, const Slice& src,
+ jobject jreuse_buffer) const;
+ jobject NewBuffer(JNIEnv* env, const Slice& src) const;
+ void DeleteBuffer(JNIEnv* env, jobject jbuffer) const;
+ // used for synchronisation in compare method
+ std::unique_ptr<port::Mutex> mtx_compare;
+ // used for synchronisation in findShortestSeparator method
+ std::unique_ptr<port::Mutex> mtx_shortest;
+ // used for synchronisation in findShortSuccessor method
+ std::unique_ptr<port::Mutex> mtx_short;
+ std::unique_ptr<const char[]> m_name;
+  // TODO(AR) could the following five members be made static somehow?
+  jclass m_abstract_comparator_jni_bridge_clazz;
+  // TODO(AR) m_jbytebuffer_clazz could be cached globally for the entire VM
+  //     if we switch more APIs to use ByteBuffer
+  jclass m_jbytebuffer_clazz;
+  jmethodID m_jcompare_mid;
+  jmethodID m_jshortest_mid;
+  jmethodID m_jshort_mid;
+ jobject m_jcompare_buf_a;
+ jobject m_jcompare_buf_b;
+ jobject m_jshortest_buf_start;
+ jobject m_jshortest_buf_limit;
+ jobject m_jshort_buf_key;
+ ThreadLocalPtr* m_tl_buf_a;
+ ThreadLocalPtr* m_tl_buf_b;
+};
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_
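
The three ComparatorJniCallbackOptions fields are configured from Java. A hedged sketch, assuming recent RocksJava where ComparatorOptions mirrors them as fluent setters (setUseDirectBuffer, setMaxReusedBufferSize, setReusedSynchronisationType; ComparatorTuning is hypothetical):

```java
import org.rocksdb.ComparatorOptions;
import org.rocksdb.ReusedSynchronisationType;
import org.rocksdb.RocksDB;

public class ComparatorTuning {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final ComparatorOptions copt = new ComparatorOptions()) {
      copt.setUseDirectBuffer(true)       // direct_buffer
          .setMaxReusedBufferSize(64)     // max_reused_buffer_size; -1 disables reuse
          .setReusedSynchronisationType(  // reused_synchronisation_type
              ReusedSynchronisationType.THREAD_LOCAL);
      // pass copt to the constructor of an AbstractComparator subclass
    }
  }
}
```

With THREAD_LOCAL, each thread gets its own pair of reused buffers and the mutexes above are never taken.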
diff --git a/src/rocksdb/java/rocksjni/compression_options.cc b/src/rocksdb/java/rocksjni/compression_options.cc
new file mode 100644
index 000000000..4fed5ba5f
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compression_options.cc
@@ -0,0 +1,164 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompressionOptions.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompressionOptions.h"
+#include "rocksdb/advanced_options.h"
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: newCompressionOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(
+ JNIEnv*, jclass) {
+ const auto* opt = new ROCKSDB_NAMESPACE::CompressionOptions();
+ return reinterpret_cast<jlong>(opt);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setWindowBits
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompressionOptions_setWindowBits(
+ JNIEnv*, jobject, jlong jhandle, jint jwindow_bits) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ opt->window_bits = static_cast<int>(jwindow_bits);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: windowBits
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompressionOptions_windowBits(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ return static_cast<jint>(opt->window_bits);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setLevel
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompressionOptions_setLevel(
+ JNIEnv*, jobject, jlong jhandle, jint jlevel) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ opt->level = static_cast<int>(jlevel);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: level
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompressionOptions_level(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ return static_cast<jint>(opt->level);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setStrategy
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompressionOptions_setStrategy(
+ JNIEnv*, jobject, jlong jhandle, jint jstrategy) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ opt->strategy = static_cast<int>(jstrategy);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: strategy
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompressionOptions_strategy(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ return static_cast<jint>(opt->strategy);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setMaxDictBytes
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(
+ JNIEnv*, jobject, jlong jhandle, jint jmax_dict_bytes) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ opt->max_dict_bytes = static_cast<uint32_t>(jmax_dict_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: maxDictBytes
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompressionOptions_maxDictBytes(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ return static_cast<jint>(opt->max_dict_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setZstdMaxTrainBytes
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompressionOptions_setZstdMaxTrainBytes(
+ JNIEnv*, jobject, jlong jhandle, jint jzstd_max_train_bytes) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ opt->zstd_max_train_bytes = static_cast<uint32_t>(jzstd_max_train_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: zstdMaxTrainBytes
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompressionOptions_zstdMaxTrainBytes(
+    JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ return static_cast<jint>(opt->zstd_max_train_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setEnabled
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompressionOptions_setEnabled(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenabled) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+ opt->enabled = jenabled == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: enabled
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompressionOptions_enabled(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+  return static_cast<jboolean>(opt->enabled);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompressionOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+}
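
A hedged Java-side sketch of these CompressionOptions natives (standard RocksJava names; CompressionExample is hypothetical):

```java
import org.rocksdb.CompressionOptions;
import org.rocksdb.CompressionType;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class CompressionExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final CompressionOptions compressionOpts = new CompressionOptions();
         final Options options = new Options()) {
      compressionOpts.setMaxDictBytes(16 * 1024)            // setMaxDictBytes native above
                     .setZstdMaxTrainBytes(100 * 16 * 1024)
                     .setEnabled(true);                     // setEnabled native above
      options.setCompressionType(CompressionType.ZSTD_COMPRESSION)
             .setCompressionOptions(compressionOpts);
    }
  }
}
```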
diff --git a/src/rocksdb/java/rocksjni/env.cc b/src/rocksdb/java/rocksjni/env.cc
new file mode 100644
index 000000000..f450c560b
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/env.cc
@@ -0,0 +1,238 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling c++ ROCKSDB_NAMESPACE::Env methods from Java side.
+
+#include <jni.h>
+#include <vector>
+
+#include "portal.h"
+#include "rocksdb/env.h"
+#include "include/org_rocksdb_Env.h"
+#include "include/org_rocksdb_HdfsEnv.h"
+#include "include/org_rocksdb_RocksEnv.h"
+#include "include/org_rocksdb_RocksMemEnv.h"
+#include "include/org_rocksdb_TimedEnv.h"
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: getDefaultEnvInternal
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_Env_getDefaultEnvInternal(
+ JNIEnv*, jclass) {
+ return reinterpret_cast<jlong>(ROCKSDB_NAMESPACE::Env::Default());
+}
+
+/*
+ * Class: org_rocksdb_RocksEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksEnv_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* e = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ assert(e != nullptr);
+ delete e;
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: setBackgroundThreads
+ * Signature: (JIB)V
+ */
+void Java_org_rocksdb_Env_setBackgroundThreads(
+ JNIEnv*, jobject, jlong jhandle, jint jnum, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ rocks_env->SetBackgroundThreads(
+ static_cast<int>(jnum),
+ ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: getBackgroundThreads
+ * Signature: (JB)I
+ */
+jint Java_org_rocksdb_Env_getBackgroundThreads(
+ JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ const int num = rocks_env->GetBackgroundThreads(
+ ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value));
+ return static_cast<jint>(num);
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: getThreadPoolQueueLen
+ * Signature: (JB)I
+ */
+jint Java_org_rocksdb_Env_getThreadPoolQueueLen(
+ JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ const int queue_len = rocks_env->GetThreadPoolQueueLen(
+ ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value));
+ return static_cast<jint>(queue_len);
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: incBackgroundThreadsIfNeeded
+ * Signature: (JIB)V
+ */
+void Java_org_rocksdb_Env_incBackgroundThreadsIfNeeded(
+ JNIEnv*, jobject, jlong jhandle, jint jnum, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ rocks_env->IncBackgroundThreadsIfNeeded(
+ static_cast<int>(jnum),
+ ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: lowerThreadPoolIOPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Env_lowerThreadPoolIOPriority(
+ JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ rocks_env->LowerThreadPoolIOPriority(
+ ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: lowerThreadPoolCPUPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Env_lowerThreadPoolCPUPriority(
+ JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ rocks_env->LowerThreadPoolCPUPriority(
+ ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: getThreadList
+ * Signature: (J)[Lorg/rocksdb/ThreadStatus;
+ */
+jobjectArray Java_org_rocksdb_Env_getThreadList(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ std::vector<ROCKSDB_NAMESPACE::ThreadStatus> thread_status;
+ ROCKSDB_NAMESPACE::Status s = rocks_env->GetThreadList(&thread_status);
+ if (!s.ok()) {
+ // error, throw exception
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ // object[]
+ const jsize len = static_cast<jsize>(thread_status.size());
+ jobjectArray jthread_status = env->NewObjectArray(
+ len, ROCKSDB_NAMESPACE::ThreadStatusJni::getJClass(env), nullptr);
+ if (jthread_status == nullptr) {
+ // an exception occurred
+ return nullptr;
+ }
+ for (jsize i = 0; i < len; ++i) {
+ jobject jts =
+ ROCKSDB_NAMESPACE::ThreadStatusJni::construct(env, &(thread_status[i]));
+ env->SetObjectArrayElement(jthread_status, i, jts);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(jthread_status);
+ return nullptr;
+ }
+ }
+
+ return jthread_status;
+}
+
+/*
+ * Class: org_rocksdb_RocksMemEnv
+ * Method: createMemEnv
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(
+ JNIEnv*, jclass, jlong jbase_env_handle) {
+ auto* base_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jbase_env_handle);
+ return reinterpret_cast<jlong>(ROCKSDB_NAMESPACE::NewMemEnv(base_env));
+}
+
+/*
+ * Class: org_rocksdb_RocksMemEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksMemEnv_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* e = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ assert(e != nullptr);
+ delete e;
+}
+
+/*
+ * Class: org_rocksdb_HdfsEnv
+ * Method: createHdfsEnv
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_HdfsEnv_createHdfsEnv(
+ JNIEnv* env, jclass, jstring jfsname) {
+ jboolean has_exception = JNI_FALSE;
+ auto fsname =
+ ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jfsname, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return 0;
+ }
+ ROCKSDB_NAMESPACE::Env* hdfs_env;
+ ROCKSDB_NAMESPACE::Status s =
+ ROCKSDB_NAMESPACE::NewHdfsEnv(&hdfs_env, fsname);
+ if (!s.ok()) {
+ // error occurred
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+ }
+ return reinterpret_cast<jlong>(hdfs_env);
+}
+
+/*
+ * Class: org_rocksdb_HdfsEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_HdfsEnv_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* e = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ assert(e != nullptr);
+ delete e;
+}
+
+/*
+ * Class: org_rocksdb_TimedEnv
+ * Method: createTimedEnv
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TimedEnv_createTimedEnv(
+ JNIEnv*, jclass, jlong jbase_env_handle) {
+ auto* base_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jbase_env_handle);
+ return reinterpret_cast<jlong>(ROCKSDB_NAMESPACE::NewTimedEnv(base_env));
+}
+
+/*
+ * Class: org_rocksdb_TimedEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TimedEnv_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* e = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+ assert(e != nullptr);
+ delete e;
+}
+
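A short sketch of how the Env natives are exercised from Java (assuming the Env/Priority API of RocksJava; EnvExample is hypothetical):

```java
import org.rocksdb.Env;
import org.rocksdb.Priority;
import org.rocksdb.RocksDB;

public class EnvExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    final Env env = Env.getDefault();            // getDefaultEnvInternal() above
    env.setBackgroundThreads(4, Priority.LOW);   // compaction thread pool
    env.setBackgroundThreads(2, Priority.HIGH);  // flush thread pool
    System.out.println(
        "queued low-priority tasks: " + env.getThreadPoolQueueLen(Priority.LOW));
  }
}
```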
diff --git a/src/rocksdb/java/rocksjni/env_options.cc b/src/rocksdb/java/rocksjni/env_options.cc
new file mode 100644
index 000000000..2a9c8aeb0
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/env_options.cc
@@ -0,0 +1,298 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::EnvOptions methods
+// from Java side.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_EnvOptions.h"
+#include "rocksdb/env.h"
+
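+// Note: the setter macros below rely on the JNI function's parameter having
+// exactly the same name as the corresponding EnvOptions field.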
+#define ENV_OPTIONS_SET_BOOL(_jhandle, _opt) \
+ reinterpret_cast<ROCKSDB_NAMESPACE::EnvOptions *>(_jhandle)->_opt = \
+ static_cast<bool>(_opt)
+
+#define ENV_OPTIONS_SET_SIZE_T(_jhandle, _opt) \
+ reinterpret_cast<ROCKSDB_NAMESPACE::EnvOptions *>(_jhandle)->_opt = \
+ static_cast<size_t>(_opt)
+
+#define ENV_OPTIONS_SET_UINT64_T(_jhandle, _opt) \
+ reinterpret_cast<ROCKSDB_NAMESPACE::EnvOptions *>(_jhandle)->_opt = \
+ static_cast<uint64_t>(_opt)
+
+#define ENV_OPTIONS_GET(_jhandle, _opt) \
+ reinterpret_cast<ROCKSDB_NAMESPACE::EnvOptions *>(_jhandle)->_opt
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: newEnvOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_EnvOptions_newEnvOptions__(
+ JNIEnv*, jclass) {
+ auto *env_opt = new ROCKSDB_NAMESPACE::EnvOptions();
+ return reinterpret_cast<jlong>(env_opt);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: newEnvOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_EnvOptions_newEnvOptions__J(
+ JNIEnv*, jclass, jlong jdboptions_handle) {
+ auto *db_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions *>(jdboptions_handle);
+ auto *env_opt = new ROCKSDB_NAMESPACE::EnvOptions(*db_options);
+ return reinterpret_cast<jlong>(env_opt);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_EnvOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto *eo = reinterpret_cast<ROCKSDB_NAMESPACE::EnvOptions *>(jhandle);
+ assert(eo != nullptr);
+ delete eo;
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setUseMmapReads
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_EnvOptions_setUseMmapReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_mmap_reads) {
+ ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_reads);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: useMmapReads
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_EnvOptions_useMmapReads(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, use_mmap_reads);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setUseMmapWrites
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_EnvOptions_setUseMmapWrites(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_mmap_writes) {
+ ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_writes);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: useMmapWrites
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_EnvOptions_useMmapWrites(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, use_mmap_writes);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setUseDirectReads
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_EnvOptions_setUseDirectReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_direct_reads) {
+ ENV_OPTIONS_SET_BOOL(jhandle, use_direct_reads);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: useDirectReads
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_EnvOptions_useDirectReads(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, use_direct_reads);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setUseDirectWrites
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_EnvOptions_setUseDirectWrites(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_direct_writes) {
+ ENV_OPTIONS_SET_BOOL(jhandle, use_direct_writes);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: useDirectWrites
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_EnvOptions_useDirectWrites(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, use_direct_writes);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setAllowFallocate
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_EnvOptions_setAllowFallocate(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_fallocate) {
+ ENV_OPTIONS_SET_BOOL(jhandle, allow_fallocate);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: allowFallocate
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_EnvOptions_allowFallocate(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, allow_fallocate);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setSetFdCloexec
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_EnvOptions_setSetFdCloexec(
+ JNIEnv*, jobject, jlong jhandle, jboolean set_fd_cloexec) {
+ ENV_OPTIONS_SET_BOOL(jhandle, set_fd_cloexec);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setFdCloexec
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_EnvOptions_setFdCloexec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, set_fd_cloexec);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setBytesPerSync
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_EnvOptions_setBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jlong bytes_per_sync) {
+ ENV_OPTIONS_SET_UINT64_T(jhandle, bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: bytesPerSync
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_EnvOptions_bytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setFallocateWithKeepSize
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_EnvOptions_setFallocateWithKeepSize(
+ JNIEnv*, jobject, jlong jhandle, jboolean fallocate_with_keep_size) {
+ ENV_OPTIONS_SET_BOOL(jhandle, fallocate_with_keep_size);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: fallocateWithKeepSize
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_EnvOptions_fallocateWithKeepSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, fallocate_with_keep_size);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setCompactionReadaheadSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_EnvOptions_setCompactionReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle, jlong compaction_readahead_size) {
+ ENV_OPTIONS_SET_SIZE_T(jhandle, compaction_readahead_size);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: compactionReadaheadSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_EnvOptions_compactionReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, compaction_readahead_size);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setRandomAccessMaxBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_EnvOptions_setRandomAccessMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle, jlong random_access_max_buffer_size) {
+ ENV_OPTIONS_SET_SIZE_T(jhandle, random_access_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: randomAccessMaxBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_EnvOptions_randomAccessMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, random_access_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setWritableFileMaxBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_EnvOptions_setWritableFileMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle, jlong writable_file_max_buffer_size) {
+ ENV_OPTIONS_SET_SIZE_T(jhandle, writable_file_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: writableFileMaxBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, writable_file_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: setRateLimiter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_EnvOptions_setRateLimiter(
+ JNIEnv*, jobject, jlong jhandle, jlong rl_handle) {
+ auto *sptr_rate_limiter =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter> *>(
+ rl_handle);
+ auto *env_opt = reinterpret_cast<ROCKSDB_NAMESPACE::EnvOptions *>(jhandle);
+ env_opt->rate_limiter = sptr_rate_limiter->get();
+}
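
EnvOptions is most commonly consumed via SstFileWriter. A hedged sketch (assuming RocksJava's SstFileWriter(EnvOptions, Options) constructor; EnvOptionsExample is hypothetical):

```java
import org.rocksdb.EnvOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.SstFileWriter;

public class EnvOptionsExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final EnvOptions envOpts = new EnvOptions()
             .setUseDirectWrites(true)       // ENV_OPTIONS_SET_BOOL above
             .setBytesPerSync(1024 * 1024);  // ENV_OPTIONS_SET_UINT64_T above
         final Options options = new Options();
         final SstFileWriter writer = new SstFileWriter(envOpts, options)) {
      // writer.open(path); writer.put(key, value); writer.finish();
    }
  }
}
```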
diff --git a/src/rocksdb/java/rocksjni/filter.cc b/src/rocksdb/java/rocksjni/filter.cc
new file mode 100644
index 000000000..4143fc96f
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/filter.cc
@@ -0,0 +1,45 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::FilterPolicy.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+
+#include "include/org_rocksdb_BloomFilter.h"
+#include "include/org_rocksdb_Filter.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_BloomFilter
+ * Method: createNewBloomFilter
+ * Signature: (DZ)J
+ */
+jlong Java_org_rocksdb_BloomFilter_createNewBloomFilter(
+ JNIEnv* /*env*/, jclass /*jcls*/, jdouble bits_per_key,
+ jboolean use_block_base_builder) {
+ auto* sptr_filter =
+ new std::shared_ptr<const ROCKSDB_NAMESPACE::FilterPolicy>(
+ ROCKSDB_NAMESPACE::NewBloomFilterPolicy(bits_per_key,
+ use_block_base_builder));
+ return reinterpret_cast<jlong>(sptr_filter);
+}
+
+/*
+ * Class: org_rocksdb_Filter
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Filter_disposeInternal(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* handle =
+ reinterpret_cast<std::shared_ptr<const ROCKSDB_NAMESPACE::FilterPolicy>*>(
+ jhandle);
+ delete handle; // delete std::shared_ptr
+}
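
A hedged sketch of configuring the Bloom filter from Java (assuming the 6.x name BlockBasedTableConfig#setFilterPolicy, which older releases called setFilter; BloomExample is hypothetical):

```java
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.BloomFilter;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class BloomExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final BloomFilter bloom = new BloomFilter(10, false);  // 10 bits/key, full-format filter
         final Options options = new Options()) {
      options.setTableFormatConfig(
          new BlockBasedTableConfig().setFilterPolicy(bloom));
    }  // closing the filter deletes the shared_ptr allocated above
  }
}
```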
diff --git a/src/rocksdb/java/rocksjni/ingest_external_file_options.cc b/src/rocksdb/java/rocksjni/ingest_external_file_options.cc
new file mode 100644
index 000000000..ceaa6b179
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/ingest_external_file_options.cc
@@ -0,0 +1,196 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::IngestExternalFileOptions.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_IngestExternalFileOptions.h"
+#include "rocksdb/options.h"
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: newIngestExternalFileOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__(
+ JNIEnv*, jclass) {
+ auto* options = new ROCKSDB_NAMESPACE::IngestExternalFileOptions();
+ return reinterpret_cast<jlong>(options);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: newIngestExternalFileOptions
+ * Signature: (ZZZZ)J
+ */
+jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__ZZZZ(
+ JNIEnv*, jclass, jboolean jmove_files,
+ jboolean jsnapshot_consistency, jboolean jallow_global_seqno,
+ jboolean jallow_blocking_flush) {
+ auto* options = new ROCKSDB_NAMESPACE::IngestExternalFileOptions();
+ options->move_files = static_cast<bool>(jmove_files);
+ options->snapshot_consistency = static_cast<bool>(jsnapshot_consistency);
+ options->allow_global_seqno = static_cast<bool>(jallow_global_seqno);
+ options->allow_blocking_flush = static_cast<bool>(jallow_blocking_flush);
+ return reinterpret_cast<jlong>(options);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: moveFiles
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_IngestExternalFileOptions_moveFiles(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+ return static_cast<jboolean>(options->move_files);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: setMoveFiles
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_IngestExternalFileOptions_setMoveFiles(
+ JNIEnv*, jobject, jlong jhandle, jboolean jmove_files) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+ options->move_files = static_cast<bool>(jmove_files);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: snapshotConsistency
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_IngestExternalFileOptions_snapshotConsistency(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+ return static_cast<jboolean>(options->snapshot_consistency);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: setSnapshotConsistency
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_IngestExternalFileOptions_setSnapshotConsistency(
+ JNIEnv*, jobject, jlong jhandle, jboolean jsnapshot_consistency) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+ options->snapshot_consistency = static_cast<bool>(jsnapshot_consistency);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: allowGlobalSeqNo
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_IngestExternalFileOptions_allowGlobalSeqNo(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+ return static_cast<jboolean>(options->allow_global_seqno);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: setAllowGlobalSeqNo
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_IngestExternalFileOptions_setAllowGlobalSeqNo(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_global_seqno) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+ options->allow_global_seqno = static_cast<bool>(jallow_global_seqno);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: allowBlockingFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_IngestExternalFileOptions_allowBlockingFlush(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+ return static_cast<jboolean>(options->allow_blocking_flush);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: setAllowBlockingFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_IngestExternalFileOptions_setAllowBlockingFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_blocking_flush) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+ options->allow_blocking_flush = static_cast<bool>(jallow_blocking_flush);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: ingestBehind
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_IngestExternalFileOptions_ingestBehind(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+  return static_cast<jboolean>(options->ingest_behind);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: setIngestBehind
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_IngestExternalFileOptions_setIngestBehind(
+ JNIEnv*, jobject, jlong jhandle, jboolean jingest_behind) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+  options->ingest_behind = static_cast<bool>(jingest_behind);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: writeGlobalSeqno
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_IngestExternalFileOptions_writeGlobalSeqno(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+  return static_cast<jboolean>(options->write_global_seqno);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: setWriteGlobalSeqno
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_IngestExternalFileOptions_setWriteGlobalSeqno(
+    JNIEnv*, jobject, jlong jhandle, jboolean jwrite_global_seqno) {
+  auto* options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+  options->write_global_seqno = static_cast<bool>(jwrite_global_seqno);
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_IngestExternalFileOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+ delete options;
+}
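
A short Java sketch of driving these options through
RocksDB.ingestExternalFile; the DB path and SST path are placeholders,
and the SST file would have to be produced beforehand (for example with
org.rocksdb.SstFileWriter):

import java.util.Arrays;
import org.rocksdb.IngestExternalFileOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class IngestSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/ingest-example-db");
         final IngestExternalFileOptions ingestOptions =
             new IngestExternalFileOptions()
                 .setMoveFiles(true)             // setMoveFiles above
                 .setSnapshotConsistency(true)
                 .setAllowGlobalSeqNo(true)
                 .setAllowBlockingFlush(true)) {
      db.ingestExternalFile(Arrays.asList("/tmp/example.sst"), ingestOptions);
    }
  }
}
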
diff --git a/src/rocksdb/java/rocksjni/iterator.cc b/src/rocksdb/java/rocksjni/iterator.cc
new file mode 100644
index 000000000..2935adc58
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/iterator.cc
@@ -0,0 +1,252 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling c++ ROCKSDB_NAMESPACE::Iterator methods from Java side.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <algorithm>
+
+#include "include/org_rocksdb_RocksIterator.h"
+#include "rocksdb/iterator.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksIterator_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ assert(it != nullptr);
+ delete it;
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: isValid0
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_RocksIterator_isValid0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle)->Valid();
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: seekToFirst0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksIterator_seekToFirst0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle)->SeekToFirst();
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: seekToLast0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksIterator_seekToLast0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle)->SeekToLast();
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: next0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksIterator_next0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle)->Next();
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: prev0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksIterator_prev0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle)->Prev();
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: seek0
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_RocksIterator_seek0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle, jbyteArray jtarget,
+ jint jtarget_len) {
+ jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
+ if (target == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::Slice target_slice(reinterpret_cast<char*>(target),
+ jtarget_len);
+
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ it->Seek(target_slice);
+
+ env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: seekDirect0
+ * Signature: (JLjava/nio/ByteBuffer;II)V
+ */
+void Java_org_rocksdb_RocksIterator_seekDirect0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle, jobject jtarget,
+ jint jtarget_off,
+ jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ auto seek = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) {
+ it->Seek(target_slice);
+ };
+ ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seek, env, jtarget, jtarget_off,
+ jtarget_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: seekForPrevDirect0
+ * Signature: (JLjava/nio/ByteBuffer;II)V
+ */
+void Java_org_rocksdb_RocksIterator_seekForPrevDirect0(
+ JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget,
+ jint jtarget_off, jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ auto seekPrev = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) {
+ it->SeekForPrev(target_slice);
+ };
+ ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seekPrev, env, jtarget, jtarget_off,
+ jtarget_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: seekForPrev0
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_RocksIterator_seekForPrev0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle,
+ jbyteArray jtarget,
+ jint jtarget_len) {
+ jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
+ if (target == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::Slice target_slice(reinterpret_cast<char*>(target),
+ jtarget_len);
+
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ it->SeekForPrev(target_slice);
+
+ env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: status0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksIterator_status0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ ROCKSDB_NAMESPACE::Status s = it->status();
+
+ if (s.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: key0
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_RocksIterator_key0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ ROCKSDB_NAMESPACE::Slice key_slice = it->key();
+
+ jbyteArray jkey = env->NewByteArray(static_cast<jsize>(key_slice.size()));
+ if (jkey == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetByteArrayRegion(
+ jkey, 0, static_cast<jsize>(key_slice.size()),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(key_slice.data())));
+ return jkey;
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: keyDirect0
+ * Signature: (JLjava/nio/ByteBuffer;II)I
+ */
+jint Java_org_rocksdb_RocksIterator_keyDirect0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle, jobject jtarget,
+ jint jtarget_off,
+ jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ ROCKSDB_NAMESPACE::Slice key_slice = it->key();
+ return ROCKSDB_NAMESPACE::JniUtil::copyToDirect(env, key_slice, jtarget,
+ jtarget_off, jtarget_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: value0
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_RocksIterator_value0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ ROCKSDB_NAMESPACE::Slice value_slice = it->value();
+
+ jbyteArray jkeyValue =
+ env->NewByteArray(static_cast<jsize>(value_slice.size()));
+ if (jkeyValue == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetByteArrayRegion(
+ jkeyValue, 0, static_cast<jsize>(value_slice.size()),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value_slice.data())));
+ return jkeyValue;
+}
+
+/*
+ * Class: org_rocksdb_RocksIterator
+ * Method: valueDirect0
+ * Signature: (JLjava/nio/ByteBuffer;II)I
+ */
+jint Java_org_rocksdb_RocksIterator_valueDirect0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle, jobject jtarget,
+ jint jtarget_off,
+ jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ ROCKSDB_NAMESPACE::Slice value_slice = it->value();
+ return ROCKSDB_NAMESPACE::JniUtil::copyToDirect(env, value_slice, jtarget,
+ jtarget_off, jtarget_len);
+}
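
A small Java usage sketch for the RocksIterator methods bridged above
(the DB path and key/value bytes are placeholders):

import java.nio.charset.StandardCharsets;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;

public class IteratorSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/iterator-example-db")) {
      db.put("k1".getBytes(StandardCharsets.UTF_8),
             "v1".getBytes(StandardCharsets.UTF_8));
      try (final RocksIterator it = db.newIterator()) {
        // seekToFirst/isValid/next map onto the *0 native methods above
        for (it.seekToFirst(); it.isValid(); it.next()) {
          System.out.printf("%s=%s%n",
              new String(it.key(), StandardCharsets.UTF_8),
              new String(it.value(), StandardCharsets.UTF_8));
        }
        it.status(); // throws RocksDBException if iteration failed
      }
    }
  }
}
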
diff --git a/src/rocksdb/java/rocksjni/jnicallback.cc b/src/rocksdb/java/rocksjni/jnicallback.cc
new file mode 100644
index 000000000..40fb4514d
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/jnicallback.cc
@@ -0,0 +1,53 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// JNI Callbacks from C++ to sub-classes of org.rocksdb.RocksCallbackObject
+
+#include <assert.h>
+#include "rocksjni/jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+JniCallback::JniCallback(JNIEnv* env, jobject jcallback_obj) {
+ // Note: jcallback_obj may be accessed by multiple threads,
+ // so we ref the jvm not the env
+ const jint rs = env->GetJavaVM(&m_jvm);
+ if(rs != JNI_OK) {
+ // exception thrown
+ return;
+ }
+
+ // Note: we may want to access the Java callback object instance
+ // across multiple method calls, so we create a global ref
+ assert(jcallback_obj != nullptr);
+ m_jcallback_obj = env->NewGlobalRef(jcallback_obj);
+  if(m_jcallback_obj == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+}
+
+JNIEnv* JniCallback::getJniEnv(jboolean* attached) const {
+ return JniUtil::getJniEnv(m_jvm, attached);
+}
+
+void JniCallback::releaseJniEnv(jboolean& attached) const {
+ JniUtil::releaseJniEnv(m_jvm, attached);
+}
+
+JniCallback::~JniCallback() {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ assert(env != nullptr);
+
+ if(m_jcallback_obj != nullptr) {
+ env->DeleteGlobalRef(m_jcallback_obj);
+ }
+
+ releaseJniEnv(attached_thread);
+}
+// @lint-ignore TXT4 T25377293 Grandfathered in
+} // namespace ROCKSDB_NAMESPACE \ No newline at end of file
diff --git a/src/rocksdb/java/rocksjni/jnicallback.h b/src/rocksdb/java/rocksjni/jnicallback.h
new file mode 100644
index 000000000..54264eade
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/jnicallback.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// JNI Callbacks from C++ to sub-classes of org.rocksdb.RocksCallbackObject
+
+#ifndef JAVA_ROCKSJNI_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_JNICALLBACK_H_
+
+#include <jni.h>
+
+#include "rocksdb/rocksdb_namespace.h"
+
+namespace ROCKSDB_NAMESPACE {
+class JniCallback {
+ public:
+ JniCallback(JNIEnv* env, jobject jcallback_obj);
+ virtual ~JniCallback();
+
+ protected:
+ JavaVM* m_jvm;
+ jobject m_jcallback_obj;
+ JNIEnv* getJniEnv(jboolean* attached) const;
+ void releaseJniEnv(jboolean& attached) const;
+};
+}  // namespace ROCKSDB_NAMESPACE
+
+// @lint-ignore TXT4 T25377293 Grandfathered in
+#endif // JAVA_ROCKSJNI_JNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/loggerjnicallback.cc b/src/rocksdb/java/rocksjni/loggerjnicallback.cc
new file mode 100644
index 000000000..e9a9ce689
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/loggerjnicallback.cc
@@ -0,0 +1,297 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Logger.
+
+#include "include/org_rocksdb_Logger.h"
+
+#include <cstdarg>
+#include <cstdio>
+#include "rocksjni/loggerjnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+LoggerJniCallback::LoggerJniCallback(JNIEnv* env, jobject jlogger)
+ : JniCallback(env, jlogger) {
+ m_jLogMethodId = LoggerJni::getLogMethodId(env);
+ if (m_jLogMethodId == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ jobject jdebug_level = InfoLogLevelJni::DEBUG_LEVEL(env);
+ if (jdebug_level == nullptr) {
+ // exception thrown: NoSuchFieldError, ExceptionInInitializerError
+ // or OutOfMemoryError
+ return;
+ }
+ m_jdebug_level = env->NewGlobalRef(jdebug_level);
+ if (m_jdebug_level == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ jobject jinfo_level = InfoLogLevelJni::INFO_LEVEL(env);
+ if (jinfo_level == nullptr) {
+ // exception thrown: NoSuchFieldError, ExceptionInInitializerError
+ // or OutOfMemoryError
+ return;
+ }
+ m_jinfo_level = env->NewGlobalRef(jinfo_level);
+ if (m_jinfo_level == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ jobject jwarn_level = InfoLogLevelJni::WARN_LEVEL(env);
+ if (jwarn_level == nullptr) {
+ // exception thrown: NoSuchFieldError, ExceptionInInitializerError
+ // or OutOfMemoryError
+ return;
+ }
+ m_jwarn_level = env->NewGlobalRef(jwarn_level);
+ if (m_jwarn_level == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ jobject jerror_level = InfoLogLevelJni::ERROR_LEVEL(env);
+ if (jerror_level == nullptr) {
+ // exception thrown: NoSuchFieldError, ExceptionInInitializerError
+ // or OutOfMemoryError
+ return;
+ }
+ m_jerror_level = env->NewGlobalRef(jerror_level);
+ if (m_jerror_level == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ jobject jfatal_level = InfoLogLevelJni::FATAL_LEVEL(env);
+ if (jfatal_level == nullptr) {
+ // exception thrown: NoSuchFieldError, ExceptionInInitializerError
+ // or OutOfMemoryError
+ return;
+ }
+ m_jfatal_level = env->NewGlobalRef(jfatal_level);
+ if (m_jfatal_level == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ jobject jheader_level = InfoLogLevelJni::HEADER_LEVEL(env);
+ if (jheader_level == nullptr) {
+ // exception thrown: NoSuchFieldError, ExceptionInInitializerError
+ // or OutOfMemoryError
+ return;
+ }
+ m_jheader_level = env->NewGlobalRef(jheader_level);
+ if (m_jheader_level == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+}
+
+void LoggerJniCallback::Logv(const char* /*format*/, va_list /*ap*/) {
+  // A no-op: this overload exists only because the method is pure virtual
+  // in Logger. All logging is routed through the overload below, which
+  // also receives the log level.
+}
+
+void LoggerJniCallback::Logv(const InfoLogLevel log_level, const char* format,
+ va_list ap) {
+ if (GetInfoLogLevel() <= log_level) {
+ // determine InfoLogLevel java enum instance
+ jobject jlog_level;
+ switch (log_level) {
+ case ROCKSDB_NAMESPACE::InfoLogLevel::DEBUG_LEVEL:
+ jlog_level = m_jdebug_level;
+ break;
+ case ROCKSDB_NAMESPACE::InfoLogLevel::INFO_LEVEL:
+ jlog_level = m_jinfo_level;
+ break;
+ case ROCKSDB_NAMESPACE::InfoLogLevel::WARN_LEVEL:
+ jlog_level = m_jwarn_level;
+ break;
+ case ROCKSDB_NAMESPACE::InfoLogLevel::ERROR_LEVEL:
+ jlog_level = m_jerror_level;
+ break;
+ case ROCKSDB_NAMESPACE::InfoLogLevel::FATAL_LEVEL:
+ jlog_level = m_jfatal_level;
+ break;
+ case ROCKSDB_NAMESPACE::InfoLogLevel::HEADER_LEVEL:
+ jlog_level = m_jheader_level;
+ break;
+ default:
+ jlog_level = m_jfatal_level;
+ break;
+ }
+
+ assert(format != nullptr);
+ const std::unique_ptr<char[]> msg = format_str(format, ap);
+
+ // pass msg to java callback handler
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ assert(env != nullptr);
+
+ jstring jmsg = env->NewStringUTF(msg.get());
+ if (jmsg == nullptr) {
+ // unable to construct string
+ if (env->ExceptionCheck()) {
+ env->ExceptionDescribe(); // print out exception to stderr
+ }
+ releaseJniEnv(attached_thread);
+ return;
+ }
+ if (env->ExceptionCheck()) {
+ // exception thrown: OutOfMemoryError
+ env->ExceptionDescribe(); // print out exception to stderr
+ env->DeleteLocalRef(jmsg);
+ releaseJniEnv(attached_thread);
+ return;
+ }
+
+ env->CallVoidMethod(m_jcallback_obj, m_jLogMethodId, jlog_level, jmsg);
+ if (env->ExceptionCheck()) {
+ // exception thrown
+ env->ExceptionDescribe(); // print out exception to stderr
+ env->DeleteLocalRef(jmsg);
+ releaseJniEnv(attached_thread);
+ return;
+ }
+
+ env->DeleteLocalRef(jmsg);
+ releaseJniEnv(attached_thread);
+ }
+}
+
+std::unique_ptr<char[]> LoggerJniCallback::format_str(const char* format,
+ va_list ap) const {
+ va_list ap_copy;
+
+ va_copy(ap_copy, ap);
+ const size_t required =
+ vsnprintf(nullptr, 0, format, ap_copy) + 1; // Extra space for '\0'
+ va_end(ap_copy);
+
+ std::unique_ptr<char[]> buf(new char[required]);
+
+ va_copy(ap_copy, ap);
+ vsnprintf(buf.get(), required, format, ap_copy);
+ va_end(ap_copy);
+
+ return buf;
+}
+
+LoggerJniCallback::~LoggerJniCallback() {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ assert(env != nullptr);
+
+ if (m_jdebug_level != nullptr) {
+ env->DeleteGlobalRef(m_jdebug_level);
+ }
+
+ if (m_jinfo_level != nullptr) {
+ env->DeleteGlobalRef(m_jinfo_level);
+ }
+
+ if (m_jwarn_level != nullptr) {
+ env->DeleteGlobalRef(m_jwarn_level);
+ }
+
+ if (m_jerror_level != nullptr) {
+ env->DeleteGlobalRef(m_jerror_level);
+ }
+
+ if (m_jfatal_level != nullptr) {
+ env->DeleteGlobalRef(m_jfatal_level);
+ }
+
+ if (m_jheader_level != nullptr) {
+ env->DeleteGlobalRef(m_jheader_level);
+ }
+
+ releaseJniEnv(attached_thread);
+}
+
+} // namespace ROCKSDB_NAMESPACE
+
+/*
+ * Class: org_rocksdb_Logger
+ * Method: createNewLoggerOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Logger_createNewLoggerOptions(JNIEnv* env, jobject jobj,
+ jlong joptions) {
+ auto* sptr_logger = new std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>(
+ new ROCKSDB_NAMESPACE::LoggerJniCallback(env, jobj));
+
+ // set log level
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(joptions);
+ sptr_logger->get()->SetInfoLogLevel(options->info_log_level);
+
+ return reinterpret_cast<jlong>(sptr_logger);
+}
+
+/*
+ * Class: org_rocksdb_Logger
+ * Method: createNewLoggerDbOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions(JNIEnv* env,
+ jobject jobj,
+ jlong jdb_options) {
+ auto* sptr_logger = new std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>(
+ new ROCKSDB_NAMESPACE::LoggerJniCallback(env, jobj));
+
+ // set log level
+ auto* db_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdb_options);
+ sptr_logger->get()->SetInfoLogLevel(db_options->info_log_level);
+
+ return reinterpret_cast<jlong>(sptr_logger);
+}
+
+/*
+ * Class: org_rocksdb_Logger
+ * Method: setInfoLogLevel
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Logger_setInfoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jhandle, jbyte jlog_level) {
+ auto* handle =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*>(
+ jhandle);
+ handle->get()->SetInfoLogLevel(
+ static_cast<ROCKSDB_NAMESPACE::InfoLogLevel>(jlog_level));
+}
+
+/*
+ * Class: org_rocksdb_Logger
+ * Method: infoLogLevel
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Logger_infoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* handle =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*>(
+ jhandle);
+ return static_cast<jbyte>(handle->get()->GetInfoLogLevel());
+}
+
+/*
+ * Class: org_rocksdb_Logger
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Logger_disposeInternal(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* handle =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*>(
+ jhandle);
+ delete handle; // delete std::shared_ptr
+}
diff --git a/src/rocksdb/java/rocksjni/loggerjnicallback.h b/src/rocksdb/java/rocksjni/loggerjnicallback.h
new file mode 100644
index 000000000..7bcba82ee
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/loggerjnicallback.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Logger
+
+#ifndef JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
+#define JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
+
+#include <jni.h>
+#include <memory>
+#include <string>
+#include "rocksjni/jnicallback.h"
+#include "port/port.h"
+#include "rocksdb/env.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class LoggerJniCallback : public JniCallback, public Logger {
+ public:
+ LoggerJniCallback(JNIEnv* env, jobject jLogger);
+ ~LoggerJniCallback();
+
+ using Logger::SetInfoLogLevel;
+ using Logger::GetInfoLogLevel;
+ // Write an entry to the log file with the specified format.
+ virtual void Logv(const char* format, va_list ap);
+ // Write an entry to the log file with the specified log level
+ // and format. Any log with level under the internal log level
+ // of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be
+ // printed.
+ virtual void Logv(const InfoLogLevel log_level, const char* format,
+ va_list ap);
+
+ private:
+ jmethodID m_jLogMethodId;
+ jobject m_jdebug_level;
+ jobject m_jinfo_level;
+ jobject m_jwarn_level;
+ jobject m_jerror_level;
+ jobject m_jfatal_level;
+ jobject m_jheader_level;
+ std::unique_ptr<char[]> format_str(const char* format, va_list ap) const;
+};
+}  // namespace ROCKSDB_NAMESPACE
+
+#endif // JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
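
A minimal Java sketch of a custom Logger; its log(...) method is the
target of the m_jLogMethodId callback above, and the output format is
arbitrary:

import org.rocksdb.InfoLogLevel;
import org.rocksdb.Logger;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class LoggerSketch {
  public static void main(String[] args) {
    RocksDB.loadLibrary();
    try (final Options options =
             new Options().setInfoLogLevel(InfoLogLevel.INFO_LEVEL);
         final Logger logger = new Logger(options) {
           @Override
           protected void log(final InfoLogLevel infoLogLevel,
                              final String logMsg) {
             // invoked from native code via LoggerJniCallback::Logv
             System.out.println(infoLogLevel + ": " + logMsg);
           }
         }) {
      options.setLogger(logger);
      // pass 'options' to RocksDB.open(...) as usual
    }
  }
}
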
diff --git a/src/rocksdb/java/rocksjni/lru_cache.cc b/src/rocksdb/java/rocksjni/lru_cache.cc
new file mode 100644
index 000000000..cfdcb525b
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/lru_cache.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::LRUCache.
+
+#include <jni.h>
+
+#include "cache/lru_cache.h"
+#include "include/org_rocksdb_LRUCache.h"
+
+/*
+ * Class: org_rocksdb_LRUCache
+ * Method: newLRUCache
+ * Signature: (JIZD)J
+ */
+jlong Java_org_rocksdb_LRUCache_newLRUCache(JNIEnv* /*env*/, jclass /*jcls*/,
+ jlong jcapacity,
+ jint jnum_shard_bits,
+ jboolean jstrict_capacity_limit,
+ jdouble jhigh_pri_pool_ratio) {
+ auto* sptr_lru_cache = new std::shared_ptr<ROCKSDB_NAMESPACE::Cache>(
+ ROCKSDB_NAMESPACE::NewLRUCache(
+ static_cast<size_t>(jcapacity), static_cast<int>(jnum_shard_bits),
+ static_cast<bool>(jstrict_capacity_limit),
+ static_cast<double>(jhigh_pri_pool_ratio)));
+ return reinterpret_cast<jlong>(sptr_lru_cache);
+}
+
+/*
+ * Class: org_rocksdb_LRUCache
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_LRUCache_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* sptr_lru_cache =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache>*>(jhandle);
+ delete sptr_lru_cache; // delete std::shared_ptr
+}
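
A short Java sketch wiring a cache built by newLRUCache into a table
config; the capacity, shard bits, and high-priority pool ratio are
example values mirroring the (JIZD) signature:

import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.LRUCache;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class LruCacheSketch {
  public static void main(String[] args) {
    RocksDB.loadLibrary();
    try (final LRUCache cache =
             new LRUCache(64 * 1024 * 1024L, 6, false, 0.05);
         final Options options = new Options().setTableFormatConfig(
             new BlockBasedTableConfig().setBlockCache(cache))) {
      // pass 'options' to RocksDB.open(...)
    }
  }
}
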
diff --git a/src/rocksdb/java/rocksjni/memory_util.cc b/src/rocksdb/java/rocksjni/memory_util.cc
new file mode 100644
index 000000000..fac288c92
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/memory_util.cc
@@ -0,0 +1,107 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include <jni.h>
+#include <map>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "include/org_rocksdb_MemoryUtil.h"
+
+#include "rocksjni/portal.h"
+
+#include "rocksdb/utilities/memory_util.h"
+
+
+/*
+ * Class: org_rocksdb_MemoryUtil
+ * Method: getApproximateMemoryUsageByType
+ * Signature: ([J[J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_MemoryUtil_getApproximateMemoryUsageByType(
+ JNIEnv *env, jclass /*jclazz*/, jlongArray jdb_handles, jlongArray jcache_handles) {
+ std::vector<ROCKSDB_NAMESPACE::DB *> dbs;
+ jsize db_handle_count = env->GetArrayLength(jdb_handles);
+ if(db_handle_count > 0) {
+ jlong *ptr_jdb_handles = env->GetLongArrayElements(jdb_handles, nullptr);
+ if (ptr_jdb_handles == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ for (jsize i = 0; i < db_handle_count; i++) {
+ dbs.push_back(
+ reinterpret_cast<ROCKSDB_NAMESPACE::DB *>(ptr_jdb_handles[i]));
+ }
+ env->ReleaseLongArrayElements(jdb_handles, ptr_jdb_handles, JNI_ABORT);
+ }
+
+ std::unordered_set<const ROCKSDB_NAMESPACE::Cache *> cache_set;
+ jsize cache_handle_count = env->GetArrayLength(jcache_handles);
+ if(cache_handle_count > 0) {
+ jlong *ptr_jcache_handles = env->GetLongArrayElements(jcache_handles, nullptr);
+ if (ptr_jcache_handles == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ for (jsize i = 0; i < cache_handle_count; i++) {
+ auto *cache_ptr =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *>(
+ ptr_jcache_handles[i]);
+ cache_set.insert(cache_ptr->get());
+ }
+ env->ReleaseLongArrayElements(jcache_handles, ptr_jcache_handles, JNI_ABORT);
+ }
+
+ std::map<ROCKSDB_NAMESPACE::MemoryUtil::UsageType, uint64_t> usage_by_type;
+ if (ROCKSDB_NAMESPACE::MemoryUtil::GetApproximateMemoryUsageByType(
+ dbs, cache_set, &usage_by_type) != ROCKSDB_NAMESPACE::Status::OK()) {
+ // Non-OK status
+ return nullptr;
+ }
+
+ jobject jusage_by_type = ROCKSDB_NAMESPACE::HashMapJni::construct(
+ env, static_cast<uint32_t>(usage_by_type.size()));
+ if (jusage_by_type == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+ const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+ const ROCKSDB_NAMESPACE::MemoryUtil::UsageType, const uint64_t, jobject,
+ jobject>
+ fn_map_kv = [env](
+ const std::pair<ROCKSDB_NAMESPACE::MemoryUtil::UsageType,
+ uint64_t> &pair) {
+ // Construct key
+ const jobject jusage_type = ROCKSDB_NAMESPACE::ByteJni::valueOf(
+ env, ROCKSDB_NAMESPACE::MemoryUsageTypeJni::toJavaMemoryUsageType(
+ pair.first));
+ if (jusage_type == nullptr) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+ // Construct value
+ const jobject jusage_value =
+ ROCKSDB_NAMESPACE::LongJni::valueOf(env, pair.second);
+ if (jusage_value == nullptr) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+ // Construct and return pointer to pair of jobjects
+ return std::unique_ptr<std::pair<jobject, jobject>>(
+ new std::pair<jobject, jobject>(jusage_type,
+ jusage_value));
+ };
+
+ if (!ROCKSDB_NAMESPACE::HashMapJni::putAll(env, jusage_by_type,
+ usage_by_type.begin(),
+ usage_by_type.end(), fn_map_kv)) {
+    // exception occurred
+ jusage_by_type = nullptr;
+ }
+
+  return jusage_by_type;
+}
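
A Java sketch of calling MemoryUtil.getApproximateMemoryUsageByType,
which lands in the JNI function above; the DB path and cache size are
placeholders:

import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import org.rocksdb.Cache;
import org.rocksdb.LRUCache;
import org.rocksdb.MemoryUsageType;
import org.rocksdb.MemoryUtil;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class MemoryUtilSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final LRUCache cache = new LRUCache(8 * 1024 * 1024L);
         final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/memusage-example-db")) {
      final Map<MemoryUsageType, Long> usage =
          MemoryUtil.getApproximateMemoryUsageByType(
              Arrays.asList(db), Collections.<Cache>singleton(cache));
      usage.forEach((type, bytes) -> System.out.println(type + " = " + bytes));
    }
  }
}
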
diff --git a/src/rocksdb/java/rocksjni/memtablejni.cc b/src/rocksdb/java/rocksjni/memtablejni.cc
new file mode 100644
index 000000000..1188c5e59
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/memtablejni.cc
@@ -0,0 +1,93 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for MemTables.
+
+#include "include/org_rocksdb_HashLinkedListMemTableConfig.h"
+#include "include/org_rocksdb_HashSkipListMemTableConfig.h"
+#include "include/org_rocksdb_SkipListMemTableConfig.h"
+#include "include/org_rocksdb_VectorMemTableConfig.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_HashSkipListMemTableConfig
+ * Method: newMemTableFactoryHandle
+ * Signature: (JII)J
+ */
+jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle(
+ JNIEnv* env, jobject /*jobj*/, jlong jbucket_count, jint jheight,
+ jint jbranching_factor) {
+ ROCKSDB_NAMESPACE::Status s =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jbucket_count);
+ if (s.ok()) {
+ return reinterpret_cast<jlong>(ROCKSDB_NAMESPACE::NewHashSkipListRepFactory(
+ static_cast<size_t>(jbucket_count), static_cast<int32_t>(jheight),
+ static_cast<int32_t>(jbranching_factor)));
+ }
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ return 0;
+}
+
+/*
+ * Class: org_rocksdb_HashLinkedListMemTableConfig
+ * Method: newMemTableFactoryHandle
+ * Signature: (JJIZI)J
+ */
+jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
+ JNIEnv* env, jobject /*jobj*/, jlong jbucket_count,
+ jlong jhuge_page_tlb_size, jint jbucket_entries_logging_threshold,
+ jboolean jif_log_bucket_dist_when_flash, jint jthreshold_use_skiplist) {
+ ROCKSDB_NAMESPACE::Status statusBucketCount =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jbucket_count);
+ ROCKSDB_NAMESPACE::Status statusHugePageTlb =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ jhuge_page_tlb_size);
+ if (statusBucketCount.ok() && statusHugePageTlb.ok()) {
+ return reinterpret_cast<jlong>(ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(
+ static_cast<size_t>(jbucket_count),
+ static_cast<size_t>(jhuge_page_tlb_size),
+ static_cast<int32_t>(jbucket_entries_logging_threshold),
+ static_cast<bool>(jif_log_bucket_dist_when_flash),
+ static_cast<int32_t>(jthreshold_use_skiplist)));
+ }
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(
+ env, !statusBucketCount.ok() ? statusBucketCount : statusHugePageTlb);
+ return 0;
+}
+
+/*
+ * Class: org_rocksdb_VectorMemTableConfig
+ * Method: newMemTableFactoryHandle
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle(
+ JNIEnv* env, jobject /*jobj*/, jlong jreserved_size) {
+ ROCKSDB_NAMESPACE::Status s =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jreserved_size);
+ if (s.ok()) {
+ return reinterpret_cast<jlong>(new ROCKSDB_NAMESPACE::VectorRepFactory(
+ static_cast<size_t>(jreserved_size)));
+ }
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ return 0;
+}
+
+/*
+ * Class: org_rocksdb_SkipListMemTableConfig
+ * Method: newMemTableFactoryHandle0
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_SkipListMemTableConfig_newMemTableFactoryHandle0(
+ JNIEnv* env, jobject /*jobj*/, jlong jlookahead) {
+ ROCKSDB_NAMESPACE::Status s =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jlookahead);
+ if (s.ok()) {
+ return reinterpret_cast<jlong>(new ROCKSDB_NAMESPACE::SkipListFactory(
+ static_cast<size_t>(jlookahead)));
+ }
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ return 0;
+}
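
For illustration, a Java sketch selecting one of these memtable
factories; the bucket count, height, and branching factor are example
values, and an oversized bucket count surfaces as the
IllegalArgumentException thrown by the size_t checks above:

import org.rocksdb.HashSkipListMemTableConfig;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class MemTableSketch {
  public static void main(String[] args) {
    RocksDB.loadLibrary();
    try (final Options options = new Options()
             .setMemTableConfig(new HashSkipListMemTableConfig()
                 .setBucketCount(1_000_000)
                 .setHeight(4)
                 .setBranchingFactor(4))) {
      // pass 'options' to RocksDB.open(...)
    }
  }
}
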
diff --git a/src/rocksdb/java/rocksjni/merge_operator.cc b/src/rocksdb/java/rocksjni/merge_operator.cc
new file mode 100644
index 000000000..edc3e7231
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/merge_operator.cc
@@ -0,0 +1,81 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::MergeOperator.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory>
+#include <string>
+
+#include "include/org_rocksdb_StringAppendOperator.h"
+#include "include/org_rocksdb_UInt64AddOperator.h"
+#include "rocksdb/db.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/merge_operator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/table.h"
+#include "rocksjni/portal.h"
+#include "utilities/merge_operators.h"
+
+/*
+ * Class: org_rocksdb_StringAppendOperator
+ * Method: newSharedStringAppendOperator
+ * Signature: (C)J
+ */
+jlong Java_org_rocksdb_StringAppendOperator_newSharedStringAppendOperator(
+ JNIEnv* /*env*/, jclass /*jclazz*/, jchar jdelim) {
+ auto* sptr_string_append_op =
+ new std::shared_ptr<ROCKSDB_NAMESPACE::MergeOperator>(
+ ROCKSDB_NAMESPACE::MergeOperators::CreateStringAppendOperator(
+ (char)jdelim));
+ return reinterpret_cast<jlong>(sptr_string_append_op);
+}
+
+/*
+ * Class: org_rocksdb_StringAppendOperator
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_StringAppendOperator_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* sptr_string_append_op =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::MergeOperator>*>(
+ jhandle);
+ delete sptr_string_append_op; // delete std::shared_ptr
+}
+
+/*
+ * Class: org_rocksdb_UInt64AddOperator
+ * Method: newSharedUInt64AddOperator
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_UInt64AddOperator_newSharedUInt64AddOperator(
+ JNIEnv* /*env*/, jclass /*jclazz*/) {
+ auto* sptr_uint64_add_op =
+ new std::shared_ptr<ROCKSDB_NAMESPACE::MergeOperator>(
+ ROCKSDB_NAMESPACE::MergeOperators::CreateUInt64AddOperator());
+ return reinterpret_cast<jlong>(sptr_uint64_add_op);
+}
+
+/*
+ * Class: org_rocksdb_UInt64AddOperator
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_UInt64AddOperator_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* sptr_uint64_add_op =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::MergeOperator>*>(
+ jhandle);
+ delete sptr_uint64_add_op; // delete std::shared_ptr
+}
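
A minimal Java round trip through the StringAppendOperator created
above; the ',' delimiter, DB path, and key/value strings are
placeholders:

import java.nio.charset.StandardCharsets;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.StringAppendOperator;

public class MergeSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final StringAppendOperator append = new StringAppendOperator(',');
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setMergeOperator(append);
         final RocksDB db = RocksDB.open(options, "/tmp/merge-example-db")) {
      final byte[] key = "k".getBytes(StandardCharsets.UTF_8);
      db.merge(key, "a".getBytes(StandardCharsets.UTF_8));
      db.merge(key, "b".getBytes(StandardCharsets.UTF_8));
      // the two values are joined by the delimiter: "a,b"
      System.out.println(new String(db.get(key), StandardCharsets.UTF_8));
    }
  }
}
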
diff --git a/src/rocksdb/java/rocksjni/native_comparator_wrapper_test.cc b/src/rocksdb/java/rocksjni/native_comparator_wrapper_test.cc
new file mode 100644
index 000000000..d2f5c1bda
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/native_comparator_wrapper_test.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include <jni.h>
+#include <string>
+
+#include "rocksdb/comparator.h"
+#include "rocksdb/slice.h"
+
+#include "include/org_rocksdb_NativeComparatorWrapperTest_NativeStringComparatorWrapper.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class NativeComparatorWrapperTestStringComparator : public Comparator {
+ const char* Name() const {
+ return "NativeComparatorWrapperTestStringComparator";
+ }
+
+ int Compare(const Slice& a, const Slice& b) const {
+ return a.ToString().compare(b.ToString());
+ }
+
+ void FindShortestSeparator(std::string* /*start*/,
+ const Slice& /*limit*/) const {
+ return;
+ }
+
+ void FindShortSuccessor(std::string* /*key*/) const { return; }
+};
+} // namespace ROCKSDB_NAMESPACE
+
+/*
+ * Class: org_rocksdb_NativeComparatorWrapperTest_NativeStringComparatorWrapper
+ * Method: newStringComparator
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_NativeComparatorWrapperTest_00024NativeStringComparatorWrapper_newStringComparator(
+ JNIEnv* /*env*/, jobject /*jobj*/) {
+ auto* comparator =
+ new ROCKSDB_NAMESPACE::NativeComparatorWrapperTestStringComparator();
+ return reinterpret_cast<jlong>(comparator);
+}
diff --git a/src/rocksdb/java/rocksjni/optimistic_transaction_db.cc b/src/rocksdb/java/rocksjni/optimistic_transaction_db.cc
new file mode 100644
index 000000000..4f966cdd7
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/optimistic_transaction_db.cc
@@ -0,0 +1,284 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::OptimisticTransactionDB.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_OptimisticTransactionDB.h"
+
+#include "rocksdb/options.h"
+#include "rocksdb/utilities/optimistic_transaction_db.h"
+#include "rocksdb/utilities/transaction.h"
+
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: open
+ * Signature: (JLjava/lang/String;)J
+ */
+jlong Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2(
+ JNIEnv* env, jclass, jlong joptions_handle, jstring jdb_path) {
+ const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
+ if (db_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(joptions_handle);
+ ROCKSDB_NAMESPACE::OptimisticTransactionDB* otdb = nullptr;
+ ROCKSDB_NAMESPACE::Status s =
+ ROCKSDB_NAMESPACE::OptimisticTransactionDB::Open(*options, db_path,
+ &otdb);
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+
+ if (s.ok()) {
+ return reinterpret_cast<jlong>(otdb);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+ }
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: open
+ * Signature: (JLjava/lang/String;[[B[J)[J
+ */
+jlongArray
+Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2_3_3B_3J(
+ JNIEnv* env, jclass, jlong jdb_options_handle, jstring jdb_path,
+ jobjectArray jcolumn_names, jlongArray jcolumn_options_handles) {
+ const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
+ if (db_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor> column_families;
+ const jsize len_cols = env->GetArrayLength(jcolumn_names);
+ if (len_cols > 0) {
+ if (env->EnsureLocalCapacity(len_cols) != 0) {
+ // out of memory
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ jlong* jco = env->GetLongArrayElements(jcolumn_options_handles, nullptr);
+ if (jco == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ for (int i = 0; i < len_cols; i++) {
+ const jobject jcn = env->GetObjectArrayElement(jcolumn_names, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT);
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ const jbyteArray jcn_ba = reinterpret_cast<jbyteArray>(jcn);
+ const jsize jcf_name_len = env->GetArrayLength(jcn_ba);
+ if (env->EnsureLocalCapacity(jcf_name_len) != 0) {
+ // out of memory
+ env->DeleteLocalRef(jcn);
+ env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT);
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, nullptr);
+ if (jcf_name == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jcn);
+ env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT);
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ const std::string cf_name(reinterpret_cast<char*>(jcf_name),
+ jcf_name_len);
+ const ROCKSDB_NAMESPACE::ColumnFamilyOptions* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jco[i]);
+ column_families.push_back(
+ ROCKSDB_NAMESPACE::ColumnFamilyDescriptor(cf_name, *cf_options));
+
+ env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT);
+ env->DeleteLocalRef(jcn);
+ }
+ env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT);
+ }
+
+ auto* db_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdb_options_handle);
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> handles;
+ ROCKSDB_NAMESPACE::OptimisticTransactionDB* otdb = nullptr;
+ const ROCKSDB_NAMESPACE::Status s =
+ ROCKSDB_NAMESPACE::OptimisticTransactionDB::Open(
+ *db_options, db_path, column_families, &handles, &otdb);
+
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+
+ // check if open operation was successful
+ if (s.ok()) {
+ const jsize resultsLen = 1 + len_cols; // db handle + column family handles
+ std::unique_ptr<jlong[]> results =
+ std::unique_ptr<jlong[]>(new jlong[resultsLen]);
+ results[0] = reinterpret_cast<jlong>(otdb);
+ for (int i = 1; i <= len_cols; i++) {
+ results[i] = reinterpret_cast<jlong>(handles[i - 1]);
+ }
+
+ jlongArray jresults = env->NewLongArray(resultsLen);
+ if (jresults == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ return nullptr;
+ }
+ return jresults;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_OptimisticTransactionDB_disposeInternal(
+ JNIEnv *, jobject, jlong jhandle) {
+ auto* optimistic_txn_db =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionDB*>(jhandle);
+ assert(optimistic_txn_db != nullptr);
+ delete optimistic_txn_db;
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: closeDatabase
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_OptimisticTransactionDB_closeDatabase(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* optimistic_txn_db =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionDB*>(jhandle);
+ assert(optimistic_txn_db != nullptr);
+ ROCKSDB_NAMESPACE::Status s = optimistic_txn_db->Close();
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: beginTransaction
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJ(
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle) {
+ auto* optimistic_txn_db =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionDB*>(jhandle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ ROCKSDB_NAMESPACE::Transaction* txn =
+ optimistic_txn_db->BeginTransaction(*write_options);
+ return reinterpret_cast<jlong>(txn);
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: beginTransaction
+ * Signature: (JJJ)J
+ */
+jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJJ(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jwrite_options_handle, jlong joptimistic_txn_options_handle) {
+ auto* optimistic_txn_db =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionDB*>(jhandle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* optimistic_txn_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionOptions*>(
+ joptimistic_txn_options_handle);
+ ROCKSDB_NAMESPACE::Transaction* txn = optimistic_txn_db->BeginTransaction(
+ *write_options, *optimistic_txn_options);
+ return reinterpret_cast<jlong>(txn);
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: beginTransaction_withOld
+ * Signature: (JJJ)J
+ */
+jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJ(
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+ jlong jold_txn_handle) {
+ auto* optimistic_txn_db =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionDB*>(jhandle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* old_txn =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jold_txn_handle);
+ ROCKSDB_NAMESPACE::OptimisticTransactionOptions optimistic_txn_options;
+ ROCKSDB_NAMESPACE::Transaction* txn = optimistic_txn_db->BeginTransaction(
+ *write_options, optimistic_txn_options, old_txn);
+
+ // RocksJava relies on the assumption that
+ // we do not allocate a new Transaction object
+ // when providing an old_optimistic_txn
+ assert(txn == old_txn);
+
+ return reinterpret_cast<jlong>(txn);
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: beginTransaction_withOld
+ * Signature: (JJJJ)J
+ */
+jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJJ(
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+ jlong joptimistic_txn_options_handle, jlong jold_txn_handle) {
+ auto* optimistic_txn_db =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionDB*>(jhandle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* optimistic_txn_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionOptions*>(
+ joptimistic_txn_options_handle);
+ auto* old_txn =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jold_txn_handle);
+ ROCKSDB_NAMESPACE::Transaction* txn = optimistic_txn_db->BeginTransaction(
+ *write_options, *optimistic_txn_options, old_txn);
+
+ // RocksJava relies on the assumption that
+ // we do not allocate a new Transaction object
+  // when providing an old_optimistic_txn
+ assert(txn == old_txn);
+
+ return reinterpret_cast<jlong>(txn);
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: getBaseDB
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_OptimisticTransactionDB_getBaseDB(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* optimistic_txn_db =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionDB*>(jhandle);
+ return reinterpret_cast<jlong>(optimistic_txn_db->GetBaseDB());
+}
diff --git a/src/rocksdb/java/rocksjni/optimistic_transaction_options.cc b/src/rocksdb/java/rocksjni/optimistic_transaction_options.cc
new file mode 100644
index 000000000..6bc80fdf0
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/optimistic_transaction_options.cc
@@ -0,0 +1,78 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::OptimisticTransactionOptions.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_OptimisticTransactionOptions.h"
+
+#include "rocksdb/comparator.h"
+#include "rocksdb/utilities/optimistic_transaction_db.h"
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionOptions
+ * Method: newOptimisticTransactionOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_OptimisticTransactionOptions_newOptimisticTransactionOptions(
+ JNIEnv* /*env*/, jclass /*jcls*/) {
+ ROCKSDB_NAMESPACE::OptimisticTransactionOptions* opts =
+ new ROCKSDB_NAMESPACE::OptimisticTransactionOptions();
+ return reinterpret_cast<jlong>(opts);
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionOptions
+ * Method: isSetSnapshot
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_OptimisticTransactionOptions_isSetSnapshot(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionOptions*>(
+ jhandle);
+ return opts->set_snapshot;
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionOptions
+ * Method: setSetSnapshot
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_OptimisticTransactionOptions_setSetSnapshot(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean jset_snapshot) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionOptions*>(
+ jhandle);
+ opts->set_snapshot = jset_snapshot;
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionOptions
+ * Method: setComparator
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_OptimisticTransactionOptions_setComparator(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jcomparator_handle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionOptions*>(
+ jhandle);
+ opts->cmp =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Comparator*>(jcomparator_handle);
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_OptimisticTransactionOptions_disposeInternal(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionOptions*>(
+ jhandle);
+}
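
A combined Java sketch for the two files above: opening an
OptimisticTransactionDB and beginning a transaction with
OptimisticTransactionOptions (the DB path and data are placeholders;
commit() is where write conflicts are detected):

import java.nio.charset.StandardCharsets;
import org.rocksdb.OptimisticTransactionDB;
import org.rocksdb.OptimisticTransactionOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.Transaction;
import org.rocksdb.WriteOptions;

public class OptimisticTxnSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final OptimisticTransactionDB otdb =
             OptimisticTransactionDB.open(options, "/tmp/otxn-example-db");
         final WriteOptions writeOptions = new WriteOptions();
         final OptimisticTransactionOptions txnOptions =
             new OptimisticTransactionOptions().setSetSnapshot(true);
         final Transaction txn =
             otdb.beginTransaction(writeOptions, txnOptions)) {
      txn.put("k".getBytes(StandardCharsets.UTF_8),
              "v".getBytes(StandardCharsets.UTF_8));
      txn.commit();
    }
  }
}
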
diff --git a/src/rocksdb/java/rocksjni/options.cc b/src/rocksdb/java/rocksjni/options.cc
new file mode 100644
index 000000000..c13613373
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/options.cc
@@ -0,0 +1,7240 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Options.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory>
+#include <vector>
+
+#include "include/org_rocksdb_ColumnFamilyOptions.h"
+#include "include/org_rocksdb_ComparatorOptions.h"
+#include "include/org_rocksdb_DBOptions.h"
+#include "include/org_rocksdb_FlushOptions.h"
+#include "include/org_rocksdb_Options.h"
+#include "include/org_rocksdb_ReadOptions.h"
+#include "include/org_rocksdb_WriteOptions.h"
+
+#include "rocksjni/comparatorjnicallback.h"
+#include "rocksjni/portal.h"
+#include "rocksjni/statisticsjni.h"
+#include "rocksjni/table_filter_jnicallback.h"
+
+#include "rocksdb/comparator.h"
+#include "rocksdb/convenience.h"
+#include "rocksdb/db.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/merge_operator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/rate_limiter.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/table.h"
+#include "utilities/merge_operators.h"
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: newOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_Options_newOptions__(
+ JNIEnv*, jclass) {
+ auto* op = new ROCKSDB_NAMESPACE::Options();
+ return reinterpret_cast<jlong>(op);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: newOptions
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_Options_newOptions__JJ(
+ JNIEnv*, jclass, jlong jdboptions, jlong jcfoptions) {
+ auto* dbOpt =
+ reinterpret_cast<const ROCKSDB_NAMESPACE::DBOptions*>(jdboptions);
+ auto* cfOpt = reinterpret_cast<const ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(
+ jcfoptions);
+ auto* op = new ROCKSDB_NAMESPACE::Options(*dbOpt, *cfOpt);
+ return reinterpret_cast<jlong>(op);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: copyOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_copyOptions(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto new_opt = new ROCKSDB_NAMESPACE::Options(
+ *(reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)));
+ return reinterpret_cast<jlong>(new_opt);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Options_disposeInternal(
+ JNIEnv*, jobject, jlong handle) {
+ auto* op = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(handle);
+ assert(op != nullptr);
+ delete op;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setIncreaseParallelism
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setIncreaseParallelism(
+ JNIEnv*, jobject, jlong jhandle, jint totalThreads) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->IncreaseParallelism(
+ static_cast<int>(totalThreads));
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCreateIfMissing
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setCreateIfMissing(
+ JNIEnv*, jobject, jlong jhandle, jboolean flag) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->create_if_missing =
+ flag;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: createIfMissing
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_createIfMissing(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->create_if_missing;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCreateMissingColumnFamilies
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setCreateMissingColumnFamilies(
+ JNIEnv*, jobject, jlong jhandle, jboolean flag) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->create_missing_column_families = flag;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: createMissingColumnFamilies
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_createMissingColumnFamilies(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->create_missing_column_families;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setComparatorHandle
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setComparatorHandle__JI(
+ JNIEnv*, jobject, jlong jhandle, jint builtinComparator) {
+ switch (builtinComparator) {
+ case 1:
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->comparator =
+ ROCKSDB_NAMESPACE::ReverseBytewiseComparator();
+ break;
+ default:
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->comparator =
+ ROCKSDB_NAMESPACE::BytewiseComparator();
+ break;
+ }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setComparatorHandle
+ * Signature: (JJB)V
+ */
+void Java_org_rocksdb_Options_setComparatorHandle__JJB(
+ JNIEnv*, jobject, jlong jopt_handle, jlong jcomparator_handle,
+ jbyte jcomparator_type) {
+ ROCKSDB_NAMESPACE::Comparator* comparator = nullptr;
+ switch (jcomparator_type) {
+ // JAVA_COMPARATOR
+ case 0x0:
+ comparator = reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallback*>(
+ jcomparator_handle);
+ break;
+
+ // JAVA_NATIVE_COMPARATOR_WRAPPER
+ case 0x1:
+ comparator =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Comparator*>(jcomparator_handle);
+ break;
+ }
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jopt_handle);
+ opt->comparator = comparator;
+}
+
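+// The comparator-type byte selects between two ownership/dispatch models:
+// 0x0 is a ComparatorJniCallback whose Compare() re-enters the JVM on every
+// key comparison, while 0x1 wraps a comparator that already lives in native
+// code and costs no JNI round trip. A sketch of registering a native
+// built-in through this entry point (normally done by the Java wrappers):
+//
+//   jlong ch = reinterpret_cast<jlong>(const_cast<ROCKSDB_NAMESPACE::Comparator*>(
+//       ROCKSDB_NAMESPACE::ReverseBytewiseComparator()));
+//   Java_org_rocksdb_Options_setComparatorHandle__JJB(
+//       env, jobj, opt_handle, ch, 0x1);  // JAVA_NATIVE_COMPARATOR_WRAPPER
+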
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMergeOperatorName
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_Options_setMergeOperatorName(
+ JNIEnv* env, jobject, jlong jhandle, jstring jop_name) {
+ const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
+ if (op_name == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ options->merge_operator =
+ ROCKSDB_NAMESPACE::MergeOperators::CreateFromStringId(op_name);
+
+ env->ReleaseStringUTFChars(jop_name, op_name);
+}
+
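+// GetStringUTFChars/ReleaseStringUTFChars is the borrow/return pattern used
+// for every jstring parameter in this file: a nullptr borrow means an
+// OutOfMemoryError is already pending, so the setter backs out untouched.
+// The generic shape, assuming a hypothetical std::string field:
+//
+//   const char* s = env->GetStringUTFChars(jstr, nullptr);
+//   if (s == nullptr) return;          // OOME pending; leave options unchanged
+//   opt->some_string_field.assign(s);  // copy out while borrowed
+//   env->ReleaseStringUTFChars(jstr, s);
+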
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMergeOperator
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMergeOperator(
+ JNIEnv*, jobject, jlong jhandle, jlong mergeOperatorHandle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->merge_operator =
+ *(reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::MergeOperator>*>(
+ mergeOperatorHandle));
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionFilterHandle
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setCompactionFilterHandle(
+ JNIEnv*, jobject, jlong jopt_handle,
+ jlong jcompactionfilter_handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jopt_handle)
+ ->compaction_filter =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionFilter*>(
+ jcompactionfilter_handle);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionFilterFactoryHandle
+ * Signature: (JJ)V
+ */
+void JNICALL Java_org_rocksdb_Options_setCompactionFilterFactoryHandle(
+ JNIEnv*, jobject, jlong jopt_handle,
+ jlong jcompactionfilterfactory_handle) {
+ auto* cff_factory = reinterpret_cast<
+ std::shared_ptr<ROCKSDB_NAMESPACE::CompactionFilterFactory>*>(
+ jcompactionfilterfactory_handle);
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jopt_handle)
+ ->compaction_filter_factory = *cff_factory;
+}
+
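+// Two ownership conventions coexist here: compaction_filter (set just
+// above) stores a raw pointer that the Java side must keep alive, whereas
+// compaction_filter_factory dereferences a heap-allocated std::shared_ptr
+// handle, so the Options co-owns the factory. Sketch of that shared_ptr
+// handle convention (MyFactory is hypothetical):
+//
+//   auto* h = new std::shared_ptr<ROCKSDB_NAMESPACE::CompactionFilterFactory>(
+//       std::make_shared<MyFactory>());
+//   // pass reinterpret_cast<jlong>(h) up to Java; on use:
+//   opt->compaction_filter_factory = *h;  // copying shares ownership
+//   delete h;  // later, via the factory's own disposeInternal; drops one ref
+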
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWriteBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWriteBufferSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong jwrite_buffer_size) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ jwrite_buffer_size);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->write_buffer_size =
+ jwrite_buffer_size;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
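+// Java has no unsigned 64-bit integer, so size_t-valued options arrive as
+// jlong and are range-checked first; a failed check becomes a Java
+// IllegalArgumentException rather than a silent truncation. The recurring
+// shape (JniUtil and the exception helper come from rocksjni/portal.h;
+// jval and some_size_t_option are placeholders):
+//
+//   auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jval);
+//   if (s.ok()) {
+//     opt->some_size_t_option = jval;  // known to fit in size_t
+//   } else {
+//     ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+//   }
+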
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWriteBufferManager
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWriteBufferManager(
+ JNIEnv*, jobject, jlong joptions_handle,
+ jlong jwrite_buffer_manager_handle) {
+ auto* write_buffer_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::WriteBufferManager>*>(
+ jwrite_buffer_manager_handle);
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(joptions_handle)
+ ->write_buffer_manager = *write_buffer_manager;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: writeBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_writeBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->write_buffer_size;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxWriteBufferNumber
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxWriteBufferNumber(
+ JNIEnv*, jobject, jlong jhandle,
+ jint jmax_write_buffer_number) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_write_buffer_number = jmax_write_buffer_number;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setStatistics
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setStatistics(
+ JNIEnv*, jobject, jlong jhandle, jlong jstatistics_handle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ auto* pSptr =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::StatisticsJni>*>(
+ jstatistics_handle);
+ opt->statistics = *pSptr;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: statistics
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_statistics(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> sptr = opt->statistics;
+ if (sptr == nullptr) {
+ return 0;
+ } else {
+ std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>* pSptr =
+ new std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>(sptr);
+ return reinterpret_cast<jlong>(pSptr);
+ }
+}
+
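+// The statistics getter above returns a *new* heap-allocated shared_ptr,
+// so the Statistics object outlives these Options if Java holds on to it;
+// the Java wrapper owns that shared_ptr and must delete it to drop the
+// extra reference. Sketch of the matching native release (h being the
+// handle returned above):
+//
+//   auto* pSptr =
+//       reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(h);
+//   delete pSptr;  // releases one reference; Statistics itself may live on
+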
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxWriteBufferNumber
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxWriteBufferNumber(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_write_buffer_number;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: errorIfExists
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_errorIfExists(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->error_if_exists;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setErrorIfExists
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setErrorIfExists(
+ JNIEnv*, jobject, jlong jhandle, jboolean error_if_exists) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->error_if_exists =
+ static_cast<bool>(error_if_exists);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: paranoidChecks
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_paranoidChecks(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->paranoid_checks;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setParanoidChecks
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setParanoidChecks(
+ JNIEnv*, jobject, jlong jhandle, jboolean paranoid_checks) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->paranoid_checks =
+ static_cast<bool>(paranoid_checks);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setEnv
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setEnv(
+ JNIEnv*, jobject, jlong jhandle, jlong jenv) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->env =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jenv);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxTotalWalSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxTotalWalSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_total_wal_size) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->max_total_wal_size =
+ static_cast<uint64_t>(jmax_total_wal_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxTotalWalSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxTotalWalSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_total_wal_size;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxOpenFiles
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxOpenFiles(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->max_open_files;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxOpenFiles
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxOpenFiles(
+ JNIEnv*, jobject, jlong jhandle, jint max_open_files) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->max_open_files =
+ static_cast<int>(max_open_files);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxFileOpeningThreads
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxFileOpeningThreads(
+ JNIEnv*, jobject, jlong jhandle, jint jmax_file_opening_threads) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_file_opening_threads = static_cast<int>(jmax_file_opening_threads);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxFileOpeningThreads
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxFileOpeningThreads(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<int>(opt->max_file_opening_threads);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: useFsync
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_useFsync(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->use_fsync;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setUseFsync
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setUseFsync(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_fsync) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->use_fsync =
+ static_cast<bool>(use_fsync);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDbPaths
+ * Signature: (J[Ljava/lang/String;[J)V
+ */
+void Java_org_rocksdb_Options_setDbPaths(
+ JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths,
+ jlongArray jtarget_sizes) {
+ std::vector<ROCKSDB_NAMESPACE::DbPath> db_paths;
+ jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
+ if (ptr_jtarget_size == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ jboolean has_exception = JNI_FALSE;
+ const jsize len = env->GetArrayLength(jpaths);
+ for (jsize i = 0; i < len; i++) {
+ jstring jpath =
+ static_cast<jstring>(env->GetObjectArrayElement(jpaths, i));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+ return;
+ }
+ std::string path = ROCKSDB_NAMESPACE::JniUtil::copyStdString(
+ env, jpath, &has_exception);
+ env->DeleteLocalRef(jpath);
+
+ if (has_exception == JNI_TRUE) {
+ env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+ return;
+ }
+
+ jlong jtarget_size = ptr_jtarget_size[i];
+
+ db_paths.push_back(
+ ROCKSDB_NAMESPACE::DbPath(path, static_cast<uint64_t>(jtarget_size)));
+ }
+
+ env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->db_paths = db_paths;
+}
+
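+// setDbPaths illustrates the cleanup discipline for paired-array arguments:
+// pin the jlong buffer first, release it with JNI_ABORT on every early
+// return (nothing needs copying back), and drop per-iteration local refs so
+// the local reference table stays small. Compressed sketch of the skeleton:
+//
+//   jlong* sizes = env->GetLongArrayElements(jsizes, nullptr);
+//   if (sizes == nullptr) return;  // OutOfMemoryError pending
+//   for (jsize i = 0; i < env->GetArrayLength(jarr); i++) {
+//     jobject o = env->GetObjectArrayElement(jarr, i);
+//     if (env->ExceptionCheck()) {
+//       env->ReleaseLongArrayElements(jsizes, sizes, JNI_ABORT);
+//       return;
+//     }
+//     /* ... consume o ... */
+//     env->DeleteLocalRef(o);
+//   }
+//   env->ReleaseLongArrayElements(jsizes, sizes, JNI_ABORT);
+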
+/*
+ * Class: org_rocksdb_Options
+ * Method: dbPathsLen
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_dbPathsLen(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jlong>(opt->db_paths.size());
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: dbPaths
+ * Signature: (J[Ljava/lang/String;[J)V
+ */
+void Java_org_rocksdb_Options_dbPaths(
+ JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths,
+ jlongArray jtarget_sizes) {
+ jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
+ if (ptr_jtarget_size == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ const jsize len = env->GetArrayLength(jpaths);
+ for (jsize i = 0; i < len; i++) {
+ ROCKSDB_NAMESPACE::DbPath db_path = opt->db_paths[i];
+
+ jstring jpath = env->NewStringUTF(db_path.path.c_str());
+ if (jpath == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+ return;
+ }
+ env->SetObjectArrayElement(jpaths, i, jpath);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jpath);
+ env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+ return;
+ }
+
+ ptr_jtarget_size[i] = static_cast<jlong>(db_path.target_size);
+ }
+
+ env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, 0);
+}
+
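+// The release mode matters above: JNI_ABORT frees the native copy without
+// writing back, mode 0 writes back and frees, and JNI_COMMIT writes back
+// but keeps the buffer pinned (leaking it if the VM handed out a copy),
+// which is why the success path uses mode 0:
+//
+//   env->ReleaseLongArrayElements(arr, ptr, JNI_ABORT);  // discard changes
+//   env->ReleaseLongArrayElements(arr, ptr, 0);          // copy back + free
+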
+/*
+ * Class: org_rocksdb_Options
+ * Method: dbLogDir
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_Options_dbLogDir(
+ JNIEnv* env, jobject, jlong jhandle) {
+ return env->NewStringUTF(
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->db_log_dir.c_str());
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDbLogDir
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_Options_setDbLogDir(
+ JNIEnv* env, jobject, jlong jhandle, jstring jdb_log_dir) {
+ const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
+ if (log_dir == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->db_log_dir.assign(
+ log_dir);
+ env->ReleaseStringUTFChars(jdb_log_dir, log_dir);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: walDir
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_Options_walDir(
+ JNIEnv* env, jobject, jlong jhandle) {
+ return env->NewStringUTF(
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->wal_dir.c_str());
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWalDir
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_Options_setWalDir(
+ JNIEnv* env, jobject, jlong jhandle, jstring jwal_dir) {
+ const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
+ if (wal_dir == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->wal_dir.assign(
+ wal_dir);
+ env->ReleaseStringUTFChars(jwal_dir, wal_dir);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: deleteObsoleteFilesPeriodMicros
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->delete_obsolete_files_period_micros;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDeleteObsoleteFilesPeriodMicros
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setDeleteObsoleteFilesPeriodMicros(
+ JNIEnv*, jobject, jlong jhandle, jlong micros) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->delete_obsolete_files_period_micros = static_cast<int64_t>(micros);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setBaseBackgroundCompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setBaseBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->base_background_compactions = static_cast<int>(max);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: baseBackgroundCompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_baseBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->base_background_compactions;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxBackgroundCompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_background_compactions;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxBackgroundCompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_background_compactions = static_cast<int>(max);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxSubcompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxSubcompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->max_subcompactions =
+ static_cast<int32_t>(max);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxSubcompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxSubcompactions(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_subcompactions;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxBackgroundFlushes
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxBackgroundFlushes(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_background_flushes;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxBackgroundFlushes
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxBackgroundFlushes(
+ JNIEnv*, jobject, jlong jhandle, jint max_background_flushes) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_background_flushes = static_cast<int>(max_background_flushes);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxBackgroundJobs
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxBackgroundJobs(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_background_jobs;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxBackgroundJobs
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxBackgroundJobs(
+ JNIEnv*, jobject, jlong jhandle, jint max_background_jobs) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->max_background_jobs =
+ static_cast<int>(max_background_jobs);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxLogFileSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxLogFileSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_log_file_size;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxLogFileSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxLogFileSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong max_log_file_size) {
+ auto s =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(max_log_file_size);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->max_log_file_size =
+ max_log_file_size;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: logFileTimeToRoll
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_logFileTimeToRoll(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->log_file_time_to_roll;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLogFileTimeToRoll
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setLogFileTimeToRoll(
+ JNIEnv* env, jobject, jlong jhandle, jlong log_file_time_to_roll) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ log_file_time_to_roll);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->log_file_time_to_roll = log_file_time_to_roll;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: keepLogFileNum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_keepLogFileNum(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->keep_log_file_num;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setKeepLogFileNum
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setKeepLogFileNum(
+ JNIEnv* env, jobject, jlong jhandle, jlong keep_log_file_num) {
+ auto s =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(keep_log_file_num);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->keep_log_file_num =
+ keep_log_file_num;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: recycleLogFileNum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_recycleLogFileNum(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->recycle_log_file_num;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setRecycleLogFileNum
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setRecycleLogFileNum(
+ JNIEnv* env, jobject, jlong jhandle, jlong recycle_log_file_num) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ recycle_log_file_num);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->recycle_log_file_num = recycle_log_file_num;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxManifestFileSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxManifestFileSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_manifest_file_size;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: memTableFactoryName
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_Options_memTableFactoryName(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ ROCKSDB_NAMESPACE::MemTableRepFactory* tf = opt->memtable_factory.get();
+
+ // Should never be nullptr.
+ // Default memtable factory is SkipListFactory
+ assert(tf);
+
+ // temporary fix for the historical typo
+ if (strcmp(tf->Name(), "HashLinkListRepFactory") == 0) {
+ return env->NewStringUTF("HashLinkedListRepFactory");
+ }
+
+ return env->NewStringUTF(tf->Name());
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxManifestFileSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxManifestFileSize(
+ JNIEnv*, jobject, jlong jhandle, jlong max_manifest_file_size) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_manifest_file_size = static_cast<int64_t>(max_manifest_file_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMemTableFactory
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMemTableFactory(
+ JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->memtable_factory.reset(
+ reinterpret_cast<ROCKSDB_NAMESPACE::MemTableRepFactory*>(
+ jfactory_handle));
+}
+
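+// Caution implicit in the reset() pattern above (and in setTableFactory
+// further down): the Options takes sole ownership of the raw factory
+// pointer, so the Java side must not dispose the same handle again or the
+// factory is double-freed. Sketch with a concrete factory (SkipListFactory,
+// the default memtable rep, is declared in rocksdb/memtablerep.h):
+//
+//   auto* f = new ROCKSDB_NAMESPACE::SkipListFactory();
+//   opt->memtable_factory.reset(f);  // opt now owns f; no further deletes
+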
+/*
+ * Class: org_rocksdb_Options
+ * Method: setRateLimiter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setRateLimiter(
+ JNIEnv*, jobject, jlong jhandle, jlong jrate_limiter_handle) {
+ std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>* pRateLimiter =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(
+ jrate_limiter_handle);
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->rate_limiter =
+ *pRateLimiter;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setSstFileManager
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setSstFileManager(
+ JNIEnv*, jobject, jlong jhandle, jlong jsst_file_manager_handle) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jsst_file_manager_handle);
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->sst_file_manager =
+ *sptr_sst_file_manager;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLogger
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setLogger(
+ JNIEnv*, jobject, jlong jhandle, jlong jlogger_handle) {
+ std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>* pLogger =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*>(
+ jlogger_handle);
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->info_log = *pLogger;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setInfoLogLevel
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setInfoLogLevel(
+ JNIEnv*, jobject, jlong jhandle, jbyte jlog_level) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->info_log_level =
+ static_cast<ROCKSDB_NAMESPACE::InfoLogLevel>(jlog_level);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: infoLogLevel
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_infoLogLevel(
+ JNIEnv*, jobject, jlong jhandle) {
+ return static_cast<jbyte>(
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->info_log_level);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: tableCacheNumshardbits
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_tableCacheNumshardbits(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->table_cache_numshardbits;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setTableCacheNumshardbits
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setTableCacheNumshardbits(
+ JNIEnv*, jobject, jlong jhandle, jint table_cache_numshardbits) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->table_cache_numshardbits = static_cast<int>(table_cache_numshardbits);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: useFixedLengthPrefixExtractor
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_useFixedLengthPrefixExtractor(
+ JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->prefix_extractor.reset(ROCKSDB_NAMESPACE::NewFixedPrefixTransform(
+ static_cast<int>(jprefix_length)));
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: useCappedPrefixExtractor
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_useCappedPrefixExtractor(
+ JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->prefix_extractor.reset(ROCKSDB_NAMESPACE::NewCappedPrefixTransform(
+ static_cast<int>(jprefix_length)));
+}
+
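+// Both helpers install a SliceTransform prefix extractor: the fixed-length
+// form uses exactly the first N bytes (shorter keys fall outside the prefix
+// domain), while the capped form uses up to N bytes so every key has a
+// prefix. Direct equivalent (factories declared in rocksdb/slice_transform.h):
+//
+//   opt->prefix_extractor.reset(
+//       ROCKSDB_NAMESPACE::NewCappedPrefixTransform(8));  // <= 8-byte prefix
+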
+/*
+ * Class: org_rocksdb_Options
+ * Method: walTtlSeconds
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_walTtlSeconds(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->WAL_ttl_seconds;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWalTtlSeconds
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWalTtlSeconds(
+ JNIEnv*, jobject, jlong jhandle, jlong WAL_ttl_seconds) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->WAL_ttl_seconds =
+ static_cast<int64_t>(WAL_ttl_seconds);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: walSizeLimitMB
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_walSizeLimitMB(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->WAL_size_limit_MB;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWalSizeLimitMB
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWalSizeLimitMB(
+ JNIEnv*, jobject, jlong jhandle, jlong WAL_size_limit_MB) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->WAL_size_limit_MB =
+ static_cast<int64_t>(WAL_size_limit_MB);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: manifestPreallocationSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_manifestPreallocationSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->manifest_preallocation_size;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setManifestPreallocationSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setManifestPreallocationSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong preallocation_size) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ preallocation_size);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->manifest_preallocation_size = preallocation_size;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setTableFactory
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setTableFactory(
+ JNIEnv*, jobject, jlong jhandle, jlong jtable_factory_handle) {
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ auto* table_factory =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TableFactory*>(jtable_factory_handle);
+ options->table_factory.reset(table_factory);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: allowMmapReads
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_allowMmapReads(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->allow_mmap_reads;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAllowMmapReads
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAllowMmapReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_reads) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->allow_mmap_reads =
+ static_cast<bool>(allow_mmap_reads);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: allowMmapWrites
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_allowMmapWrites(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->allow_mmap_writes;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAllowMmapWrites
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAllowMmapWrites(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_writes) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->allow_mmap_writes =
+ static_cast<bool>(allow_mmap_writes);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: useDirectReads
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_useDirectReads(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->use_direct_reads;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setUseDirectReads
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setUseDirectReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_direct_reads) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->use_direct_reads =
+ static_cast<bool>(use_direct_reads);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: useDirectIoForFlushAndCompaction
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_useDirectIoForFlushAndCompaction(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->use_direct_io_for_flush_and_compaction;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setUseDirectIoForFlushAndCompaction
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setUseDirectIoForFlushAndCompaction(
+ JNIEnv*, jobject, jlong jhandle,
+ jboolean use_direct_io_for_flush_and_compaction) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->use_direct_io_for_flush_and_compaction =
+ static_cast<bool>(use_direct_io_for_flush_and_compaction);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAllowFAllocate
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAllowFAllocate(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_fallocate) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->allow_fallocate =
+ static_cast<bool>(jallow_fallocate);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: allowFAllocate
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_allowFAllocate(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->allow_fallocate);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: isFdCloseOnExec
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_isFdCloseOnExec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->is_fd_close_on_exec;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setIsFdCloseOnExec
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setIsFdCloseOnExec(
+ JNIEnv*, jobject, jlong jhandle, jboolean is_fd_close_on_exec) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->is_fd_close_on_exec =
+ static_cast<bool>(is_fd_close_on_exec);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: statsDumpPeriodSec
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_statsDumpPeriodSec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->stats_dump_period_sec;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setStatsDumpPeriodSec
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setStatsDumpPeriodSec(
+ JNIEnv*, jobject, jlong jhandle,
+ jint jstats_dump_period_sec) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->stats_dump_period_sec =
+ static_cast<unsigned int>(jstats_dump_period_sec);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: statsPersistPeriodSec
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_statsPersistPeriodSec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->stats_persist_period_sec;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setStatsPersistPeriodSec
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setStatsPersistPeriodSec(
+ JNIEnv*, jobject, jlong jhandle, jint jstats_persist_period_sec) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->stats_persist_period_sec =
+ static_cast<unsigned int>(jstats_persist_period_sec);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: statsHistoryBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_statsHistoryBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->stats_history_buffer_size;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setStatsHistoryBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setStatsHistoryBufferSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jstats_history_buffer_size) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->stats_history_buffer_size =
+ static_cast<size_t>(jstats_history_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: adviseRandomOnOpen
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_adviseRandomOnOpen(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->advise_random_on_open;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAdviseRandomOnOpen
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAdviseRandomOnOpen(
+ JNIEnv*, jobject, jlong jhandle,
+ jboolean advise_random_on_open) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->advise_random_on_open = static_cast<bool>(advise_random_on_open);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDbWriteBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setDbWriteBufferSize(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jdb_write_buffer_size) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->db_write_buffer_size = static_cast<size_t>(jdb_write_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: dbWriteBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_dbWriteBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jlong>(opt->db_write_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAccessHintOnCompactionStart
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setAccessHintOnCompactionStart(
+ JNIEnv*, jobject, jlong jhandle,
+ jbyte jaccess_hint_value) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->access_hint_on_compaction_start =
+ ROCKSDB_NAMESPACE::AccessHintJni::toCppAccessHint(jaccess_hint_value);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: accessHintOnCompactionStart
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_accessHintOnCompactionStart(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return ROCKSDB_NAMESPACE::AccessHintJni::toJavaAccessHint(
+ opt->access_hint_on_compaction_start);
+}
+
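+// Enum-typed options cross the JNI boundary as a single jbyte; the *Jni
+// helper classes in rocksjni/portal.h (AccessHintJni here, later
+// WALRecoveryModeJni and CompressionTypeJni) own the byte<->enum tables in
+// both directions, so the Java and C++ enum orderings can evolve
+// independently. Round trip, as a sketch:
+//
+//   jbyte b = ROCKSDB_NAMESPACE::AccessHintJni::toJavaAccessHint(
+//       opt->access_hint_on_compaction_start);
+//   opt->access_hint_on_compaction_start =
+//       ROCKSDB_NAMESPACE::AccessHintJni::toCppAccessHint(b);
+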
+/*
+ * Class: org_rocksdb_Options
+ * Method: setNewTableReaderForCompactionInputs
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setNewTableReaderForCompactionInputs(
+ JNIEnv*, jobject, jlong jhandle,
+ jboolean jnew_table_reader_for_compaction_inputs) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->new_table_reader_for_compaction_inputs =
+ static_cast<bool>(jnew_table_reader_for_compaction_inputs);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: newTableReaderForCompactionInputs
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_newTableReaderForCompactionInputs(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->new_table_reader_for_compaction_inputs);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionReadaheadSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setCompactionReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jcompaction_readahead_size) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->compaction_readahead_size =
+ static_cast<size_t>(jcompaction_readahead_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: compactionReadaheadSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_compactionReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jlong>(opt->compaction_readahead_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setRandomAccessMaxBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setRandomAccessMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jrandom_access_max_buffer_size) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->random_access_max_buffer_size =
+ static_cast<size_t>(jrandom_access_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: randomAccessMaxBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_randomAccessMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jlong>(opt->random_access_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWritableFileMaxBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWritableFileMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jwritable_file_max_buffer_size) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->writable_file_max_buffer_size =
+ static_cast<size_t>(jwritable_file_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: writableFileMaxBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_writableFileMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jlong>(opt->writable_file_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: useAdaptiveMutex
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_useAdaptiveMutex(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->use_adaptive_mutex;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setUseAdaptiveMutex
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setUseAdaptiveMutex(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_adaptive_mutex) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->use_adaptive_mutex =
+ static_cast<bool>(use_adaptive_mutex);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: bytesPerSync
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_bytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->bytes_per_sync;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setBytesPerSync
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jlong bytes_per_sync) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->bytes_per_sync =
+ static_cast<int64_t>(bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWalBytesPerSync
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWalBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jlong jwal_bytes_per_sync) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->wal_bytes_per_sync =
+ static_cast<int64_t>(jwal_bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: walBytesPerSync
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_walBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jlong>(opt->wal_bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setStrictBytesPerSync
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setStrictBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jboolean jstrict_bytes_per_sync) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->strict_bytes_per_sync = jstrict_bytes_per_sync == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: strictBytesPerSync
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_strictBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->strict_bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setEnableThreadTracking
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setEnableThreadTracking(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_thread_tracking) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->enable_thread_tracking = static_cast<bool>(jenable_thread_tracking);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: enableThreadTracking
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_enableThreadTracking(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->enable_thread_tracking);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDelayedWriteRate
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setDelayedWriteRate(
+ JNIEnv*, jobject, jlong jhandle, jlong jdelayed_write_rate) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->delayed_write_rate = static_cast<uint64_t>(jdelayed_write_rate);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: delayedWriteRate
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_delayedWriteRate(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jlong>(opt->delayed_write_rate);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setEnablePipelinedWrite
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setEnablePipelinedWrite(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_pipelined_write) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->enable_pipelined_write = jenable_pipelined_write == JNI_TRUE;
+}
+
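+// Style note: jboolean is narrowed both as `x == JNI_TRUE` (above) and as
+// `static_cast<bool>(x)` elsewhere in this file; for values produced by the
+// JVM (always JNI_FALSE or JNI_TRUE, i.e. 0 or 1) the two are equivalent:
+//
+//   opt->enable_pipelined_write = (jflag == JNI_TRUE);
+//   opt->enable_pipelined_write = static_cast<bool>(jflag);  // same result
+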
+/*
+ * Class: org_rocksdb_Options
+ * Method: enablePipelinedWrite
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_enablePipelinedWrite(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->enable_pipelined_write);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setUnorderedWrite
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setUnorderedWrite(
+ JNIEnv*, jobject, jlong jhandle, jboolean unordered_write) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->unordered_write =
+ static_cast<bool>(unordered_write);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: unorderedWrite
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_unorderedWrite(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->unordered_write;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAllowConcurrentMemtableWrite
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAllowConcurrentMemtableWrite(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->allow_concurrent_memtable_write = static_cast<bool>(allow);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: allowConcurrentMemtableWrite
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_allowConcurrentMemtableWrite(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->allow_concurrent_memtable_write;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setEnableWriteThreadAdaptiveYield
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setEnableWriteThreadAdaptiveYield(
+ JNIEnv*, jobject, jlong jhandle, jboolean yield) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->enable_write_thread_adaptive_yield = static_cast<bool>(yield);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: enableWriteThreadAdaptiveYield
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_enableWriteThreadAdaptiveYield(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->enable_write_thread_adaptive_yield;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWriteThreadMaxYieldUsec
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWriteThreadMaxYieldUsec(
+ JNIEnv*, jobject, jlong jhandle, jlong max) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->write_thread_max_yield_usec = static_cast<int64_t>(max);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: writeThreadMaxYieldUsec
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_writeThreadMaxYieldUsec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->write_thread_max_yield_usec;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWriteThreadSlowYieldUsec
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWriteThreadSlowYieldUsec(
+ JNIEnv*, jobject, jlong jhandle, jlong slow) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->write_thread_slow_yield_usec = static_cast<int64_t>(slow);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: writeThreadSlowYieldUsec
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->write_thread_slow_yield_usec;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setSkipStatsUpdateOnDbOpen
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setSkipStatsUpdateOnDbOpen(
+ JNIEnv*, jobject, jlong jhandle,
+ jboolean jskip_stats_update_on_db_open) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->skip_stats_update_on_db_open =
+ static_cast<bool>(jskip_stats_update_on_db_open);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: skipStatsUpdateOnDbOpen
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_skipStatsUpdateOnDbOpen(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->skip_stats_update_on_db_open);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setSkipCheckingSstFileSizesOnDbOpen
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setSkipCheckingSstFileSizesOnDbOpen(
+ JNIEnv*, jobject, jlong jhandle,
+ jboolean jskip_checking_sst_file_sizes_on_db_open) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->skip_checking_sst_file_sizes_on_db_open =
+ static_cast<bool>(jskip_checking_sst_file_sizes_on_db_open);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: skipCheckingSstFileSizesOnDbOpen
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_skipCheckingSstFileSizesOnDbOpen(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->skip_checking_sst_file_sizes_on_db_open);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWalRecoveryMode
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setWalRecoveryMode(
+ JNIEnv*, jobject, jlong jhandle,
+ jbyte jwal_recovery_mode_value) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->wal_recovery_mode =
+ ROCKSDB_NAMESPACE::WALRecoveryModeJni::toCppWALRecoveryMode(
+ jwal_recovery_mode_value);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: walRecoveryMode
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_walRecoveryMode(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return ROCKSDB_NAMESPACE::WALRecoveryModeJni::toJavaWALRecoveryMode(
+ opt->wal_recovery_mode);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAllow2pc
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAllow2pc(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_2pc) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->allow_2pc = static_cast<bool>(jallow_2pc);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: allow2pc
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_allow2pc(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->allow_2pc);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setRowCache
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setRowCache(
+ JNIEnv*, jobject, jlong jhandle, jlong jrow_cache_handle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ auto* row_cache =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache>*>(
+ jrow_cache_handle);
+ opt->row_cache = *row_cache;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWalFilter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWalFilter(
+ JNIEnv*, jobject, jlong jhandle, jlong jwal_filter_handle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ auto* wal_filter = reinterpret_cast<ROCKSDB_NAMESPACE::WalFilterJniCallback*>(
+ jwal_filter_handle);
+ opt->wal_filter = wal_filter;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setFailIfOptionsFileError
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setFailIfOptionsFileError(
+ JNIEnv*, jobject, jlong jhandle, jboolean jfail_if_options_file_error) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->fail_if_options_file_error =
+ static_cast<bool>(jfail_if_options_file_error);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: failIfOptionsFileError
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_failIfOptionsFileError(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->fail_if_options_file_error);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDumpMallocStats
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setDumpMallocStats(
+ JNIEnv*, jobject, jlong jhandle, jboolean jdump_malloc_stats) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->dump_malloc_stats = static_cast<bool>(jdump_malloc_stats);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: dumpMallocStats
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_dumpMallocStats(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->dump_malloc_stats);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAvoidFlushDuringRecovery
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAvoidFlushDuringRecovery(
+ JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_recovery) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->avoid_flush_during_recovery =
+ static_cast<bool>(javoid_flush_during_recovery);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: avoidFlushDuringRecovery
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_avoidFlushDuringRecovery(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->avoid_flush_during_recovery);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAvoidFlushDuringShutdown
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAvoidFlushDuringShutdown(
+ JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_shutdown) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->avoid_flush_during_shutdown =
+ static_cast<bool>(javoid_flush_during_shutdown);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: avoidFlushDuringShutdown
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_avoidFlushDuringShutdown(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->avoid_flush_during_shutdown);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAllowIngestBehind
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAllowIngestBehind(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_ingest_behind) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->allow_ingest_behind = jallow_ingest_behind == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: allowIngestBehind
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_allowIngestBehind(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->allow_ingest_behind);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setPreserveDeletes
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setPreserveDeletes(
+ JNIEnv*, jobject, jlong jhandle, jboolean jpreserve_deletes) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->preserve_deletes = jpreserve_deletes == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: preserveDeletes
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_preserveDeletes(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->preserve_deletes);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setTwoWriteQueues
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setTwoWriteQueues(
+ JNIEnv*, jobject, jlong jhandle, jboolean jtwo_write_queues) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->two_write_queues = jtwo_write_queues == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: twoWriteQueues
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_twoWriteQueues(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->two_write_queues);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setManualWalFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setManualWalFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jmanual_wal_flush) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->manual_wal_flush = jmanual_wal_flush == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: manualWalFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_manualWalFlush(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->manual_wal_flush);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAtomicFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAtomicFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jatomic_flush) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->atomic_flush = jatomic_flush == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: atomicFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_atomicFlush(
+    JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jboolean>(opt->atomic_flush);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: tableFactoryName
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_Options_tableFactoryName(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ ROCKSDB_NAMESPACE::TableFactory* tf = opt->table_factory.get();
+
+ // Should never be nullptr.
+  // Default table factory is BlockBasedTableFactory.
+ assert(tf);
+
+ return env->NewStringUTF(tf->Name());
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: minWriteBufferNumberToMerge
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->min_write_buffer_number_to_merge;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMinWriteBufferNumberToMerge
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge(
+ JNIEnv*, jobject, jlong jhandle, jint jmin_write_buffer_number_to_merge) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->min_write_buffer_number_to_merge =
+ static_cast<int>(jmin_write_buffer_number_to_merge);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxWriteBufferNumberToMaintain
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_write_buffer_number_to_maintain;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxWriteBufferNumberToMaintain
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxWriteBufferNumberToMaintain(
+ JNIEnv*, jobject, jlong jhandle,
+ jint jmax_write_buffer_number_to_maintain) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_write_buffer_number_to_maintain =
+ static_cast<int>(jmax_write_buffer_number_to_maintain);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompressionType
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setCompressionType(
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opts->compression =
+ ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType(
+ jcompression_type_value);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: compressionType
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_compressionType(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType(
+ opts->compression);
+}
+
+/**
+ * Helper method to convert a Java byte array of compression levels
+ * to a C++ vector of ROCKSDB_NAMESPACE::CompressionType
+ *
+ * @param env A pointer to the Java environment
+ * @param jcompression_levels A reference to a Java byte array
+ *     where each byte indicates a compression type
+ *
+ * @return A std::unique_ptr to the vector, or an empty std::unique_ptr
+ *     (nullptr) if a JNI exception occurs
+ */
+std::unique_ptr<std::vector<ROCKSDB_NAMESPACE::CompressionType>>
+rocksdb_compression_vector_helper(JNIEnv* env, jbyteArray jcompression_levels) {
+ jsize len = env->GetArrayLength(jcompression_levels);
+ jbyte* jcompression_level =
+ env->GetByteArrayElements(jcompression_levels, nullptr);
+ if (jcompression_level == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return std::unique_ptr<std::vector<ROCKSDB_NAMESPACE::CompressionType>>();
+ }
+
+ auto* compression_levels =
+ new std::vector<ROCKSDB_NAMESPACE::CompressionType>();
+ std::unique_ptr<std::vector<ROCKSDB_NAMESPACE::CompressionType>>
+ uptr_compression_levels(compression_levels);
+
+ for (jsize i = 0; i < len; i++) {
+ jbyte jcl = jcompression_level[i];
+ compression_levels->push_back(
+ static_cast<ROCKSDB_NAMESPACE::CompressionType>(jcl));
+ }
+
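+  // Release with JNI_ABORT: the elements were only read, so nothing needs
+  // to be copied back to the Java array.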
+ env->ReleaseByteArrayElements(jcompression_levels, jcompression_level,
+ JNI_ABORT);
+
+ return uptr_compression_levels;
+}
+
+/**
+ * Helper method to convert a C++ vector of ROCKSDB_NAMESPACE::CompressionType
+ * to a Java byte array of compression levels
+ *
+ * @param env A pointer to the Java environment
+ * @param compression_levels A C++ vector of compression types,
+ *     one entry per level
+ *
+ * @return A jbyteArray, or nullptr if a JNI exception occurs
+ */
+jbyteArray rocksdb_compression_list_helper(
+ JNIEnv* env,
+ std::vector<ROCKSDB_NAMESPACE::CompressionType> compression_levels) {
+ const size_t len = compression_levels.size();
+ jbyte* jbuf = new jbyte[len];
+
+ for (size_t i = 0; i < len; i++) {
+    jbuf[i] = static_cast<jbyte>(compression_levels[i]);
+ }
+
+ // insert in java array
+ jbyteArray jcompression_levels = env->NewByteArray(static_cast<jsize>(len));
+ if (jcompression_levels == nullptr) {
+ // exception thrown: OutOfMemoryError
+ delete[] jbuf;
+ return nullptr;
+ }
+ env->SetByteArrayRegion(jcompression_levels, 0, static_cast<jsize>(len),
+ jbuf);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jcompression_levels);
+ delete[] jbuf;
+ return nullptr;
+ }
+
+ delete[] jbuf;
+
+ return jcompression_levels;
+}
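+
+// A minimal usage sketch (assuming a valid JNIEnv* `env` and a Java byte
+// array `jarr`): the two helpers above are inverses, so a byte[] can be
+// round-tripped through the C++ representation:
+//
+//   auto levels = rocksdb_compression_vector_helper(env, jarr);
+//   if (levels) {  // a null result means a JNI exception is pending
+//     jbyteArray out = rocksdb_compression_list_helper(env, *levels);
+//   }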
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompressionPerLevel
+ * Signature: (J[B)V
+ */
+void Java_org_rocksdb_Options_setCompressionPerLevel(
+ JNIEnv* env, jobject, jlong jhandle, jbyteArray jcompressionLevels) {
+ auto uptr_compression_levels =
+ rocksdb_compression_vector_helper(env, jcompressionLevels);
+ if (!uptr_compression_levels) {
+ // exception occurred
+ return;
+ }
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
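+  // index == level: each entry is the ROCKSDB_NAMESPACE::CompressionType
+  // used for the level at that position.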
+ options->compression_per_level = *(uptr_compression_levels.get());
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: compressionPerLevel
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_Options_compressionPerLevel(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return rocksdb_compression_list_helper(env, options->compression_per_level);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setBottommostCompressionType
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setBottommostCompressionType(
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ options->bottommost_compression =
+ ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType(
+ jcompression_type_value);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: bottommostCompressionType
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_bottommostCompressionType(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType(
+ options->bottommost_compression);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setBottommostCompressionOptions
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setBottommostCompressionOptions(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jbottommost_compression_options_handle) {
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ auto* bottommost_compression_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(
+ jbottommost_compression_options_handle);
+ options->bottommost_compression_opts = *bottommost_compression_options;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompressionOptions
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setCompressionOptions(
+ JNIEnv*, jobject, jlong jhandle, jlong jcompression_options_handle) {
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ auto* compression_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(
+ jcompression_options_handle);
+ options->compression_opts = *compression_options;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionStyle
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setCompactionStyle(
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_style) {
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ options->compaction_style =
+ ROCKSDB_NAMESPACE::CompactionStyleJni::toCppCompactionStyle(
+ jcompaction_style);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: compactionStyle
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_compactionStyle(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompactionStyleJni::toJavaCompactionStyle(
+ options->compaction_style);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxTableFilesSizeFIFO
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxTableFilesSizeFIFO(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->compaction_options_fifo.max_table_files_size =
+ static_cast<uint64_t>(jmax_table_files_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxTableFilesSizeFIFO
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxTableFilesSizeFIFO(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->compaction_options_fifo.max_table_files_size;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: numLevels
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_numLevels(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->num_levels;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setNumLevels
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setNumLevels(
+ JNIEnv*, jobject, jlong jhandle, jint jnum_levels) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->num_levels =
+ static_cast<int>(jnum_levels);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: levelZeroFileNumCompactionTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_levelZeroFileNumCompactionTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_file_num_compaction_trigger;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLevelZeroFileNumCompactionTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setLevelZeroFileNumCompactionTrigger(
+ JNIEnv*, jobject, jlong jhandle,
+ jint jlevel0_file_num_compaction_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_file_num_compaction_trigger =
+ static_cast<int>(jlevel0_file_num_compaction_trigger);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: levelZeroSlowdownWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_levelZeroSlowdownWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_slowdown_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLevelZeroSlowdownWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setLevelZeroSlowdownWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_slowdown_writes_trigger =
+ static_cast<int>(jlevel0_slowdown_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: levelZeroStopWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_levelZeroStopWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_stop_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLevelZeroStopWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setLevelZeroStopWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_stop_writes_trigger =
+ static_cast<int>(jlevel0_stop_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: targetFileSizeBase
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_targetFileSizeBase(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->target_file_size_base;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setTargetFileSizeBase
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setTargetFileSizeBase(
+ JNIEnv*, jobject, jlong jhandle, jlong jtarget_file_size_base) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->target_file_size_base = static_cast<uint64_t>(jtarget_file_size_base);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: targetFileSizeMultiplier
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_targetFileSizeMultiplier(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->target_file_size_multiplier;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setTargetFileSizeMultiplier
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setTargetFileSizeMultiplier(
+ JNIEnv*, jobject, jlong jhandle, jint jtarget_file_size_multiplier) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->target_file_size_multiplier =
+ static_cast<int>(jtarget_file_size_multiplier);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxBytesForLevelBase
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxBytesForLevelBase(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_bytes_for_level_base;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxBytesForLevelBase
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxBytesForLevelBase(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_bytes_for_level_base) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_bytes_for_level_base =
+ static_cast<int64_t>(jmax_bytes_for_level_base);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: levelCompactionDynamicLevelBytes
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_levelCompactionDynamicLevelBytes(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level_compaction_dynamic_level_bytes;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLevelCompactionDynamicLevelBytes
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setLevelCompactionDynamicLevelBytes(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_dynamic_level_bytes) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->level_compaction_dynamic_level_bytes =
+      static_cast<bool>(jenable_dynamic_level_bytes);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxBytesForLevelMultiplier
+ * Signature: (J)D
+ */
+jdouble Java_org_rocksdb_Options_maxBytesForLevelMultiplier(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_bytes_for_level_multiplier;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxBytesForLevelMultiplier
+ * Signature: (JD)V
+ */
+void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplier(
+ JNIEnv*, jobject, jlong jhandle, jdouble jmax_bytes_for_level_multiplier) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_bytes_for_level_multiplier =
+ static_cast<double>(jmax_bytes_for_level_multiplier);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxCompactionBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxCompactionBytes(
+ JNIEnv*, jobject, jlong jhandle) {
+ return static_cast<jlong>(
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_compaction_bytes);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxCompactionBytes
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxCompactionBytes(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_compaction_bytes) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->max_compaction_bytes =
+ static_cast<uint64_t>(jmax_compaction_bytes);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: arenaBlockSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_arenaBlockSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->arena_block_size;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setArenaBlockSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setArenaBlockSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong jarena_block_size) {
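+  // jlong is signed 64-bit while size_t may be narrower (e.g. on 32-bit
+  // builds); reject values that do not fit rather than silently truncate.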
+ auto s =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jarena_block_size);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->arena_block_size =
+ jarena_block_size;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: disableAutoCompactions
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_disableAutoCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->disable_auto_compactions;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setDisableAutoCompactions
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setDisableAutoCompactions(
+ JNIEnv*, jobject, jlong jhandle, jboolean jdisable_auto_compactions) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->disable_auto_compactions = static_cast<bool>(jdisable_auto_compactions);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxSequentialSkipInIterations
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxSequentialSkipInIterations(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_sequential_skip_in_iterations;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxSequentialSkipInIterations
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxSequentialSkipInIterations(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jmax_sequential_skip_in_iterations) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_sequential_skip_in_iterations =
+ static_cast<int64_t>(jmax_sequential_skip_in_iterations);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: inplaceUpdateSupport
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_inplaceUpdateSupport(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->inplace_update_support;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setInplaceUpdateSupport
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setInplaceUpdateSupport(
+ JNIEnv*, jobject, jlong jhandle, jboolean jinplace_update_support) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->inplace_update_support = static_cast<bool>(jinplace_update_support);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: inplaceUpdateNumLocks
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_inplaceUpdateNumLocks(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->inplace_update_num_locks;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setInplaceUpdateNumLocks
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setInplaceUpdateNumLocks(
+ JNIEnv* env, jobject, jlong jhandle, jlong jinplace_update_num_locks) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ jinplace_update_num_locks);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->inplace_update_num_locks = jinplace_update_num_locks;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: memtablePrefixBloomSizeRatio
+ * Signature: (J)D
+ */
+jdouble Java_org_rocksdb_Options_memtablePrefixBloomSizeRatio(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->memtable_prefix_bloom_size_ratio;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMemtablePrefixBloomSizeRatio
+ * Signature: (JD)V
+ */
+void Java_org_rocksdb_Options_setMemtablePrefixBloomSizeRatio(
+ JNIEnv*, jobject, jlong jhandle,
+ jdouble jmemtable_prefix_bloom_size_ratio) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->memtable_prefix_bloom_size_ratio =
+ static_cast<double>(jmemtable_prefix_bloom_size_ratio);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: bloomLocality
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_bloomLocality(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->bloom_locality;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setBloomLocality
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setBloomLocality(
+ JNIEnv*, jobject, jlong jhandle, jint jbloom_locality) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->bloom_locality =
+ static_cast<int32_t>(jbloom_locality);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxSuccessiveMerges
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxSuccessiveMerges(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_successive_merges;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxSuccessiveMerges
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxSuccessiveMerges(
+ JNIEnv* env, jobject, jlong jhandle, jlong jmax_successive_merges) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ jmax_successive_merges);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_successive_merges = jmax_successive_merges;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: optimizeFiltersForHits
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_optimizeFiltersForHits(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->optimize_filters_for_hits;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setOptimizeFiltersForHits
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setOptimizeFiltersForHits(
+ JNIEnv*, jobject, jlong jhandle, jboolean joptimize_filters_for_hits) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->optimize_filters_for_hits =
+ static_cast<bool>(joptimize_filters_for_hits);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: optimizeForSmallDb
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Options_optimizeForSmallDb(
+ JNIEnv*, jobject, jlong jhandle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->OptimizeForSmallDb();
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: optimizeForPointLookup
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_optimizeForPointLookup(
+ JNIEnv*, jobject, jlong jhandle, jlong block_cache_size_mb) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->OptimizeForPointLookup(block_cache_size_mb);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: optimizeLevelStyleCompaction
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_optimizeLevelStyleCompaction(
+ JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->OptimizeLevelStyleCompaction(memtable_memory_budget);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: optimizeUniversalStyleCompaction
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_optimizeUniversalStyleCompaction(
+ JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->OptimizeUniversalStyleCompaction(memtable_memory_budget);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: prepareForBulkLoad
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Options_prepareForBulkLoad(
+ JNIEnv*, jobject, jlong jhandle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->PrepareForBulkLoad();
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: memtableHugePageSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_memtableHugePageSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->memtable_huge_page_size;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMemtableHugePageSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMemtableHugePageSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong jmemtable_huge_page_size) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ jmemtable_huge_page_size);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->memtable_huge_page_size = jmemtable_huge_page_size;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: softPendingCompactionBytesLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_softPendingCompactionBytesLimit(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->soft_pending_compaction_bytes_limit;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setSoftPendingCompactionBytesLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setSoftPendingCompactionBytesLimit(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jsoft_pending_compaction_bytes_limit) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->soft_pending_compaction_bytes_limit =
+ static_cast<int64_t>(jsoft_pending_compaction_bytes_limit);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: hardPendingCompactionBytesLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_hardPendingCompactionBytesLimit(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->hard_pending_compaction_bytes_limit;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setHardPendingCompactionBytesLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setHardPendingCompactionBytesLimit(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jhard_pending_compaction_bytes_limit) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->hard_pending_compaction_bytes_limit =
+ static_cast<int64_t>(jhard_pending_compaction_bytes_limit);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: level0FileNumCompactionTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_level0FileNumCompactionTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_file_num_compaction_trigger;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLevel0FileNumCompactionTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setLevel0FileNumCompactionTrigger(
+ JNIEnv*, jobject, jlong jhandle,
+ jint jlevel0_file_num_compaction_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_file_num_compaction_trigger =
+ static_cast<int32_t>(jlevel0_file_num_compaction_trigger);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: level0SlowdownWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_level0SlowdownWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_slowdown_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLevel0SlowdownWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setLevel0SlowdownWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_slowdown_writes_trigger =
+ static_cast<int32_t>(jlevel0_slowdown_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: level0StopWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_level0StopWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_stop_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLevel0StopWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setLevel0StopWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->level0_stop_writes_trigger =
+ static_cast<int32_t>(jlevel0_stop_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxBytesForLevelMultiplierAdditional
+ * Signature: (J)[I
+ */
+jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto mbflma = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->max_bytes_for_level_multiplier_additional;
+
+ const size_t size = mbflma.size();
+
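+  // Stage the values in a native buffer so they can be handed to the JVM
+  // in a single SetIntArrayRegion call below.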
+ jint* additionals = new jint[size];
+ for (size_t i = 0; i < size; i++) {
+ additionals[i] = static_cast<jint>(mbflma[i]);
+ }
+
+ jsize jlen = static_cast<jsize>(size);
+ jintArray result = env->NewIntArray(jlen);
+ if (result == nullptr) {
+ // exception thrown: OutOfMemoryError
+ delete[] additionals;
+ return nullptr;
+ }
+
+ env->SetIntArrayRegion(result, 0, jlen, additionals);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(result);
+ delete[] additionals;
+ return nullptr;
+ }
+
+ delete[] additionals;
+
+ return result;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxBytesForLevelMultiplierAdditional
+ * Signature: (J[I)V
+ */
+void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplierAdditional(
+ JNIEnv* env, jobject, jlong jhandle,
+ jintArray jmax_bytes_for_level_multiplier_additional) {
+ jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
+ jint* additionals = env->GetIntArrayElements(
+ jmax_bytes_for_level_multiplier_additional, nullptr);
+ if (additionals == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opt->max_bytes_for_level_multiplier_additional.clear();
+ for (jsize i = 0; i < len; i++) {
+ opt->max_bytes_for_level_multiplier_additional.push_back(
+ static_cast<int32_t>(additionals[i]));
+ }
+
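+  // JNI_ABORT: the Java array was only read, so skip the copy-back.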
+ env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
+ additionals, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: paranoidFileChecks
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_paranoidFileChecks(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+ ->paranoid_file_checks;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setParanoidFileChecks
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setParanoidFileChecks(
+ JNIEnv*, jobject, jlong jhandle, jboolean jparanoid_file_checks) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->paranoid_file_checks =
+ static_cast<bool>(jparanoid_file_checks);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setCompactionPriority(
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_priority_value) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opts->compaction_pri =
+ ROCKSDB_NAMESPACE::CompactionPriorityJni::toCppCompactionPriority(
+ jcompaction_priority_value);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: compactionPriority
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_compactionPriority(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompactionPriorityJni::toJavaCompactionPriority(
+ opts->compaction_pri);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setReportBgIoStats
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setReportBgIoStats(
+ JNIEnv*, jobject, jlong jhandle, jboolean jreport_bg_io_stats) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opts->report_bg_io_stats = static_cast<bool>(jreport_bg_io_stats);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: reportBgIoStats
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_reportBgIoStats(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  return static_cast<jboolean>(opts->report_bg_io_stats);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setTtl
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setTtl(
+ JNIEnv*, jobject, jlong jhandle, jlong jttl) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opts->ttl = static_cast<uint64_t>(jttl);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: ttl
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_ttl(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ return static_cast<jlong>(opts->ttl);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionOptionsUniversal
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setCompactionOptionsUniversal(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jcompaction_options_universal_handle) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ auto* opts_uni =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(
+ jcompaction_options_universal_handle);
+ opts->compaction_options_universal = *opts_uni;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionOptionsFIFO
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setCompactionOptionsFIFO(
+ JNIEnv*, jobject, jlong jhandle, jlong jcompaction_options_fifo_handle) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ auto* opts_fifo = reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsFIFO*>(
+ jcompaction_options_fifo_handle);
+ opts->compaction_options_fifo = *opts_fifo;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setForceConsistencyChecks
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setForceConsistencyChecks(
+ JNIEnv*, jobject, jlong jhandle, jboolean jforce_consistency_checks) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+ opts->force_consistency_checks = static_cast<bool>(jforce_consistency_checks);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: forceConsistencyChecks
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_forceConsistencyChecks(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  return static_cast<jboolean>(opts->force_consistency_checks);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::ColumnFamilyOptions
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: newColumnFamilyOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
+ JNIEnv*, jclass) {
+ auto* op = new ROCKSDB_NAMESPACE::ColumnFamilyOptions();
+ return reinterpret_cast<jlong>(op);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: copyColumnFamilyOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_copyColumnFamilyOptions(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto new_opt = new ROCKSDB_NAMESPACE::ColumnFamilyOptions(
+ *(reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)));
+ return reinterpret_cast<jlong>(new_opt);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: newColumnFamilyOptionsFromOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptionsFromOptions(
+ JNIEnv*, jclass, jlong joptions_handle) {
+ auto new_opt = new ROCKSDB_NAMESPACE::ColumnFamilyOptions(
+ *reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(joptions_handle));
+ return reinterpret_cast<jlong>(new_opt);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: getColumnFamilyOptionsFromProps
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps(
+ JNIEnv* env, jclass, jstring jopt_string) {
+ const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
+ if (opt_string == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+
+ auto* cf_options = new ROCKSDB_NAMESPACE::ColumnFamilyOptions();
+ ROCKSDB_NAMESPACE::Status status =
+ ROCKSDB_NAMESPACE::GetColumnFamilyOptionsFromString(
+ ROCKSDB_NAMESPACE::ColumnFamilyOptions(), opt_string, cf_options);
+
+ env->ReleaseStringUTFChars(jopt_string, opt_string);
+
+ // Check if ColumnFamilyOptions creation was possible.
+ jlong ret_value = 0;
+ if (status.ok()) {
+ ret_value = reinterpret_cast<jlong>(cf_options);
+ } else {
+ // if operation failed the ColumnFamilyOptions need to be deleted
+ // again to prevent a memory leak.
+ delete cf_options;
+ }
+ return ret_value;
+}
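+
+// A usage sketch: the accepted option string follows the semicolon-separated
+// name=value syntax of GetColumnFamilyOptionsFromString, e.g.
+//   "write_buffer_size=4194304;max_write_buffer_number=4"
+// (the values shown are illustrative only).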
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(
+ JNIEnv*, jobject, jlong handle) {
+ auto* cfo = reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(handle);
+ assert(cfo != nullptr);
+ delete cfo;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeForSmallDb
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_optimizeForSmallDb(
+ JNIEnv*, jobject, jlong jhandle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->OptimizeForSmallDb();
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeForPointLookup
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_optimizeForPointLookup(
+ JNIEnv*, jobject, jlong jhandle, jlong block_cache_size_mb) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->OptimizeForPointLookup(block_cache_size_mb);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeLevelStyleCompaction
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_optimizeLevelStyleCompaction(
+ JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->OptimizeLevelStyleCompaction(memtable_memory_budget);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeUniversalStyleCompaction
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_optimizeUniversalStyleCompaction(
+ JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->OptimizeUniversalStyleCompaction(memtable_memory_budget);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setComparatorHandle
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JI(
+ JNIEnv*, jobject, jlong jhandle, jint builtinComparator) {
+ switch (builtinComparator) {
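+    // Only the value 1 selects the reverse-bytewise comparator; any other
+    // value falls back to the default bytewise comparator.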
+ case 1:
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->comparator = ROCKSDB_NAMESPACE::ReverseBytewiseComparator();
+ break;
+ default:
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->comparator = ROCKSDB_NAMESPACE::BytewiseComparator();
+ break;
+ }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setComparatorHandle
+ * Signature: (JJB)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JJB(
+ JNIEnv*, jobject, jlong jopt_handle, jlong jcomparator_handle,
+ jbyte jcomparator_type) {
+ ROCKSDB_NAMESPACE::Comparator* comparator = nullptr;
+ switch (jcomparator_type) {
+ // JAVA_COMPARATOR
+ case 0x0:
+ comparator = reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallback*>(
+ jcomparator_handle);
+ break;
+
+ // JAVA_NATIVE_COMPARATOR_WRAPPER
+ case 0x1:
+ comparator =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Comparator*>(jcomparator_handle);
+ break;
+ }
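+  // Note: an unrecognized jcomparator_type leaves `comparator` as nullptr,
+  // which overwrites the option with a null comparator.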
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jopt_handle);
+ opt->comparator = comparator;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMergeOperatorName
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperatorName(
+ JNIEnv* env, jobject, jlong jhandle, jstring jop_name) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
+ if (op_name == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ options->merge_operator =
+ ROCKSDB_NAMESPACE::MergeOperators::CreateFromStringId(op_name);
+ env->ReleaseStringUTFChars(jop_name, op_name);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMergeOperator
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperator(
+ JNIEnv*, jobject, jlong jhandle, jlong mergeOperatorHandle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->merge_operator =
+ *(reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::MergeOperator>*>(
+ mergeOperatorHandle));
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompactionFilterHandle
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterHandle(
+ JNIEnv*, jobject, jlong jopt_handle, jlong jcompactionfilter_handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jopt_handle)
+ ->compaction_filter =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionFilter*>(
+ jcompactionfilter_handle);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompactionFilterFactoryHandle
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterFactoryHandle(
+ JNIEnv*, jobject, jlong jopt_handle,
+ jlong jcompactionfilterfactory_handle) {
+ auto* cff_factory = reinterpret_cast<
+ std::shared_ptr<ROCKSDB_NAMESPACE::CompactionFilterFactoryJniCallback>*>(
+ jcompactionfilterfactory_handle);
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jopt_handle)
+ ->compaction_filter_factory = *cff_factory;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setWriteBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setWriteBufferSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong jwrite_buffer_size) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ jwrite_buffer_size);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->write_buffer_size = jwrite_buffer_size;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: writeBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_writeBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->write_buffer_size;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxWriteBufferNumber
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumber(
+ JNIEnv*, jobject, jlong jhandle, jint jmax_write_buffer_number) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_write_buffer_number = jmax_write_buffer_number;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxWriteBufferNumber
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumber(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_write_buffer_number;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMemTableFactory
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMemTableFactory(
+ JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->memtable_factory.reset(
+ reinterpret_cast<ROCKSDB_NAMESPACE::MemTableRepFactory*>(
+ jfactory_handle));
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: memTableFactoryName
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ColumnFamilyOptions_memTableFactoryName(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ ROCKSDB_NAMESPACE::MemTableRepFactory* tf = opt->memtable_factory.get();
+
+ // Should never be nullptr.
+ // Default memtable factory is SkipListFactory
+ assert(tf);
+
+  // temporary fix for the historical typo in the factory name
+ if (strcmp(tf->Name(), "HashLinkListRepFactory") == 0) {
+ return env->NewStringUTF("HashLinkedListRepFactory");
+ }
+
+ return env->NewStringUTF(tf->Name());
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: useFixedLengthPrefixExtractor
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_useFixedLengthPrefixExtractor(
+ JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->prefix_extractor.reset(ROCKSDB_NAMESPACE::NewFixedPrefixTransform(
+ static_cast<int>(jprefix_length)));
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: useCappedPrefixExtractor
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_useCappedPrefixExtractor(
+ JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->prefix_extractor.reset(ROCKSDB_NAMESPACE::NewCappedPrefixTransform(
+ static_cast<int>(jprefix_length)));
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setTableFactory
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setTableFactory(
+ JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->table_factory.reset(
+ reinterpret_cast<ROCKSDB_NAMESPACE::TableFactory*>(jfactory_handle));
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: tableFactoryName
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ ROCKSDB_NAMESPACE::TableFactory* tf = opt->table_factory.get();
+
+ // Should never be nullptr.
+  // Default table factory is BlockBasedTableFactory.
+ assert(tf);
+
+ return env->NewStringUTF(tf->Name());
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: minWriteBufferNumberToMerge
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_minWriteBufferNumberToMerge(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->min_write_buffer_number_to_merge;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMinWriteBufferNumberToMerge
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMinWriteBufferNumberToMerge(
+ JNIEnv*, jobject, jlong jhandle, jint jmin_write_buffer_number_to_merge) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->min_write_buffer_number_to_merge =
+ static_cast<int>(jmin_write_buffer_number_to_merge);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxWriteBufferNumberToMaintain
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumberToMaintain(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_write_buffer_number_to_maintain;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxWriteBufferNumberToMaintain
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumberToMaintain(
+ JNIEnv*, jobject, jlong jhandle,
+ jint jmax_write_buffer_number_to_maintain) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_write_buffer_number_to_maintain =
+ static_cast<int>(jmax_write_buffer_number_to_maintain);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompressionType
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompressionType(
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ cf_opts->compression =
+ ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType(
+ jcompression_type_value);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: compressionType
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType(
+ cf_opts->compression);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompressionPerLevel
+ * Signature: (J[B)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel(
+ JNIEnv* env, jobject, jlong jhandle, jbyteArray jcompressionLevels) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ auto uptr_compression_levels =
+ rocksdb_compression_vector_helper(env, jcompressionLevels);
+ if (!uptr_compression_levels) {
+ // exception occurred
+ return;
+ }
+ options->compression_per_level = *(uptr_compression_levels.get());
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: compressionPerLevel
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ return rocksdb_compression_list_helper(env,
+ cf_options->compression_per_level);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setBottommostCompressionType
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionType(
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
+ auto* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ cf_options->bottommost_compression =
+ ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType(
+ jcompression_type_value);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: bottommostCompressionType
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_ColumnFamilyOptions_bottommostCompressionType(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType(
+ cf_options->bottommost_compression);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setBottommostCompressionOptions
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionOptions(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jbottommost_compression_options_handle) {
+ auto* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ auto* bottommost_compression_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(
+ jbottommost_compression_options_handle);
+ cf_options->bottommost_compression_opts = *bottommost_compression_options;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompressionOptions
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompressionOptions(
+ JNIEnv*, jobject, jlong jhandle, jlong jcompression_options_handle) {
+ auto* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ auto* compression_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(
+ jcompression_options_handle);
+ cf_options->compression_opts = *compression_options;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompactionStyle
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompactionStyle(
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_style) {
+ auto* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ cf_options->compaction_style =
+ ROCKSDB_NAMESPACE::CompactionStyleJni::toCppCompactionStyle(
+ jcompaction_style);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: compactionStyle
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionStyle(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompactionStyleJni::toJavaCompactionStyle(
+ cf_options->compaction_style);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxTableFilesSizeFIFO
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxTableFilesSizeFIFO(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->compaction_options_fifo.max_table_files_size =
+ static_cast<uint64_t>(jmax_table_files_size);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxTableFilesSizeFIFO
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxTableFilesSizeFIFO(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->compaction_options_fifo.max_table_files_size;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: numLevels
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_numLevels(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->num_levels;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setNumLevels
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setNumLevels(
+ JNIEnv*, jobject, jlong jhandle, jint jnum_levels) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->num_levels = static_cast<int>(jnum_levels);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: levelZeroFileNumCompactionTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroFileNumCompactionTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_file_num_compaction_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevelZeroFileNumCompactionTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroFileNumCompactionTrigger(
+ JNIEnv*, jobject, jlong jhandle,
+ jint jlevel0_file_num_compaction_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_file_num_compaction_trigger =
+ static_cast<int>(jlevel0_file_num_compaction_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: levelZeroSlowdownWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroSlowdownWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_slowdown_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevelZeroSlowdownWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroSlowdownWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_slowdown_writes_trigger =
+ static_cast<int>(jlevel0_slowdown_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: levelZeroStopWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroStopWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_stop_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevelZeroStopWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroStopWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_stop_writes_trigger =
+ static_cast<int>(jlevel0_stop_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: targetFileSizeBase
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeBase(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->target_file_size_base;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setTargetFileSizeBase
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeBase(
+ JNIEnv*, jobject, jlong jhandle, jlong jtarget_file_size_base) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->target_file_size_base = static_cast<uint64_t>(jtarget_file_size_base);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: targetFileSizeMultiplier
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeMultiplier(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->target_file_size_multiplier;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setTargetFileSizeMultiplier
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeMultiplier(
+ JNIEnv*, jobject, jlong jhandle, jint jtarget_file_size_multiplier) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->target_file_size_multiplier =
+ static_cast<int>(jtarget_file_size_multiplier);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxBytesForLevelBase
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelBase(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_bytes_for_level_base;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxBytesForLevelBase
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelBase(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_bytes_for_level_base) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_bytes_for_level_base =
+      static_cast<uint64_t>(jmax_bytes_for_level_base);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: levelCompactionDynamicLevelBytes
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_levelCompactionDynamicLevelBytes(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level_compaction_dynamic_level_bytes;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevelCompactionDynamicLevelBytes
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevelCompactionDynamicLevelBytes(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_dynamic_level_bytes) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level_compaction_dynamic_level_bytes =
+      static_cast<bool>(jenable_dynamic_level_bytes);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxBytesForLevelMultiplier
+ * Signature: (J)D
+ */
+jdouble Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplier(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_bytes_for_level_multiplier;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxBytesForLevelMultiplier
+ * Signature: (JD)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplier(
+ JNIEnv*, jobject, jlong jhandle, jdouble jmax_bytes_for_level_multiplier) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_bytes_for_level_multiplier =
+ static_cast<double>(jmax_bytes_for_level_multiplier);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxCompactionBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxCompactionBytes(
+ JNIEnv*, jobject, jlong jhandle) {
+ return static_cast<jlong>(
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_compaction_bytes);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxCompactionBytes
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxCompactionBytes(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_compaction_bytes) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_compaction_bytes = static_cast<uint64_t>(jmax_compaction_bytes);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: arenaBlockSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_arenaBlockSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->arena_block_size;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setArenaBlockSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setArenaBlockSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong jarena_block_size) {
+ auto s =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jarena_block_size);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->arena_block_size = jarena_block_size;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
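+
+// setArenaBlockSize (and the other size_t-backed setters in this file)
+// validates the incoming jlong with JniUtil::check_if_jlong_fits_size_t
+// before storing it, since a 64-bit jlong may not fit in size_t on 32-bit
+// targets. A sketch of the kind of guard this relies on (illustrative; the
+// actual check lives in JniUtil):
+//
+//   if (jarena_block_size < 0 ||
+//       static_cast<uint64_t>(jarena_block_size) >
+//           std::numeric_limits<size_t>::max()) {
+//     // out of range: throw IllegalArgumentException instead of storing
+//   }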
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: disableAutoCompactions
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_disableAutoCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->disable_auto_compactions;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setDisableAutoCompactions
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setDisableAutoCompactions(
+ JNIEnv*, jobject, jlong jhandle, jboolean jdisable_auto_compactions) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->disable_auto_compactions = static_cast<bool>(jdisable_auto_compactions);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxSequentialSkipInIterations
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxSequentialSkipInIterations(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_sequential_skip_in_iterations;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxSequentialSkipInIterations
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxSequentialSkipInIterations(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jmax_sequential_skip_in_iterations) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_sequential_skip_in_iterations =
+      static_cast<uint64_t>(jmax_sequential_skip_in_iterations);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: inplaceUpdateSupport
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateSupport(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->inplace_update_support;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setInplaceUpdateSupport
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateSupport(
+ JNIEnv*, jobject, jlong jhandle, jboolean jinplace_update_support) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->inplace_update_support = static_cast<bool>(jinplace_update_support);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: inplaceUpdateNumLocks
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateNumLocks(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->inplace_update_num_locks;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setInplaceUpdateNumLocks
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateNumLocks(
+ JNIEnv* env, jobject, jlong jhandle, jlong jinplace_update_num_locks) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ jinplace_update_num_locks);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->inplace_update_num_locks = jinplace_update_num_locks;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: memtablePrefixBloomSizeRatio
+ * Signature: (J)D
+ */
+jdouble Java_org_rocksdb_ColumnFamilyOptions_memtablePrefixBloomSizeRatio(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->memtable_prefix_bloom_size_ratio;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMemtablePrefixBloomSizeRatio
+ * Signature: (JD)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMemtablePrefixBloomSizeRatio(
+ JNIEnv*, jobject, jlong jhandle,
+ jdouble jmemtable_prefix_bloom_size_ratio) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->memtable_prefix_bloom_size_ratio =
+ static_cast<double>(jmemtable_prefix_bloom_size_ratio);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: bloomLocality
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_bloomLocality(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->bloom_locality;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setBloomLocality
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setBloomLocality(
+ JNIEnv*, jobject, jlong jhandle, jint jbloom_locality) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->bloom_locality = static_cast<uint32_t>(jbloom_locality);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxSuccessiveMerges
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxSuccessiveMerges(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_successive_merges;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxSuccessiveMerges
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxSuccessiveMerges(
+ JNIEnv* env, jobject, jlong jhandle, jlong jmax_successive_merges) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ jmax_successive_merges);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_successive_merges = jmax_successive_merges;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeFiltersForHits
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_optimizeFiltersForHits(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->optimize_filters_for_hits;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setOptimizeFiltersForHits
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setOptimizeFiltersForHits(
+ JNIEnv*, jobject, jlong jhandle, jboolean joptimize_filters_for_hits) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->optimize_filters_for_hits =
+ static_cast<bool>(joptimize_filters_for_hits);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: memtableHugePageSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_memtableHugePageSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->memtable_huge_page_size;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMemtableHugePageSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMemtableHugePageSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong jmemtable_huge_page_size) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ jmemtable_huge_page_size);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->memtable_huge_page_size = jmemtable_huge_page_size;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: softPendingCompactionBytesLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_softPendingCompactionBytesLimit(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->soft_pending_compaction_bytes_limit;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setSoftPendingCompactionBytesLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setSoftPendingCompactionBytesLimit(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jsoft_pending_compaction_bytes_limit) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->soft_pending_compaction_bytes_limit =
+      static_cast<uint64_t>(jsoft_pending_compaction_bytes_limit);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: hardPendingCompactionBytesLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_hardPendingCompactionBytesLimit(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->hard_pending_compaction_bytes_limit;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setHardPendingCompactionBytesLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setHardPendingCompactionBytesLimit(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jhard_pending_compaction_bytes_limit) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->hard_pending_compaction_bytes_limit =
+      static_cast<uint64_t>(jhard_pending_compaction_bytes_limit);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: level0FileNumCompactionTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_level0FileNumCompactionTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_file_num_compaction_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevel0FileNumCompactionTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevel0FileNumCompactionTrigger(
+ JNIEnv*, jobject, jlong jhandle,
+ jint jlevel0_file_num_compaction_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_file_num_compaction_trigger =
+ static_cast<int32_t>(jlevel0_file_num_compaction_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: level0SlowdownWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_level0SlowdownWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_slowdown_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevel0SlowdownWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevel0SlowdownWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_slowdown_writes_trigger =
+ static_cast<int32_t>(jlevel0_slowdown_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: level0StopWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_level0StopWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_stop_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevel0StopWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevel0StopWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->level0_stop_writes_trigger =
+ static_cast<int32_t>(jlevel0_stop_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxBytesForLevelMultiplierAdditional
+ * Signature: (J)[I
+ */
+jintArray Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditional(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto mbflma =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->max_bytes_for_level_multiplier_additional;
+
+ const size_t size = mbflma.size();
+
+ jint* additionals = new jint[size];
+ for (size_t i = 0; i < size; i++) {
+ additionals[i] = static_cast<jint>(mbflma[i]);
+ }
+
+ jsize jlen = static_cast<jsize>(size);
+ jintArray result = env->NewIntArray(jlen);
+ if (result == nullptr) {
+ // exception thrown: OutOfMemoryError
+ delete[] additionals;
+ return nullptr;
+ }
+ env->SetIntArrayRegion(result, 0, jlen, additionals);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(result);
+ delete[] additionals;
+ return nullptr;
+ }
+
+ delete[] additionals;
+
+ return result;
+}
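+
+// The new[]/delete[] pair above must be balanced on every exit path
+// (OutOfMemoryError, ArrayIndexOutOfBoundsException, success). An
+// equivalent formulation with automatic cleanup, shown only as a sketch:
+//
+//   std::vector<jint> buf(mbflma.begin(), mbflma.end());
+//   jsize n = static_cast<jsize>(buf.size());
+//   jintArray arr = env->NewIntArray(n);
+//   if (arr != nullptr) {
+//     env->SetIntArrayRegion(arr, 0, n, buf.data());
+//   }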
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxBytesForLevelMultiplierAdditional
+ * Signature: (J[I)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplierAdditional(
+ JNIEnv* env, jobject, jlong jhandle,
+ jintArray jmax_bytes_for_level_multiplier_additional) {
+ jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
+  jint* additionals = env->GetIntArrayElements(
+      jmax_bytes_for_level_multiplier_additional, nullptr);
+ if (additionals == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ auto* cf_opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ cf_opt->max_bytes_for_level_multiplier_additional.clear();
+ for (jsize i = 0; i < len; i++) {
+ cf_opt->max_bytes_for_level_multiplier_additional.push_back(
+ static_cast<int32_t>(additionals[i]));
+ }
+
+ env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
+ additionals, JNI_ABORT);
+}
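+
+// JNI_ABORT is the right release mode above: the pinned (or copied) Java
+// array is only read, so nothing needs to be copied back before the buffer
+// is released.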
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: paranoidFileChecks
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_paranoidFileChecks(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->paranoid_file_checks;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setParanoidFileChecks
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setParanoidFileChecks(
+ JNIEnv*, jobject, jlong jhandle, jboolean jparanoid_file_checks) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+ ->paranoid_file_checks = static_cast<bool>(jparanoid_file_checks);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompactionPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompactionPriority(
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_priority_value) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ cf_opts->compaction_pri =
+ ROCKSDB_NAMESPACE::CompactionPriorityJni::toCppCompactionPriority(
+ jcompaction_priority_value);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: compactionPriority
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionPriority(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ return ROCKSDB_NAMESPACE::CompactionPriorityJni::toJavaCompactionPriority(
+ cf_opts->compaction_pri);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setReportBgIoStats
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setReportBgIoStats(
+ JNIEnv*, jobject, jlong jhandle, jboolean jreport_bg_io_stats) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ cf_opts->report_bg_io_stats = static_cast<bool>(jreport_bg_io_stats);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: reportBgIoStats
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_reportBgIoStats(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ return static_cast<bool>(cf_opts->report_bg_io_stats);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setTtl
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setTtl(
+ JNIEnv*, jobject, jlong jhandle, jlong jttl) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ cf_opts->ttl = static_cast<uint64_t>(jttl);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: ttl
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_ttl(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ return static_cast<jlong>(cf_opts->ttl);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompactionOptionsUniversal
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsUniversal(
+ JNIEnv*, jobject, jlong jhandle,
+ jlong jcompaction_options_universal_handle) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ auto* opts_uni =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsUniversal*>(
+ jcompaction_options_universal_handle);
+ cf_opts->compaction_options_universal = *opts_uni;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompactionOptionsFIFO
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsFIFO(
+ JNIEnv*, jobject, jlong jhandle, jlong jcompaction_options_fifo_handle) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ auto* opts_fifo = reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptionsFIFO*>(
+ jcompaction_options_fifo_handle);
+ cf_opts->compaction_options_fifo = *opts_fifo;
+}
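+
+// Both compaction-options setters above copy the referenced native object
+// by value into the ColumnFamilyOptions, so the Java wrapper that owns the
+// source handle can be closed independently once the call returns.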
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setForceConsistencyChecks
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setForceConsistencyChecks(
+ JNIEnv*, jobject, jlong jhandle, jboolean jforce_consistency_checks) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ cf_opts->force_consistency_checks =
+ static_cast<bool>(jforce_consistency_checks);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: forceConsistencyChecks
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_forceConsistencyChecks(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* cf_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+ return static_cast<bool>(cf_opts->force_consistency_checks);
+}
+
+/////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::DBOptions
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: newDBOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_DBOptions_newDBOptions(
+ JNIEnv*, jclass) {
+ auto* dbop = new ROCKSDB_NAMESPACE::DBOptions();
+ return reinterpret_cast<jlong>(dbop);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: copyDBOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_copyDBOptions(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto new_opt = new ROCKSDB_NAMESPACE::DBOptions(
+ *(reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)));
+ return reinterpret_cast<jlong>(new_opt);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: newDBOptionsFromOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_newDBOptionsFromOptions(
+ JNIEnv*, jclass, jlong joptions_handle) {
+ auto new_opt = new ROCKSDB_NAMESPACE::DBOptions(
+ *reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(joptions_handle));
+ return reinterpret_cast<jlong>(new_opt);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: getDBOptionsFromProps
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(
+ JNIEnv* env, jclass, jstring jopt_string) {
+ const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
+ if (opt_string == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+
+ auto* db_options = new ROCKSDB_NAMESPACE::DBOptions();
+ ROCKSDB_NAMESPACE::Status status = ROCKSDB_NAMESPACE::GetDBOptionsFromString(
+ ROCKSDB_NAMESPACE::DBOptions(), opt_string, db_options);
+
+ env->ReleaseStringUTFChars(jopt_string, opt_string);
+
+ // Check if DBOptions creation was possible.
+ jlong ret_value = 0;
+ if (status.ok()) {
+ ret_value = reinterpret_cast<jlong>(db_options);
+ } else {
+    // If the operation failed, the DBOptions must be deleted
+    // to prevent a memory leak.
+ delete db_options;
+ }
+ return ret_value;
+}
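+
+// The accepted properties string is the standard RocksDB options string of
+// semicolon-separated key=value pairs. The same call from native code, as
+// an illustrative example (option values are arbitrary):
+//
+//   ROCKSDB_NAMESPACE::DBOptions base, parsed;
+//   ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::GetDBOptionsFromString(
+//       base, "create_if_missing=true;max_open_files=5000", &parsed);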
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_DBOptions_disposeInternal(
+ JNIEnv*, jobject, jlong handle) {
+ auto* dbo = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(handle);
+ assert(dbo != nullptr);
+ delete dbo;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: optimizeForSmallDb
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_DBOptions_optimizeForSmallDb(
+ JNIEnv*, jobject, jlong jhandle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->OptimizeForSmallDb();
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setEnv
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setEnv(
+ JNIEnv*, jobject, jlong jhandle, jlong jenv_handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->env =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jenv_handle);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setIncreaseParallelism
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setIncreaseParallelism(
+ JNIEnv*, jobject, jlong jhandle, jint totalThreads) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->IncreaseParallelism(
+ static_cast<int>(totalThreads));
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setCreateIfMissing
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setCreateIfMissing(
+ JNIEnv*, jobject, jlong jhandle, jboolean flag) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->create_if_missing =
+ flag;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: createIfMissing
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_createIfMissing(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->create_if_missing;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setCreateMissingColumnFamilies
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setCreateMissingColumnFamilies(
+ JNIEnv*, jobject, jlong jhandle, jboolean flag) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->create_missing_column_families = flag;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: createMissingColumnFamilies
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_createMissingColumnFamilies(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->create_missing_column_families;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setErrorIfExists
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setErrorIfExists(
+ JNIEnv*, jobject, jlong jhandle, jboolean error_if_exists) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->error_if_exists =
+ static_cast<bool>(error_if_exists);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: errorIfExists
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_errorIfExists(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->error_if_exists;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setParanoidChecks
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setParanoidChecks(
+ JNIEnv*, jobject, jlong jhandle, jboolean paranoid_checks) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->paranoid_checks =
+ static_cast<bool>(paranoid_checks);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: paranoidChecks
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_paranoidChecks(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->paranoid_checks;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setRateLimiter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setRateLimiter(
+ JNIEnv*, jobject, jlong jhandle, jlong jrate_limiter_handle) {
+ std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>* pRateLimiter =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(
+ jrate_limiter_handle);
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->rate_limiter =
+ *pRateLimiter;
+}
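+
+// For shared_ptr-managed resources (the RateLimiter here, and the
+// SstFileManager, Logger, Statistics and WriteBufferManager below), the
+// jlong handle points at a heap-allocated std::shared_ptr; assigning
+// through it copies the shared_ptr, so ownership is shared with the Java
+// wrapper rather than transferred.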
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setSstFileManager
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setSstFileManager(
+ JNIEnv*, jobject, jlong jhandle, jlong jsst_file_manager_handle) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jsst_file_manager_handle);
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->sst_file_manager =
+ *sptr_sst_file_manager;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setLogger
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setLogger(
+ JNIEnv*, jobject, jlong jhandle, jlong jlogger_handle) {
+ std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>* pLogger =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*>(
+ jlogger_handle);
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->info_log = *pLogger;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setInfoLogLevel
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_DBOptions_setInfoLogLevel(
+ JNIEnv*, jobject, jlong jhandle, jbyte jlog_level) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->info_log_level =
+ static_cast<ROCKSDB_NAMESPACE::InfoLogLevel>(jlog_level);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: infoLogLevel
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_DBOptions_infoLogLevel(
+ JNIEnv*, jobject, jlong jhandle) {
+ return static_cast<jbyte>(
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->info_log_level);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxTotalWalSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxTotalWalSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_total_wal_size) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->max_total_wal_size =
+      static_cast<uint64_t>(jmax_total_wal_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxTotalWalSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_maxTotalWalSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_total_wal_size;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxOpenFiles
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxOpenFiles(
+ JNIEnv*, jobject, jlong jhandle, jint max_open_files) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->max_open_files =
+ static_cast<int>(max_open_files);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxOpenFiles
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_maxOpenFiles(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_open_files;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxFileOpeningThreads
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxFileOpeningThreads(
+ JNIEnv*, jobject, jlong jhandle, jint jmax_file_opening_threads) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_file_opening_threads = static_cast<int>(jmax_file_opening_threads);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxFileOpeningThreads
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_maxFileOpeningThreads(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<int>(opt->max_file_opening_threads);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setStatistics
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setStatistics(
+ JNIEnv*, jobject, jlong jhandle, jlong jstatistics_handle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ auto* pSptr =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::StatisticsJni>*>(
+ jstatistics_handle);
+ opt->statistics = *pSptr;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: statistics
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_statistics(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> sptr = opt->statistics;
+ if (sptr == nullptr) {
+ return 0;
+ } else {
+ std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>* pSptr =
+ new std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>(sptr);
+ return reinterpret_cast<jlong>(pSptr);
+ }
+}
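+
+// The getter above returns a freshly heap-allocated shared_ptr copy as the
+// handle; the extra reference keeps the Statistics object alive until the
+// Java wrapper disposes of the handle and deletes the shared_ptr.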
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setUseFsync
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setUseFsync(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_fsync) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->use_fsync =
+ static_cast<bool>(use_fsync);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: useFsync
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_useFsync(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->use_fsync;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setDbPaths
+ * Signature: (J[Ljava/lang/String;[J)V
+ */
+void Java_org_rocksdb_DBOptions_setDbPaths(
+ JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths,
+ jlongArray jtarget_sizes) {
+ std::vector<ROCKSDB_NAMESPACE::DbPath> db_paths;
+ jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
+ if (ptr_jtarget_size == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ jboolean has_exception = JNI_FALSE;
+ const jsize len = env->GetArrayLength(jpaths);
+ for (jsize i = 0; i < len; i++) {
+    jstring jpath =
+        static_cast<jstring>(env->GetObjectArrayElement(jpaths, i));
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+      return;
+    }
+    std::string path =
+        ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jpath, &has_exception);
+ env->DeleteLocalRef(jpath);
+
+ if (has_exception == JNI_TRUE) {
+ env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+ return;
+ }
+
+ jlong jtarget_size = ptr_jtarget_size[i];
+
+ db_paths.push_back(
+ ROCKSDB_NAMESPACE::DbPath(path, static_cast<uint64_t>(jtarget_size)));
+ }
+
+ env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->db_paths = db_paths;
+}
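+
+// DeleteLocalRef is called on each jpath inside the loop above because a
+// large jpaths array could otherwise exhaust the JVM's local-reference
+// table before this function returns.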
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: dbPathsLen
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_dbPathsLen(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jlong>(opt->db_paths.size());
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: dbPaths
+ * Signature: (J[Ljava/lang/String;[J)V
+ */
+void Java_org_rocksdb_DBOptions_dbPaths(
+ JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths,
+ jlongArray jtarget_sizes) {
+ jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
+ if (ptr_jtarget_size == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ const jsize len = env->GetArrayLength(jpaths);
+ for (jsize i = 0; i < len; i++) {
+ ROCKSDB_NAMESPACE::DbPath db_path = opt->db_paths[i];
+
+ jstring jpath = env->NewStringUTF(db_path.path.c_str());
+ if (jpath == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+ return;
+ }
+ env->SetObjectArrayElement(jpaths, i, jpath);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jpath);
+ env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+ return;
+ }
+
+    ptr_jtarget_size[i] = static_cast<jlong>(db_path.target_size);
+ }
+
+  // mode 0: copy the written sizes back to the Java array and free/unpin
+  // the native buffer (JNI_COMMIT would copy back without releasing it).
+  env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, 0);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setDbLogDir
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_DBOptions_setDbLogDir(
+ JNIEnv* env, jobject, jlong jhandle, jstring jdb_log_dir) {
+ const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
+ if (log_dir == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->db_log_dir.assign(
+ log_dir);
+ env->ReleaseStringUTFChars(jdb_log_dir, log_dir);
+}
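+
+// GetStringUTFChars yields modified UTF-8 and the matching getter below
+// rebuilds the Java string with NewStringUTF, so the directory strings
+// round-trip without additional conversion.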
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: dbLogDir
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_DBOptions_dbLogDir(
+ JNIEnv* env, jobject, jlong jhandle) {
+ return env->NewStringUTF(
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->db_log_dir.c_str());
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalDir
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_DBOptions_setWalDir(
+ JNIEnv* env, jobject, jlong jhandle, jstring jwal_dir) {
+  const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
+  if (wal_dir == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->wal_dir.assign(
+      wal_dir);
+  env->ReleaseStringUTFChars(jwal_dir, wal_dir);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: walDir
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_DBOptions_walDir(
+ JNIEnv* env, jobject, jlong jhandle) {
+ return env->NewStringUTF(
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->wal_dir.c_str());
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setDeleteObsoleteFilesPeriodMicros
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setDeleteObsoleteFilesPeriodMicros(
+ JNIEnv*, jobject, jlong jhandle, jlong micros) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->delete_obsolete_files_period_micros = static_cast<uint64_t>(micros);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: deleteObsoleteFilesPeriodMicros
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_deleteObsoleteFilesPeriodMicros(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->delete_obsolete_files_period_micros;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setBaseBackgroundCompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setBaseBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->base_background_compactions = static_cast<int>(max);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: baseBackgroundCompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_baseBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->base_background_compactions;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxBackgroundCompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_background_compactions = static_cast<int>(max);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxBackgroundCompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_maxBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_background_compactions;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxSubcompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxSubcompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->max_subcompactions =
+      static_cast<uint32_t>(max);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxSubcompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_maxSubcompactions(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_subcompactions;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxBackgroundFlushes
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxBackgroundFlushes(
+ JNIEnv*, jobject, jlong jhandle, jint max_background_flushes) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_background_flushes = static_cast<int>(max_background_flushes);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxBackgroundFlushes
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_maxBackgroundFlushes(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_background_flushes;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxBackgroundJobs
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxBackgroundJobs(
+ JNIEnv*, jobject, jlong jhandle, jint max_background_jobs) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_background_jobs = static_cast<int>(max_background_jobs);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxBackgroundJobs
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_maxBackgroundJobs(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_background_jobs;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxLogFileSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxLogFileSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong max_log_file_size) {
+ auto s =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(max_log_file_size);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_log_file_size = max_log_file_size;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxLogFileSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_maxLogFileSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_log_file_size;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setLogFileTimeToRoll
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setLogFileTimeToRoll(
+ JNIEnv* env, jobject, jlong jhandle, jlong log_file_time_to_roll) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ log_file_time_to_roll);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->log_file_time_to_roll = log_file_time_to_roll;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: logFileTimeToRoll
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_logFileTimeToRoll(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->log_file_time_to_roll;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setKeepLogFileNum
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setKeepLogFileNum(
+ JNIEnv* env, jobject, jlong jhandle, jlong keep_log_file_num) {
+ auto s =
+ ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(keep_log_file_num);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->keep_log_file_num = keep_log_file_num;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: keepLogFileNum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_keepLogFileNum(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->keep_log_file_num;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setRecycleLogFileNum
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setRecycleLogFileNum(
+ JNIEnv* env, jobject, jlong jhandle, jlong recycle_log_file_num) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ recycle_log_file_num);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->recycle_log_file_num = recycle_log_file_num;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: recycleLogFileNum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_recycleLogFileNum(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->recycle_log_file_num;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxManifestFileSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxManifestFileSize(
+ JNIEnv*, jobject, jlong jhandle, jlong max_manifest_file_size) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->max_manifest_file_size = static_cast<uint64_t>(max_manifest_file_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxManifestFileSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_maxManifestFileSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->max_manifest_file_size;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setTableCacheNumshardbits
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setTableCacheNumshardbits(
+ JNIEnv*, jobject, jlong jhandle, jint table_cache_numshardbits) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->table_cache_numshardbits = static_cast<int>(table_cache_numshardbits);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: tableCacheNumshardbits
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->table_cache_numshardbits;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalTtlSeconds
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWalTtlSeconds(
+ JNIEnv*, jobject, jlong jhandle, jlong WAL_ttl_seconds) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->WAL_ttl_seconds =
+      static_cast<uint64_t>(WAL_ttl_seconds);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: walTtlSeconds
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_walTtlSeconds(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->WAL_ttl_seconds;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalSizeLimitMB
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWalSizeLimitMB(
+ JNIEnv*, jobject, jlong jhandle, jlong WAL_size_limit_MB) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->WAL_size_limit_MB =
+      static_cast<uint64_t>(WAL_size_limit_MB);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: walSizeLimitMB
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_walSizeLimitMB(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->WAL_size_limit_MB;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setManifestPreallocationSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setManifestPreallocationSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong preallocation_size) {
+ auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+ preallocation_size);
+ if (s.ok()) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->manifest_preallocation_size = preallocation_size;
+ } else {
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: manifestPreallocationSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_manifestPreallocationSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->manifest_preallocation_size;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: useDirectReads
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_useDirectReads(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->use_direct_reads;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setUseDirectReads
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setUseDirectReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_direct_reads) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->use_direct_reads =
+ static_cast<bool>(use_direct_reads);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: useDirectIoForFlushAndCompaction
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_useDirectIoForFlushAndCompaction(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->use_direct_io_for_flush_and_compaction;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setUseDirectIoForFlushAndCompaction
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setUseDirectIoForFlushAndCompaction(
+ JNIEnv*, jobject, jlong jhandle,
+ jboolean use_direct_io_for_flush_and_compaction) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->use_direct_io_for_flush_and_compaction =
+ static_cast<bool>(use_direct_io_for_flush_and_compaction);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowFAllocate
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowFAllocate(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_fallocate) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->allow_fallocate =
+ static_cast<bool>(jallow_fallocate);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allowFAllocate
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allowFAllocate(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->allow_fallocate);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowMmapReads
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowMmapReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_reads) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->allow_mmap_reads =
+ static_cast<bool>(allow_mmap_reads);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allowMmapReads
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allowMmapReads(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->allow_mmap_reads;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowMmapWrites
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowMmapWrites(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_writes) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->allow_mmap_writes =
+ static_cast<bool>(allow_mmap_writes);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allowMmapWrites
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allowMmapWrites(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->allow_mmap_writes;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setIsFdCloseOnExec
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setIsFdCloseOnExec(
+ JNIEnv*, jobject, jlong jhandle, jboolean is_fd_close_on_exec) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->is_fd_close_on_exec = static_cast<bool>(is_fd_close_on_exec);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: isFdCloseOnExec
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_isFdCloseOnExec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->is_fd_close_on_exec;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setStatsDumpPeriodSec
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setStatsDumpPeriodSec(
+ JNIEnv*, jobject, jlong jhandle, jint jstats_dump_period_sec) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->stats_dump_period_sec =
+ static_cast<unsigned int>(jstats_dump_period_sec);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: statsDumpPeriodSec
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_statsDumpPeriodSec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->stats_dump_period_sec;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setStatsPersistPeriodSec
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setStatsPersistPeriodSec(
+ JNIEnv*, jobject, jlong jhandle, jint jstats_persist_period_sec) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->stats_persist_period_sec =
+ static_cast<unsigned int>(jstats_persist_period_sec);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: statsPersistPeriodSec
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_statsPersistPeriodSec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->stats_persist_period_sec;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setStatsHistoryBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setStatsHistoryBufferSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jstats_history_buffer_size) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->stats_history_buffer_size =
+ static_cast<size_t>(jstats_history_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: statsHistoryBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_statsHistoryBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->stats_history_buffer_size;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAdviseRandomOnOpen
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAdviseRandomOnOpen(
+ JNIEnv*, jobject, jlong jhandle, jboolean advise_random_on_open) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->advise_random_on_open = static_cast<bool>(advise_random_on_open);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: adviseRandomOnOpen
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_adviseRandomOnOpen(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->advise_random_on_open;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setDbWriteBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setDbWriteBufferSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jdb_write_buffer_size) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->db_write_buffer_size = static_cast<size_t>(jdb_write_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWriteBufferManager
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWriteBufferManager(
+ JNIEnv*, jobject, jlong jdb_options_handle,
+ jlong jwrite_buffer_manager_handle) {
+ auto* write_buffer_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::WriteBufferManager>*>(
+ jwrite_buffer_manager_handle);
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdb_options_handle)
+ ->write_buffer_manager = *write_buffer_manager;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: dbWriteBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_dbWriteBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jlong>(opt->db_write_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAccessHintOnCompactionStart
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_DBOptions_setAccessHintOnCompactionStart(
+ JNIEnv*, jobject, jlong jhandle, jbyte jaccess_hint_value) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->access_hint_on_compaction_start =
+ ROCKSDB_NAMESPACE::AccessHintJni::toCppAccessHint(jaccess_hint_value);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: accessHintOnCompactionStart
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_DBOptions_accessHintOnCompactionStart(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return ROCKSDB_NAMESPACE::AccessHintJni::toJavaAccessHint(
+ opt->access_hint_on_compaction_start);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setNewTableReaderForCompactionInputs
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setNewTableReaderForCompactionInputs(
+ JNIEnv*, jobject, jlong jhandle,
+ jboolean jnew_table_reader_for_compaction_inputs) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->new_table_reader_for_compaction_inputs =
+ static_cast<bool>(jnew_table_reader_for_compaction_inputs);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: newTableReaderForCompactionInputs
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_newTableReaderForCompactionInputs(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<bool>(opt->new_table_reader_for_compaction_inputs);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setCompactionReadaheadSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setCompactionReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jcompaction_readahead_size) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->compaction_readahead_size =
+ static_cast<size_t>(jcompaction_readahead_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: compactionReadaheadSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_compactionReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jlong>(opt->compaction_readahead_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setRandomAccessMaxBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setRandomAccessMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jrandom_access_max_buffer_size) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->random_access_max_buffer_size =
+ static_cast<size_t>(jrandom_access_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: randomAccessMaxBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_randomAccessMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jlong>(opt->random_access_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWritableFileMaxBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWritableFileMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jwritable_file_max_buffer_size) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->writable_file_max_buffer_size =
+ static_cast<size_t>(jwritable_file_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: writableFileMaxBufferSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_writableFileMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jlong>(opt->writable_file_max_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setUseAdaptiveMutex
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setUseAdaptiveMutex(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_adaptive_mutex) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->use_adaptive_mutex =
+ static_cast<bool>(use_adaptive_mutex);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: useAdaptiveMutex
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_useAdaptiveMutex(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->use_adaptive_mutex;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setBytesPerSync
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jlong bytes_per_sync) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->bytes_per_sync =
+ static_cast<int64_t>(bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: bytesPerSync
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_bytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->bytes_per_sync;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalBytesPerSync
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWalBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jlong jwal_bytes_per_sync) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->wal_bytes_per_sync =
+ static_cast<int64_t>(jwal_bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: walBytesPerSync
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_walBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jlong>(opt->wal_bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setStrictBytesPerSync
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setStrictBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jboolean jstrict_bytes_per_sync) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->strict_bytes_per_sync = jstrict_bytes_per_sync == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: strictBytesPerSync
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_strictBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
+ return static_cast<jboolean>(
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->strict_bytes_per_sync);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setDelayedWriteRate
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setDelayedWriteRate(
+ JNIEnv*, jobject, jlong jhandle, jlong jdelayed_write_rate) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->delayed_write_rate = static_cast<uint64_t>(jdelayed_write_rate);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: delayedWriteRate
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_delayedWriteRate(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jlong>(opt->delayed_write_rate);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setEnablePipelinedWrite
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setEnablePipelinedWrite(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_pipelined_write) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->enable_pipelined_write = jenable_pipelined_write == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: enablePipelinedWrite
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_enablePipelinedWrite(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->enable_pipelined_write);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setUnorderedWrite
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setUnorderedWrite(
+ JNIEnv*, jobject, jlong jhandle, jboolean junordered_write) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->unordered_write = junordered_write == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: unorderedWrite
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_unorderedWrite(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->unordered_write);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setEnableThreadTracking
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setEnableThreadTracking(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_thread_tracking) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->enable_thread_tracking = jenable_thread_tracking == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: enableThreadTracking
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_enableThreadTracking(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->enable_thread_tracking);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowConcurrentMemtableWrite
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowConcurrentMemtableWrite(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->allow_concurrent_memtable_write = static_cast<bool>(allow);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allowConcurrentMemtableWrite
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allowConcurrentMemtableWrite(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->allow_concurrent_memtable_write;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setEnableWriteThreadAdaptiveYield
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setEnableWriteThreadAdaptiveYield(
+ JNIEnv*, jobject, jlong jhandle, jboolean yield) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->enable_write_thread_adaptive_yield = static_cast<bool>(yield);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: enableWriteThreadAdaptiveYield
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_enableWriteThreadAdaptiveYield(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->enable_write_thread_adaptive_yield;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWriteThreadMaxYieldUsec
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWriteThreadMaxYieldUsec(
+ JNIEnv*, jobject, jlong jhandle, jlong max) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->write_thread_max_yield_usec = static_cast<int64_t>(max);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: writeThreadMaxYieldUsec
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_writeThreadMaxYieldUsec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->write_thread_max_yield_usec;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWriteThreadSlowYieldUsec
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWriteThreadSlowYieldUsec(
+ JNIEnv*, jobject, jlong jhandle, jlong slow) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->write_thread_slow_yield_usec = static_cast<int64_t>(slow);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: writeThreadSlowYieldUsec
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+ ->write_thread_slow_yield_usec;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setSkipStatsUpdateOnDbOpen
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setSkipStatsUpdateOnDbOpen(
+ JNIEnv*, jobject, jlong jhandle, jboolean jskip_stats_update_on_db_open) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->skip_stats_update_on_db_open =
+ static_cast<bool>(jskip_stats_update_on_db_open);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: skipStatsUpdateOnDbOpen
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_skipStatsUpdateOnDbOpen(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->skip_stats_update_on_db_open);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setSkipCheckingSstFileSizesOnDbOpen
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setSkipCheckingSstFileSizesOnDbOpen(
+ JNIEnv*, jobject, jlong jhandle,
+ jboolean jskip_checking_sst_file_sizes_on_db_open) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->skip_checking_sst_file_sizes_on_db_open =
+ static_cast<bool>(jskip_checking_sst_file_sizes_on_db_open);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: skipCheckingSstFileSizesOnDbOpen
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_skipCheckingSstFileSizesOnDbOpen(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->skip_checking_sst_file_sizes_on_db_open);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalRecoveryMode
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_DBOptions_setWalRecoveryMode(
+ JNIEnv*, jobject, jlong jhandle, jbyte jwal_recovery_mode_value) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->wal_recovery_mode =
+ ROCKSDB_NAMESPACE::WALRecoveryModeJni::toCppWALRecoveryMode(
+ jwal_recovery_mode_value);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: walRecoveryMode
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_DBOptions_walRecoveryMode(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return ROCKSDB_NAMESPACE::WALRecoveryModeJni::toJavaWALRecoveryMode(
+ opt->wal_recovery_mode);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllow2pc
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllow2pc(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_2pc) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->allow_2pc = static_cast<bool>(jallow_2pc);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allow2pc
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allow2pc(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->allow_2pc);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setRowCache
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setRowCache(
+ JNIEnv*, jobject, jlong jhandle, jlong jrow_cache_handle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ auto* row_cache =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache>*>(
+ jrow_cache_handle);
+ opt->row_cache = *row_cache;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalFilter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWalFilter(
+ JNIEnv*, jobject, jlong jhandle, jlong jwal_filter_handle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ auto* wal_filter = reinterpret_cast<ROCKSDB_NAMESPACE::WalFilterJniCallback*>(
+ jwal_filter_handle);
+ opt->wal_filter = wal_filter;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setFailIfOptionsFileError
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setFailIfOptionsFileError(
+ JNIEnv*, jobject, jlong jhandle, jboolean jfail_if_options_file_error) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->fail_if_options_file_error =
+ static_cast<bool>(jfail_if_options_file_error);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: failIfOptionsFileError
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_failIfOptionsFileError(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->fail_if_options_file_error);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setDumpMallocStats
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setDumpMallocStats(
+ JNIEnv*, jobject, jlong jhandle, jboolean jdump_malloc_stats) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->dump_malloc_stats = static_cast<bool>(jdump_malloc_stats);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: dumpMallocStats
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_dumpMallocStats(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->dump_malloc_stats);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAvoidFlushDuringRecovery
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAvoidFlushDuringRecovery(
+ JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_recovery) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->avoid_flush_during_recovery =
+ static_cast<bool>(javoid_flush_during_recovery);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: avoidFlushDuringRecovery
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringRecovery(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->avoid_flush_during_recovery);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowIngestBehind
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowIngestBehind(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_ingest_behind) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->allow_ingest_behind = jallow_ingest_behind == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allowIngestBehind
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allowIngestBehind(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->allow_ingest_behind);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setPreserveDeletes
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setPreserveDeletes(
+ JNIEnv*, jobject, jlong jhandle, jboolean jpreserve_deletes) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->preserve_deletes = jpreserve_deletes == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: preserveDeletes
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_preserveDeletes(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->preserve_deletes);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setTwoWriteQueues
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setTwoWriteQueues(
+ JNIEnv*, jobject, jlong jhandle, jboolean jtwo_write_queues) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->two_write_queues = jtwo_write_queues == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: twoWriteQueues
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_twoWriteQueues(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->two_write_queues);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setManualWalFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setManualWalFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jmanual_wal_flush) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->manual_wal_flush = jmanual_wal_flush == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: manualWalFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_manualWalFlush(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->manual_wal_flush);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAtomicFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAtomicFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jatomic_flush) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->atomic_flush = jatomic_flush == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: atomicFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_atomicFlush(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->atomic_flush);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAvoidFlushDuringShutdown
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAvoidFlushDuringShutdown(
+ JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_shutdown) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ opt->avoid_flush_during_shutdown =
+ static_cast<bool>(javoid_flush_during_shutdown);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: avoidFlushDuringShutdown
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringShutdown(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->avoid_flush_during_shutdown);
+}
+
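+// Every exported function in this file is located by the JVM purely through
+// its name: the JNI convention mangles the fully qualified Java method
+// "org.rocksdb.DBOptions.setAtomicFlush" into the symbol
+// "Java_org_rocksdb_DBOptions_setAtomicFlush"; overloads additionally get a
+// parameter-signature suffix (e.g. newReadOptions__ZZ further below). A
+// simplified sketch of the Java side of one such pair (the declarations are
+// hypothetical paraphrases of org.rocksdb.DBOptions):
+//
+//   // private native void setAtomicFlush(long handle, boolean atomicFlush);
+//   // private native boolean atomicFlush(long handle);
+//
+// The first jlong parameter is always the native handle held by the Java
+// wrapper object; JNI itself supplies the JNIEnv* and the receiver jobject.
+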
+//////////////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::WriteOptions
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: newWriteOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_WriteOptions_newWriteOptions(
+ JNIEnv*, jclass) {
+ auto* op = new ROCKSDB_NAMESPACE::WriteOptions();
+ return reinterpret_cast<jlong>(op);
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: copyWriteOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_WriteOptions_copyWriteOptions(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto new_opt = new ROCKSDB_NAMESPACE::WriteOptions(
+ *(reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)));
+ return reinterpret_cast<jlong>(new_opt);
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle);
+ assert(write_options != nullptr);
+ delete write_options;
+}
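+
+// Together, the three functions above define the lifecycle contract of the
+// Java wrapper: newWriteOptions() mints the handle, copyWriteOptions()
+// clones via the C++ copy constructor, and disposeInternal() frees the
+// object exactly once. A condensed sketch of the call sequence the Java
+// side performs (env/jcls/jobj stand in for the real JNI arguments):
+//
+//   jlong h = Java_org_rocksdb_WriteOptions_newWriteOptions(env, jcls);
+//   jlong h2 = Java_org_rocksdb_WriteOptions_copyWriteOptions(env, jcls, h);
+//   // ... the two handles are now independent WriteOptions objects ...
+//   Java_org_rocksdb_WriteOptions_disposeInternal(env, jobj, h2);
+//   Java_org_rocksdb_WriteOptions_disposeInternal(env, jobj, h);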
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: setSync
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_WriteOptions_setSync(
+ JNIEnv*, jobject, jlong jhandle, jboolean jflag) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)->sync = jflag;
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: sync
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteOptions_sync(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)->sync;
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: setDisableWAL
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_WriteOptions_setDisableWAL(
+ JNIEnv*, jobject, jlong jhandle, jboolean jflag) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)->disableWAL =
+ jflag;
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: disableWAL
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteOptions_disableWAL(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)
+ ->disableWAL;
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: setIgnoreMissingColumnFamilies
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_WriteOptions_setIgnoreMissingColumnFamilies(
+ JNIEnv*, jobject, jlong jhandle,
+ jboolean jignore_missing_column_families) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)
+ ->ignore_missing_column_families =
+ static_cast<bool>(jignore_missing_column_families);
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: ignoreMissingColumnFamilies
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteOptions_ignoreMissingColumnFamilies(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)
+ ->ignore_missing_column_families;
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: setNoSlowdown
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_WriteOptions_setNoSlowdown(
+ JNIEnv*, jobject, jlong jhandle, jboolean jno_slowdown) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)->no_slowdown =
+ static_cast<bool>(jno_slowdown);
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: noSlowdown
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteOptions_noSlowdown(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)
+ ->no_slowdown;
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: setLowPri
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_WriteOptions_setLowPri(
+ JNIEnv*, jobject, jlong jhandle, jboolean jlow_pri) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)->low_pri =
+ static_cast<bool>(jlow_pri);
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: lowPri
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteOptions_lowPri(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jhandle)->low_pri;
+}
+
+/////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::ReadOptions
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: newReadOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_ReadOptions_newReadOptions__(
+ JNIEnv*, jclass) {
+ auto* read_options = new ROCKSDB_NAMESPACE::ReadOptions();
+ return reinterpret_cast<jlong>(read_options);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: newReadOptions
+ * Signature: (ZZ)J
+ */
+jlong Java_org_rocksdb_ReadOptions_newReadOptions__ZZ(
+ JNIEnv*, jclass, jboolean jverify_checksums, jboolean jfill_cache) {
+ auto* read_options = new ROCKSDB_NAMESPACE::ReadOptions(
+ static_cast<bool>(jverify_checksums), static_cast<bool>(jfill_cache));
+ return reinterpret_cast<jlong>(read_options);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: copyReadOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ReadOptions_copyReadOptions(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto new_opt = new ROCKSDB_NAMESPACE::ReadOptions(
+ *(reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)));
+ return reinterpret_cast<jlong>(new_opt);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_ReadOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* read_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ assert(read_options != nullptr);
+ delete read_options;
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setVerifyChecksums
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ReadOptions_setVerifyChecksums(
+ JNIEnv*, jobject, jlong jhandle, jboolean jverify_checksums) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->verify_checksums =
+ static_cast<bool>(jverify_checksums);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: verifyChecksums
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ReadOptions_verifyChecksums(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)
+ ->verify_checksums;
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setFillCache
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ReadOptions_setFillCache(
+ JNIEnv*, jobject, jlong jhandle, jboolean jfill_cache) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->fill_cache =
+ static_cast<bool>(jfill_cache);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: fillCache
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ReadOptions_fillCache(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->fill_cache;
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setTailing
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ReadOptions_setTailing(
+ JNIEnv*, jobject, jlong jhandle, jboolean jtailing) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->tailing =
+ static_cast<bool>(jtailing);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: tailing
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ReadOptions_tailing(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->tailing;
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: managed
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ReadOptions_managed(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->managed;
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setManaged
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ReadOptions_setManaged(
+ JNIEnv*, jobject, jlong jhandle, jboolean jmanaged) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->managed =
+ static_cast<bool>(jmanaged);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: totalOrderSeek
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ReadOptions_totalOrderSeek(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)
+ ->total_order_seek;
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setTotalOrderSeek
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ReadOptions_setTotalOrderSeek(
+ JNIEnv*, jobject, jlong jhandle, jboolean jtotal_order_seek) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->total_order_seek =
+ static_cast<bool>(jtotal_order_seek);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: prefixSameAsStart
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ReadOptions_prefixSameAsStart(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)
+ ->prefix_same_as_start;
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setPrefixSameAsStart
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ReadOptions_setPrefixSameAsStart(
+ JNIEnv*, jobject, jlong jhandle, jboolean jprefix_same_as_start) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)
+ ->prefix_same_as_start = static_cast<bool>(jprefix_same_as_start);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: pinData
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ReadOptions_pinData(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->pin_data;
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setPinData
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ReadOptions_setPinData(
+ JNIEnv*, jobject, jlong jhandle, jboolean jpin_data) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->pin_data =
+ static_cast<bool>(jpin_data);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: backgroundPurgeOnIteratorCleanup
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ReadOptions_backgroundPurgeOnIteratorCleanup(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ return static_cast<jboolean>(opt->background_purge_on_iterator_cleanup);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setBackgroundPurgeOnIteratorCleanup
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ReadOptions_setBackgroundPurgeOnIteratorCleanup(
+ JNIEnv*, jobject, jlong jhandle,
+ jboolean jbackground_purge_on_iterator_cleanup) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ opt->background_purge_on_iterator_cleanup =
+ static_cast<bool>(jbackground_purge_on_iterator_cleanup);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: readaheadSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ReadOptions_readaheadSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ return static_cast<jlong>(opt->readahead_size);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setReadaheadSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ReadOptions_setReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jreadahead_size) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ opt->readahead_size = static_cast<size_t>(jreadahead_size);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: maxSkippableInternalKeys
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ReadOptions_maxSkippableInternalKeys(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ return static_cast<jlong>(opt->max_skippable_internal_keys);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setMaxSkippableInternalKeys
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ReadOptions_setMaxSkippableInternalKeys(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_skippable_internal_keys) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ opt->max_skippable_internal_keys =
+ static_cast<uint64_t>(jmax_skippable_internal_keys);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: ignoreRangeDeletions
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ReadOptions_ignoreRangeDeletions(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ return static_cast<jboolean>(opt->ignore_range_deletions);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setIgnoreRangeDeletions
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ReadOptions_setIgnoreRangeDeletions(
+ JNIEnv*, jobject, jlong jhandle, jboolean jignore_range_deletions) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ opt->ignore_range_deletions = static_cast<bool>(jignore_range_deletions);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setSnapshot
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ReadOptions_setSnapshot(
+ JNIEnv*, jobject, jlong jhandle, jlong jsnapshot) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->snapshot =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Snapshot*>(jsnapshot);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: snapshot
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ReadOptions_snapshot(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto& snapshot =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->snapshot;
+ return reinterpret_cast<jlong>(snapshot);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: readTier
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_ReadOptions_readTier(
+ JNIEnv*, jobject, jlong jhandle) {
+ return static_cast<jbyte>(
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->read_tier);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setReadTier
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_ReadOptions_setReadTier(
+ JNIEnv*, jobject, jlong jhandle, jbyte jread_tier) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)->read_tier =
+ static_cast<ROCKSDB_NAMESPACE::ReadTier>(jread_tier);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setIterateUpperBound
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ReadOptions_setIterateUpperBound(
+ JNIEnv*, jobject, jlong jhandle, jlong jupper_bound_slice_handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)
+ ->iterate_upper_bound =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(jupper_bound_slice_handle);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: iterateUpperBound
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ReadOptions_iterateUpperBound(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto& upper_bound_slice_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)
+ ->iterate_upper_bound;
+ return reinterpret_cast<jlong>(upper_bound_slice_handle);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setIterateLowerBound
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ReadOptions_setIterateLowerBound(
+ JNIEnv*, jobject, jlong jhandle, jlong jlower_bound_slice_handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)
+ ->iterate_lower_bound =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(jlower_bound_slice_handle);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: iterateLowerBound
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ReadOptions_iterateLowerBound(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto& lower_bound_slice_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle)
+ ->iterate_lower_bound;
+ return reinterpret_cast<jlong>(lower_bound_slice_handle);
+}
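+
+// Note that iterate_upper_bound / iterate_lower_bound store only the raw
+// Slice pointer carried by the jlong handle; ReadOptions never copies the
+// bound. The Java caller must therefore keep the Slice (and the memory it
+// points at) alive for as long as the ReadOptions is in use. A minimal
+// sketch of the C++ equivalent of what the Java wrapper arranges:
+//
+//   #include "rocksdb/options.h"
+//   #include "rocksdb/slice.h"
+//
+//   ROCKSDB_NAMESPACE::Slice upper("key999");  // must outlive read_opts
+//   ROCKSDB_NAMESPACE::ReadOptions read_opts;
+//   read_opts.iterate_upper_bound = &upper;
+//   // ... iterate with read_opts ...; only then may `upper` be destroyed.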
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setTableFilter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ReadOptions_setTableFilter(
+ JNIEnv*, jobject, jlong jhandle, jlong jjni_table_filter_handle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ auto* jni_table_filter =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TableFilterJniCallback*>(
+ jjni_table_filter_handle);
+ opt->table_filter = jni_table_filter->GetTableFilterFunction();
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setIterStartSeqnum
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ReadOptions_setIterStartSeqnum(
+ JNIEnv*, jobject, jlong jhandle, jlong jiter_start_seqnum) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ opt->iter_start_seqnum = static_cast<uint64_t>(jiter_start_seqnum);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: iterStartSeqnum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ReadOptions_iterStartSeqnum(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jhandle);
+ return static_cast<jlong>(opt->iter_start_seqnum);
+}
+
+/////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::ComparatorOptions
+
+/*
+ * Class: org_rocksdb_ComparatorOptions
+ * Method: newComparatorOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(
+ JNIEnv*, jclass) {
+ auto* comparator_opt = new ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions();
+ return reinterpret_cast<jlong>(comparator_opt);
+}
+
+/*
+ * Class: org_rocksdb_ComparatorOptions
+ * Method: reusedSynchronisationType
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_ComparatorOptions_reusedSynchronisationType(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* comparator_opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*>(
+ jhandle);
+ return ROCKSDB_NAMESPACE::ReusedSynchronisationTypeJni::
+ toJavaReusedSynchronisationType(
+ comparator_opt->reused_synchronisation_type);
+}
+
+/*
+ * Class: org_rocksdb_ComparatorOptions
+ * Method: setReusedSynchronisationType
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_ComparatorOptions_setReusedSynchronisationType(
+ JNIEnv*, jobject, jlong jhandle, jbyte jreused_synchronisation_type) {
+ auto* comparator_opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*>(
+ jhandle);
+ comparator_opt->reused_synchronisation_type =
+ ROCKSDB_NAMESPACE::ReusedSynchronisationTypeJni::
+ toCppReusedSynchronisationType(jreused_synchronisation_type);
+}
+
+/*
+ * Class: org_rocksdb_ComparatorOptions
+ * Method: useDirectBuffer
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ComparatorOptions_useDirectBuffer(
+ JNIEnv*, jobject, jlong jhandle) {
+ return static_cast<jboolean>(
+ reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*>(
+ jhandle)
+ ->direct_buffer);
+}
+
+/*
+ * Class: org_rocksdb_ComparatorOptions
+ * Method: setUseDirectBuffer
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ComparatorOptions_setUseDirectBuffer(
+ JNIEnv*, jobject, jlong jhandle, jboolean jdirect_buffer) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*>(jhandle)
+ ->direct_buffer = jdirect_buffer == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_ComparatorOptions
+ * Method: maxReusedBufferSize
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ComparatorOptions_maxReusedBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
+ return static_cast<jint>(
+ reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*>(
+ jhandle)
+ ->max_reused_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_ComparatorOptions
+ * Method: setMaxReusedBufferSize
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ComparatorOptions_setMaxReusedBufferSize(
+ JNIEnv*, jobject, jlong jhandle, jint jmax_reused_buffer_size) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*>(jhandle)
+ ->max_reused_buffer_size = static_cast<int32_t>(jmax_reused_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_ComparatorOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_ComparatorOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* comparator_opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*>(
+ jhandle);
+ assert(comparator_opt != nullptr);
+ delete comparator_opt;
+}
+
+/////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::FlushOptions
+
+/*
+ * Class: org_rocksdb_FlushOptions
+ * Method: newFlushOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_FlushOptions_newFlushOptions(
+ JNIEnv*, jclass) {
+ auto* flush_opt = new ROCKSDB_NAMESPACE::FlushOptions();
+ return reinterpret_cast<jlong>(flush_opt);
+}
+
+/*
+ * Class: org_rocksdb_FlushOptions
+ * Method: setWaitForFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_FlushOptions_setWaitForFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jwait) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::FlushOptions*>(jhandle)->wait =
+ static_cast<bool>(jwait);
+}
+
+/*
+ * Class: org_rocksdb_FlushOptions
+ * Method: waitForFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_FlushOptions_waitForFlush(
+ JNIEnv*, jobject, jlong jhandle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::FlushOptions*>(jhandle)->wait;
+}
+
+/*
+ * Class: org_rocksdb_FlushOptions
+ * Method: setAllowWriteStall
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_FlushOptions_setAllowWriteStall(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_write_stall) {
+ auto* flush_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::FlushOptions*>(jhandle);
+ flush_options->allow_write_stall = jallow_write_stall == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_FlushOptions
+ * Method: allowWriteStall
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_FlushOptions_allowWriteStall(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* flush_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::FlushOptions*>(jhandle);
+ return static_cast<jboolean>(flush_options->allow_write_stall);
+}
+
+/*
+ * Class: org_rocksdb_FlushOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_FlushOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* flush_opt = reinterpret_cast<ROCKSDB_NAMESPACE::FlushOptions*>(jhandle);
+ assert(flush_opt != nullptr);
+ delete flush_opt;
+}
diff --git a/src/rocksdb/java/rocksjni/options_util.cc b/src/rocksdb/java/rocksjni/options_util.cc
new file mode 100644
index 000000000..e195adafa
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/options_util.cc
@@ -0,0 +1,134 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::OptionsUtil methods from Java side.
+
+#include <jni.h>
+#include <string>
+
+#include "include/org_rocksdb_OptionsUtil.h"
+
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/utilities/options_util.h"
+#include "rocksjni/portal.h"
+
+void build_column_family_descriptor_list(
+ JNIEnv* env, jobject jcfds,
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor>& cf_descs) {
+ jmethodID add_mid = ROCKSDB_NAMESPACE::ListJni::getListAddMethodId(env);
+ if (add_mid == nullptr) {
+ // exception occurred accessing method
+ return;
+ }
+
+ // Convert each native descriptor into a Java ColumnFamilyDescriptor
+ for (ROCKSDB_NAMESPACE::ColumnFamilyDescriptor& cfd : cf_descs) {
+ // Construct a ColumnFamilyDescriptor java object
+ jobject jcfd =
+ ROCKSDB_NAMESPACE::ColumnFamilyDescriptorJni::construct(env, &cfd);
+ if (env->ExceptionCheck()) {
+ // exception occurred constructing object
+ if (jcfd != nullptr) {
+ env->DeleteLocalRef(jcfd);
+ }
+ return;
+ }
+
+ // Add the object to java list.
+ jboolean rs = env->CallBooleanMethod(jcfds, add_mid, jcfd);
+ if (env->ExceptionCheck() || rs == JNI_FALSE) {
+ // exception occurred calling method, or could not add
+ if (jcfd != nullptr) {
+ env->DeleteLocalRef(jcfd);
+ }
+ return;
+ }
+
+ // The list now holds its own reference to the descriptor, so release
+ // the local reference; otherwise the local reference table would grow
+ // with the number of column families.
+ env->DeleteLocalRef(jcfd);
+ }
+}
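+
+// The same populate-a-java.util.List pattern works for any element type; a
+// minimal sketch with a hypothetical helper for a vector of strings:
+//
+//   void build_string_list(JNIEnv* env, jobject jlist,
+//                          const std::vector<std::string>& strs) {
+//     jmethodID add_mid = ROCKSDB_NAMESPACE::ListJni::getListAddMethodId(env);
+//     if (add_mid == nullptr) {
+//       return;  // a Java exception is pending
+//     }
+//     for (const std::string& s : strs) {
+//       jstring jstr = env->NewStringUTF(s.c_str());
+//       if (jstr == nullptr) {
+//         return;  // OutOfMemoryError is pending
+//       }
+//       jboolean rs = env->CallBooleanMethod(jlist, add_mid, jstr);
+//       env->DeleteLocalRef(jstr);
+//       if (env->ExceptionCheck() || rs == JNI_FALSE) {
+//         return;
+//       }
+//     }
+//   }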
+
+/*
+ * Class: org_rocksdb_OptionsUtil
+ * Method: loadLatestOptions
+ * Signature: (Ljava/lang/String;JJLjava/util/List;Z)V
+ */
+void Java_org_rocksdb_OptionsUtil_loadLatestOptions(
+ JNIEnv* env, jclass /*jcls*/, jstring jdbpath, jlong jenv_handle,
+ jlong jdb_opts_handle, jobject jcfds, jboolean ignore_unknown_options) {
+ jboolean has_exception = JNI_FALSE;
+ auto db_path =
+ ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jdbpath, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor> cf_descs;
+ ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::LoadLatestOptions(
+ db_path, reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jenv_handle),
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdb_opts_handle),
+ &cf_descs, ignore_unknown_options);
+ if (!s.ok()) {
+ // error, raise an exception
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ } else {
+ build_column_family_descriptor_list(env, jcfds, cf_descs);
+ }
+}
+
+/*
+ * Class: org_rocksdb_OptionsUtil
+ * Method: loadOptionsFromFile
+ * Signature: (Ljava/lang/String;JJLjava/util/List;Z)V
+ */
+void Java_org_rocksdb_OptionsUtil_loadOptionsFromFile(
+ JNIEnv* env, jclass /*jcls*/, jstring jopts_file_name, jlong jenv_handle,
+ jlong jdb_opts_handle, jobject jcfds, jboolean ignore_unknown_options) {
+ jboolean has_exception = JNI_FALSE;
+ auto opts_file_name = ROCKSDB_NAMESPACE::JniUtil::copyStdString(
+ env, jopts_file_name, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor> cf_descs;
+ ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::LoadOptionsFromFile(
+ opts_file_name, reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jenv_handle),
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdb_opts_handle),
+ &cf_descs, ignore_unknown_options);
+ if (!s.ok()) {
+ // error, raise an exception
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ } else {
+ build_column_family_descriptor_list(env, jcfds, cf_descs);
+ }
+}
+
+/*
+ * Class: org_rocksdb_OptionsUtil
+ * Method: getLatestOptionsFileName
+ * Signature: (Ljava/lang/String;J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_OptionsUtil_getLatestOptionsFileName(
+ JNIEnv* env, jclass /*jcls*/, jstring jdbpath, jlong jenv_handle) {
+ jboolean has_exception = JNI_FALSE;
+ auto db_path =
+ ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jdbpath, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return nullptr;
+ }
+ std::string options_file_name;
+ ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::GetLatestOptionsFileName(
+ db_path, reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jenv_handle),
+ &options_file_name);
+ if (!s.ok()) {
+ // error, raise an exception
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ } else {
+ return env->NewStringUTF(options_file_name.c_str());
+ }
+}
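+
+// For reference, the C++ API that these wrappers bridge to can be driven
+// directly as follows (a minimal sketch; the path is a placeholder and
+// error handling is elided):
+//
+//   #include "rocksdb/utilities/options_util.h"
+//
+//   ROCKSDB_NAMESPACE::DBOptions db_opts;
+//   std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor> cf_descs;
+//   ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::LoadLatestOptions(
+//       "/path/to/db", ROCKSDB_NAMESPACE::Env::Default(), &db_opts,
+//       &cf_descs, /*ignore_unknown_options=*/false);
+//   // On success, cf_descs holds one descriptor per column family.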
diff --git a/src/rocksdb/java/rocksjni/persistent_cache.cc b/src/rocksdb/java/rocksjni/persistent_cache.cc
new file mode 100644
index 000000000..6776022e8
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/persistent_cache.cc
@@ -0,0 +1,57 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::PersistentCache.
+
+#include <jni.h>
+#include <string>
+
+#include "include/org_rocksdb_PersistentCache.h"
+#include "rocksdb/persistent_cache.h"
+#include "loggerjnicallback.h"
+#include "portal.h"
+
+/*
+ * Class: org_rocksdb_PersistentCache
+ * Method: newPersistentCache
+ * Signature: (JLjava/lang/String;JJZ)J
+ */
+jlong Java_org_rocksdb_PersistentCache_newPersistentCache(
+ JNIEnv* env, jclass, jlong jenv_handle, jstring jpath,
+ jlong jsz, jlong jlogger_handle, jboolean joptimized_for_nvm) {
+ auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jenv_handle);
+ jboolean has_exception = JNI_FALSE;
+ std::string path =
+ ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jpath, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ return 0;
+ }
+ auto* logger =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*>(
+ jlogger_handle);
+ auto* cache =
+ new std::shared_ptr<ROCKSDB_NAMESPACE::PersistentCache>(nullptr);
+ ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::NewPersistentCache(
+ rocks_env, path, static_cast<uint64_t>(jsz), *logger,
+ static_cast<bool>(joptimized_for_nvm), cache);
+ if (!s.ok()) {
+ // Free the heap-allocated shared_ptr before raising the exception,
+ // otherwise it would leak: the Java side never sees a usable handle.
+ delete cache;
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+ }
+ return reinterpret_cast<jlong>(cache);
+}
+
+/*
+ * Class: org_rocksdb_PersistentCache
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_PersistentCache_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* cache =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::PersistentCache>*>(
+ jhandle);
+ delete cache; // delete std::shared_ptr
+}
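+
+// As with the other shared handles in this package, deleting here destroys
+// only the heap-allocated shared_ptr shell; the PersistentCache itself is
+// released when the last reference (e.g. one held by table options) is
+// dropped. A condensed sketch of the full lifecycle from the native side
+// (argument values are hypothetical):
+//
+//   jlong h = Java_org_rocksdb_PersistentCache_newPersistentCache(
+//       env, jcls, jenv_handle, jpath, /*jsz=*/64 << 20, jlogger_handle,
+//       /*joptimized_for_nvm=*/JNI_TRUE);
+//   // ... attach the cache to table options and open the database ...
+//   Java_org_rocksdb_PersistentCache_disposeInternal(env, jobj, h);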
diff --git a/src/rocksdb/java/rocksjni/portal.h b/src/rocksdb/java/rocksjni/portal.h
new file mode 100644
index 000000000..deb88af45
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/portal.h
@@ -0,0 +1,7534 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+// This file caches frequently used JNI IDs and provides an efficient
+// portal (i.e., a set of static functions) for accessing Java code
+// from C++.
+
+#ifndef JAVA_ROCKSJNI_PORTAL_H_
+#define JAVA_ROCKSJNI_PORTAL_H_
+
+#include <algorithm>
+#include <cstring>
+#include <functional>
+#include <iostream>
+#include <iterator>
+#include <jni.h>
+#include <limits>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "rocksdb/db.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/rate_limiter.h"
+#include "rocksdb/status.h"
+#include "rocksdb/table.h"
+#include "rocksdb/utilities/backupable_db.h"
+#include "rocksdb/utilities/memory_util.h"
+#include "rocksdb/utilities/transaction_db.h"
+#include "rocksdb/utilities/write_batch_with_index.h"
+#include "rocksjni/compaction_filter_factory_jnicallback.h"
+#include "rocksjni/comparatorjnicallback.h"
+#include "rocksjni/loggerjnicallback.h"
+#include "rocksjni/table_filter_jnicallback.h"
+#include "rocksjni/trace_writer_jnicallback.h"
+#include "rocksjni/transaction_notifier_jnicallback.h"
+#include "rocksjni/wal_filter_jnicallback.h"
+#include "rocksjni/writebatchhandlerjnicallback.h"
+
+// Remove the DELETE macro that the Windows headers define
+#ifdef DELETE
+#undef DELETE
+#endif
+
+namespace ROCKSDB_NAMESPACE {
+
+class JavaClass {
+ public:
+ /**
+ * Gets and initializes a Java Class
+ *
+ * @param env A pointer to the Java environment
+ * @param jclazz_name The fully qualified JNI name of the Java Class
+ * e.g. "java/lang/String"
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env, const char* jclazz_name) {
+ jclass jclazz = env->FindClass(jclazz_name);
+ assert(jclazz != nullptr);
+ return jclazz;
+ }
+};
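+
+// Note that FindClass() returns a *local* reference, valid only for the
+// duration of the current native call. Portal classes that cache a jclass
+// in a static variable must pin it first; a minimal sketch (hypothetical
+// helper, shown for java.lang.String):
+//
+//   static jclass getCachedJClass(JNIEnv* env) {
+//     static jclass jclazz = static_cast<jclass>(
+//         env->NewGlobalRef(env->FindClass("java/lang/String")));
+//     return jclazz;  // a global ref may be reused across calls and threads
+//   }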
+
+// Native class template
+template<class PTR, class DERIVED> class RocksDBNativeClass : public JavaClass {
+};
+
+// Native class template for sub-classes of RocksMutableObject
+template<class PTR, class DERIVED> class NativeRocksMutableObject
+ : public RocksDBNativeClass<PTR, DERIVED> {
+ public:
+
+ /**
+ * Gets the Java Method ID for the
+ * RocksMutableObject#setNativeHandle(long, boolean) method
+ *
+ * @param env A pointer to the Java environment
+ * @return The Java Method ID or nullptr if the RocksMutableObject class cannot
+ * be accessed, or if one of the NoSuchMethodError,
+ * ExceptionInInitializerError or OutOfMemoryError exceptions is thrown
+ */
+ static jmethodID getSetNativeHandleMethod(JNIEnv* env) {
+ static jclass jclazz = DERIVED::getJClass(env);
+ if(jclazz == nullptr) {
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "setNativeHandle", "(JZ)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Sets the C++ object pointer handle in the Java object
+ *
+ * @param env A pointer to the Java environment
+ * @param jobj The Java object on which to set the pointer handle
+ * @param ptr The C++ object pointer
+ * @param java_owns_handle JNI_TRUE if ownership of the C++ object is
+ * managed by the Java object
+ *
+ * @return true if a Java exception is pending, false otherwise
+ */
+ static bool setHandle(JNIEnv* env, jobject jobj, PTR ptr,
+ jboolean java_owns_handle) {
+ assert(jobj != nullptr);
+ static jmethodID mid = getSetNativeHandleMethod(env);
+ if(mid == nullptr) {
+ return true; // signal exception
+ }
+
+ env->CallVoidMethod(jobj, mid, reinterpret_cast<jlong>(ptr),
+ java_owns_handle);
+ if(env->ExceptionCheck()) {
+ return true; // signal exception
+ }
+
+ return false;
+ }
+};
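+
+// A sketch of how a factory-style native method could hand ownership of a
+// freshly constructed C++ object to its Java wrapper through setHandle()
+// (the wrapper type and names are hypothetical):
+//
+//   bool giveToJava(JNIEnv* env, jobject jwrapper, SomeNative* ptr) {
+//     // JNI_TRUE: the Java object now owns `ptr` and deletes it on close();
+//     // a return value of true signals a pending Java exception.
+//     return SomeNativeJni::setHandle(env, jwrapper, ptr, JNI_TRUE);
+//   }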
+
+// Java Exception template
+template<class DERIVED> class JavaException : public JavaClass {
+ public:
+ /**
+ * Create and throw a java exception with the provided message
+ *
+ * @param env A pointer to the Java environment
+ * @param msg The message for the exception
+ *
+ * @return true if an exception was thrown, false otherwise
+ */
+ static bool ThrowNew(JNIEnv* env, const std::string& msg) {
+ jclass jclazz = DERIVED::getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ std::cerr << "JavaException::ThrowNew - Error: unexpected exception!" << std::endl;
+ return env->ExceptionCheck();
+ }
+
+ const jint rs = env->ThrowNew(jclazz, msg.c_str());
+ if(rs != JNI_OK) {
+ // exception could not be thrown
+ std::cerr << "JavaException::ThrowNew - Fatal: could not throw exception!" << std::endl;
+ return env->ExceptionCheck();
+ }
+
+ return true;
+ }
+};
+
+// The portal class for java.lang.IllegalArgumentException
+class IllegalArgumentExceptionJni :
+ public JavaException<IllegalArgumentExceptionJni> {
+ public:
+ /**
+ * Get the Java Class java.lang.IllegalArgumentException
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaException::getJClass(env, "java/lang/IllegalArgumentException");
+ }
+
+ /**
+ * Create and throw a Java IllegalArgumentException with the provided status
+ *
+ * If s.ok() == true, then this function will not throw any exception.
+ *
+ * @param env A pointer to the Java environment
+ * @param s The status for the exception
+ *
+ * @return true if an exception was thrown, false otherwise
+ */
+ static bool ThrowNew(JNIEnv* env, const Status& s) {
+ assert(!s.ok());
+ if (s.ok()) {
+ return false;
+ }
+
+ // get the IllegalArgumentException class
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ std::cerr << "IllegalArgumentExceptionJni::ThrowNew/class - Error: unexpected exception!" << std::endl;
+ return env->ExceptionCheck();
+ }
+
+ return JavaException::ThrowNew(env, s.ToString());
+ }
+};
+
+// The portal class for org.rocksdb.Status.Code
+class CodeJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.Status.Code
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/Status$Code");
+ }
+
+ /**
+ * Get the Java Method: Status.Code#getValue
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getValueMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "getValue", "()b");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
+
+// The portal class for org.rocksdb.Status.SubCode
+class SubCodeJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.Status.SubCode
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/Status$SubCode");
+ }
+
+ /**
+ * Get the Java Method: Status.SubCode#getValue
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getValueMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "getValue", "()b");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ static ROCKSDB_NAMESPACE::Status::SubCode toCppSubCode(
+ const jbyte jsub_code) {
+ switch (jsub_code) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::Status::SubCode::kNone;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::Status::SubCode::kMutexTimeout;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::Status::SubCode::kLockTimeout;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::Status::SubCode::kLockLimit;
+ case 0x4:
+ return ROCKSDB_NAMESPACE::Status::SubCode::kNoSpace;
+ case 0x5:
+ return ROCKSDB_NAMESPACE::Status::SubCode::kDeadlock;
+ case 0x6:
+ return ROCKSDB_NAMESPACE::Status::SubCode::kStaleFile;
+ case 0x7:
+ return ROCKSDB_NAMESPACE::Status::SubCode::kMemoryLimit;
+
+ case 0x7F:
+ default:
+ return ROCKSDB_NAMESPACE::Status::SubCode::kNone;
+ }
+ }
+};
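+
+// Worked example (illustrative only): toCppSubCode maps the byte values used
+// by org.rocksdb.Status.SubCode back to the C++ enum, e.g.
+//
+//   SubCodeJni::toCppSubCode(0x4) == ROCKSDB_NAMESPACE::Status::SubCode::kNoSpace
+//
+// while any unrecognised value (including the explicit 0x7F) falls back to
+// kNone.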
+
+// The portal class for org.rocksdb.Status
+class StatusJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::Status*, StatusJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.Status
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/Status");
+ }
+
+ /**
+ * Get the Java Method: Status#getCode
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getCodeMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "getCode", "()Lorg/rocksdb/Status$Code;");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: Status#getSubCode
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getSubCodeMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "getSubCode", "()Lorg/rocksdb/Status$SubCode;");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: Status#getState
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getStateMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "getState", "()Ljava/lang/String;");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Create a new Java org.rocksdb.Status object with the same properties as
+ * the provided C++ ROCKSDB_NAMESPACE::Status object
+ *
+ * @param env A pointer to the Java environment
+ * @param status The ROCKSDB_NAMESPACE::Status object
+ *
+ * @return A reference to a Java org.rocksdb.Status object, or nullptr
+ * if an exception occurs
+ */
+ static jobject construct(JNIEnv* env, const Status& status) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid =
+ env->GetMethodID(jclazz, "<init>", "(BBLjava/lang/String;)V");
+ if(mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ // convert the Status state for Java
+ jstring jstate = nullptr;
+ if (status.getState() != nullptr) {
+ const char* const state = status.getState();
+ jstate = env->NewStringUTF(state);
+ if(env->ExceptionCheck()) {
+ if(jstate != nullptr) {
+ env->DeleteLocalRef(jstate);
+ }
+ return nullptr;
+ }
+ }
+
+ jobject jstatus =
+ env->NewObject(jclazz, mid, toJavaStatusCode(status.code()),
+ toJavaStatusSubCode(status.subcode()), jstate);
+ if(env->ExceptionCheck()) {
+ // exception occurred
+ if(jstate != nullptr) {
+ env->DeleteLocalRef(jstate);
+ }
+ return nullptr;
+ }
+
+ if(jstate != nullptr) {
+ env->DeleteLocalRef(jstate);
+ }
+
+ return jstatus;
+ }
+
+ // Returns the equivalent org.rocksdb.Status.Code for the provided
+ // C++ ROCKSDB_NAMESPACE::Status::Code enum
+ static jbyte toJavaStatusCode(const ROCKSDB_NAMESPACE::Status::Code& code) {
+ switch (code) {
+ case ROCKSDB_NAMESPACE::Status::Code::kOk:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::Status::Code::kNotFound:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::Status::Code::kCorruption:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::Status::Code::kNotSupported:
+ return 0x3;
+ case ROCKSDB_NAMESPACE::Status::Code::kInvalidArgument:
+ return 0x4;
+ case ROCKSDB_NAMESPACE::Status::Code::kIOError:
+ return 0x5;
+ case ROCKSDB_NAMESPACE::Status::Code::kMergeInProgress:
+ return 0x6;
+ case ROCKSDB_NAMESPACE::Status::Code::kIncomplete:
+ return 0x7;
+ case ROCKSDB_NAMESPACE::Status::Code::kShutdownInProgress:
+ return 0x8;
+ case ROCKSDB_NAMESPACE::Status::Code::kTimedOut:
+ return 0x9;
+ case ROCKSDB_NAMESPACE::Status::Code::kAborted:
+ return 0xA;
+ case ROCKSDB_NAMESPACE::Status::Code::kBusy:
+ return 0xB;
+ case ROCKSDB_NAMESPACE::Status::Code::kExpired:
+ return 0xC;
+ case ROCKSDB_NAMESPACE::Status::Code::kTryAgain:
+ return 0xD;
+ case ROCKSDB_NAMESPACE::Status::Code::kColumnFamilyDropped:
+ return 0xE;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent org.rocksdb.Status.SubCode for the provided
+ // C++ ROCKSDB_NAMESPACE::Status::SubCode enum
+ static jbyte toJavaStatusSubCode(
+ const ROCKSDB_NAMESPACE::Status::SubCode& subCode) {
+ switch (subCode) {
+ case ROCKSDB_NAMESPACE::Status::SubCode::kNone:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::Status::SubCode::kMutexTimeout:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::Status::SubCode::kLockTimeout:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::Status::SubCode::kLockLimit:
+ return 0x3;
+ case ROCKSDB_NAMESPACE::Status::SubCode::kNoSpace:
+ return 0x4;
+ case ROCKSDB_NAMESPACE::Status::SubCode::kDeadlock:
+ return 0x5;
+ case ROCKSDB_NAMESPACE::Status::SubCode::kStaleFile:
+ return 0x6;
+ case ROCKSDB_NAMESPACE::Status::SubCode::kMemoryLimit:
+ return 0x7;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ static std::unique_ptr<ROCKSDB_NAMESPACE::Status> toCppStatus(
+ const jbyte jcode_value, const jbyte jsub_code_value) {
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status;
+ switch (jcode_value) {
+ case 0x0:
+ //Ok
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::OK()));
+ break;
+ case 0x1:
+ //NotFound
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::NotFound(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value))));
+ break;
+ case 0x2:
+ //Corruption
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::Corruption(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value))));
+ break;
+ case 0x3:
+ //NotSupported
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(
+ ROCKSDB_NAMESPACE::Status::NotSupported(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(
+ jsub_code_value))));
+ break;
+ case 0x4:
+ //InvalidArgument
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(
+ ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(
+ jsub_code_value))));
+ break;
+ case 0x5:
+ //IOError
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::IOError(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value))));
+ break;
+ case 0x6:
+ //MergeInProgress
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(
+ ROCKSDB_NAMESPACE::Status::MergeInProgress(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(
+ jsub_code_value))));
+ break;
+ case 0x7:
+ //Incomplete
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::Incomplete(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value))));
+ break;
+ case 0x8:
+ //ShutdownInProgress
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(
+ ROCKSDB_NAMESPACE::Status::ShutdownInProgress(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(
+ jsub_code_value))));
+ break;
+ case 0x9:
+ //TimedOut
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::TimedOut(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value))));
+ break;
+ case 0xA:
+ //Aborted
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::Aborted(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value))));
+ break;
+ case 0xB:
+ //Busy
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::Busy(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value))));
+ break;
+ case 0xC:
+ //Expired
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::Expired(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value))));
+ break;
+ case 0xD:
+ //TryAgain
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::TryAgain(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value))));
+ break;
+ case 0xE:
+ // ColumnFamilyDropped
+ status = std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(
+ ROCKSDB_NAMESPACE::Status::ColumnFamilyDropped(
+ ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(
+ jsub_code_value))));
+ break;
+ case 0x7F:
+ default:
+ return nullptr;
+ }
+ return status;
+ }
+
+ // Returns the equivalent ROCKSDB_NAMESPACE::Status for the Java
+ // org.rocksdb.Status
+ static std::unique_ptr<ROCKSDB_NAMESPACE::Status> toCppStatus(
+ JNIEnv* env, const jobject jstatus) {
+ jmethodID mid_code = getCodeMethod(env);
+ if (mid_code == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+ jobject jcode = env->CallObjectMethod(jstatus, mid_code);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ return nullptr;
+ }
+
+ jmethodID mid_code_value = ROCKSDB_NAMESPACE::CodeJni::getValueMethod(env);
+ if (mid_code_value == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+ jbyte jcode_value = env->CallByteMethod(jcode, mid_code_value);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ if (jcode != nullptr) {
+ env->DeleteLocalRef(jcode);
+ }
+ return nullptr;
+ }
+
+ jmethodID mid_subCode = getSubCodeMethod(env);
+ if (mid_subCode == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+ jobject jsubCode = env->CallObjectMethod(jstatus, mid_subCode);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ if (jcode != nullptr) {
+ env->DeleteLocalRef(jcode);
+ }
+ return nullptr;
+ }
+
+ jbyte jsub_code_value = 0x0; // None
+ if (jsubCode != nullptr) {
+ jmethodID mid_subCode_value =
+ ROCKSDB_NAMESPACE::SubCodeJni::getValueMethod(env);
+ if (mid_subCode_value == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+ jsub_code_value = env->CallByteMethod(jsubCode, mid_subCode_value);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ if (jcode != nullptr) {
+ env->DeleteLocalRef(jcode);
+ }
+ return nullptr;
+ }
+ }
+
+ jmethodID mid_state = getStateMethod(env);
+ if (mid_state == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+ jobject jstate = env->CallObjectMethod(jstatus, mid_state);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ if (jsubCode != nullptr) {
+ env->DeleteLocalRef(jsubCode);
+ }
+ if (jcode != nullptr) {
+ env->DeleteLocalRef(jcode);
+ }
+ return nullptr;
+ }
+
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ toCppStatus(jcode_value, jsub_code_value);
+
+ // delete all local refs
+ if (jstate != nullptr) {
+ env->DeleteLocalRef(jstate);
+ }
+ if (jsubCode != nullptr) {
+ env->DeleteLocalRef(jsubCode);
+ }
+ if (jcode != nullptr) {
+ env->DeleteLocalRef(jcode);
+ }
+
+ return status;
+ }
+};
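+
+// Usage sketch (illustrative only): StatusJni converts in both directions,
+// so given a C++ Status `s` and a valid JNIEnv `env`:
+//
+//   jobject jstatus = StatusJni::construct(env, s);
+//   if (jstatus != nullptr) {
+//     std::unique_ptr<ROCKSDB_NAMESPACE::Status> s2 =
+//         StatusJni::toCppStatus(env, jstatus);
+//     // s2, if non-null, carries the same code and sub-code as s
+//     env->DeleteLocalRef(jstatus);
+//   }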
+
+// The portal class for org.rocksdb.RocksDBException
+class RocksDBExceptionJni :
+ public JavaException<RocksDBExceptionJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.RocksDBException
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaException::getJClass(env, "org/rocksdb/RocksDBException");
+ }
+
+ /**
+ * Create and throw a Java RocksDBException with the provided message
+ *
+ * @param env A pointer to the Java environment
+ * @param msg The message for the exception
+ *
+ * @return true if an exception was thrown, false otherwise
+ */
+ static bool ThrowNew(JNIEnv* env, const std::string& msg) {
+ return JavaException::ThrowNew(env, msg);
+ }
+
+ /**
+ * Create and throw a Java RocksDBException with the provided status
+ *
+ * If s->ok() == true, then this function will not throw any exception.
+ *
+ * @param env A pointer to the Java environment
+ * @param s The status for the exception
+ *
+ * @return true if an exception was thrown, false otherwise
+ */
+ static bool ThrowNew(JNIEnv* env, std::unique_ptr<Status>& s) {
+ return ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, *(s.get()));
+ }
+
+ /**
+ * Create and throw a Java RocksDBException with the provided status
+ *
+ * If s.ok() == true, then this function will not throw any exception.
+ *
+ * @param env A pointer to the Java environment
+ * @param s The status for the exception
+ *
+ * @return true if an exception was thrown, false otherwise
+ */
+ static bool ThrowNew(JNIEnv* env, const Status& s) {
+ if (s.ok()) {
+ return false;
+ }
+
+ // get the RocksDBException class
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ std::cerr << "RocksDBExceptionJni::ThrowNew/class - Error: unexpected exception!" << std::endl;
+ return env->ExceptionCheck();
+ }
+
+ // get the constructor of org.rocksdb.RocksDBException
+ jmethodID mid =
+ env->GetMethodID(jclazz, "<init>", "(Lorg/rocksdb/Status;)V");
+ if(mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ std::cerr << "RocksDBExceptionJni::ThrowNew/cstr - Error: unexpected exception!" << std::endl;
+ return env->ExceptionCheck();
+ }
+
+ // get the Java status object
+ jobject jstatus = StatusJni::construct(env, s);
+ if(jstatus == nullptr) {
+ // exception occurred
+ std::cerr << "RocksDBExceptionJni::ThrowNew/StatusJni - Error: unexpected exception!" << std::endl;
+ return env->ExceptionCheck();
+ }
+
+ // construct the RocksDBException
+ jthrowable rocksdb_exception = reinterpret_cast<jthrowable>(env->NewObject(jclazz, mid, jstatus));
+ if(env->ExceptionCheck()) {
+ if(jstatus != nullptr) {
+ env->DeleteLocalRef(jstatus);
+ }
+ if(rocksdb_exception != nullptr) {
+ env->DeleteLocalRef(rocksdb_exception);
+ }
+ std::cerr << "RocksDBExceptionJni::ThrowNew/NewObject - Error: unexpected exception!" << std::endl;
+ return true;
+ }
+
+ // throw the RocksDBException
+ const jint rs = env->Throw(rocksdb_exception);
+ if(rs != JNI_OK) {
+ // exception could not be thrown
+ std::cerr << "RocksDBExceptionJni::ThrowNew - Fatal: could not throw exception!" << std::endl;
+ if(jstatus != nullptr) {
+ env->DeleteLocalRef(jstatus);
+ }
+ if(rocksdb_exception != nullptr) {
+ env->DeleteLocalRef(rocksdb_exception);
+ }
+ return env->ExceptionCheck();
+ }
+
+ if(jstatus != nullptr) {
+ env->DeleteLocalRef(jstatus);
+ }
+ if(rocksdb_exception != nullptr) {
+ env->DeleteLocalRef(rocksdb_exception);
+ }
+
+ return true;
+ }
+
+ /**
+ * Create and throw a Java RocksDBException with the provided message
+ * and status
+ *
+ * If s.ok() == true, then this function will not throw any exception.
+ *
+ * @param env A pointer to the Java environment
+ * @param msg The message for the exception
+ * @param s The status for the exception
+ *
+ * @return true if an exception was thrown, false otherwise
+ */
+ static bool ThrowNew(JNIEnv* env, const std::string& msg, const Status& s) {
+ assert(!s.ok());
+ if (s.ok()) {
+ return false;
+ }
+
+ // get the RocksDBException class
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ std::cerr << "RocksDBExceptionJni::ThrowNew/class - Error: unexpected exception!" << std::endl;
+ return env->ExceptionCheck();
+ }
+
+ // get the constructor of org.rocksdb.RocksDBException
+ jmethodID mid =
+ env->GetMethodID(jclazz, "<init>", "(Ljava/lang/String;Lorg/rocksdb/Status;)V");
+ if(mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ std::cerr << "RocksDBExceptionJni::ThrowNew/cstr - Error: unexpected exception!" << std::endl;
+ return env->ExceptionCheck();
+ }
+
+ jstring jmsg = env->NewStringUTF(msg.c_str());
+ if(jmsg == nullptr) {
+ // exception thrown: OutOfMemoryError
+ std::cerr << "RocksDBExceptionJni::ThrowNew/msg - Error: unexpected exception!" << std::endl;
+ return env->ExceptionCheck();
+ }
+
+ // get the Java status object
+ jobject jstatus = StatusJni::construct(env, s);
+ if(jstatus == nullptr) {
+ // exception occurred
+ std::cerr << "RocksDBExceptionJni::ThrowNew/StatusJni - Error: unexpected exception!" << std::endl;
+ if(jmsg != nullptr) {
+ env->DeleteLocalRef(jmsg);
+ }
+ return env->ExceptionCheck();
+ }
+
+ // construct the RocksDBException
+ jthrowable rocksdb_exception = reinterpret_cast<jthrowable>(env->NewObject(jclazz, mid, jmsg, jstatus));
+ if(env->ExceptionCheck()) {
+ if(jstatus != nullptr) {
+ env->DeleteLocalRef(jstatus);
+ }
+ if(jmsg != nullptr) {
+ env->DeleteLocalRef(jmsg);
+ }
+ if(rocksdb_exception != nullptr) {
+ env->DeleteLocalRef(rocksdb_exception);
+ }
+ std::cerr << "RocksDBExceptionJni::ThrowNew/NewObject - Error: unexpected exception!" << std::endl;
+ return true;
+ }
+
+ // throw the RocksDBException
+ const jint rs = env->Throw(rocksdb_exception);
+ if(rs != JNI_OK) {
+ // exception could not be thrown
+ std::cerr << "RocksDBExceptionJni::ThrowNew - Fatal: could not throw exception!" << std::endl;
+ if(jstatus != nullptr) {
+ env->DeleteLocalRef(jstatus);
+ }
+ if(jmsg != nullptr) {
+ env->DeleteLocalRef(jmsg);
+ }
+ if(rocksdb_exception != nullptr) {
+ env->DeleteLocalRef(rocksdb_exception);
+ }
+ return env->ExceptionCheck();
+ }
+
+ if(jstatus != nullptr) {
+ env->DeleteLocalRef(jstatus);
+ }
+ if(jmsg != nullptr) {
+ env->DeleteLocalRef(jmsg);
+ }
+ if(rocksdb_exception != nullptr) {
+ env->DeleteLocalRef(rocksdb_exception);
+ }
+
+ return true;
+ }
+
+ /**
+ * Get the Java Method: RocksDBException#getStatus
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getStatusMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "getStatus", "()Lorg/rocksdb/Status;");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ static std::unique_ptr<ROCKSDB_NAMESPACE::Status> toCppStatus(
+ JNIEnv* env, jthrowable jrocksdb_exception) {
+ if(!env->IsInstanceOf(jrocksdb_exception, getJClass(env))) {
+ // not an instance of RocksDBException
+ return nullptr;
+ }
+
+ // get the java status object
+ jmethodID mid = getStatusMethod(env);
+ if(mid == nullptr) {
+ // exception occurred accessing class or method
+ return nullptr;
+ }
+
+ jobject jstatus = env->CallObjectMethod(jrocksdb_exception, mid);
+ if(env->ExceptionCheck()) {
+ // exception occurred
+ return nullptr;
+ }
+
+ if(jstatus == nullptr) {
+ return nullptr; // no status available
+ }
+
+ return ROCKSDB_NAMESPACE::StatusJni::toCppStatus(env, jstatus);
+ }
+};
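+
+// Usage sketch (illustrative only; `do_something` is a hypothetical call):
+// the typical pattern in the rocksjni implementation files is to surface any
+// non-OK status as a Java RocksDBException and return a sentinel value (see
+// JniUtil::v_op below for an in-tree example):
+//
+//   ROCKSDB_NAMESPACE::Status s = do_something();
+//   if (!s.ok()) {
+//     ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+//     return 0;
+//   }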
+
+// The portal class for java.util.List
+class ListJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class java.util.List
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getListClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/util/List");
+ }
+
+ /**
+ * Get the Java Class java.util.ArrayList
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getArrayListClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/util/ArrayList");
+ }
+
+ /**
+ * Get the Java Class java.util.Iterator
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getIteratorClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/util/Iterator");
+ }
+
+ /**
+ * Get the Java Method: List#iterator
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getIteratorMethod(JNIEnv* env) {
+ jclass jlist_clazz = getListClass(env);
+ if(jlist_clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jlist_clazz, "iterator", "()Ljava/util/Iterator;");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: Iterator#hasNext
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getHasNextMethod(JNIEnv* env) {
+ jclass jiterator_clazz = getIteratorClass(env);
+ if(jiterator_clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jiterator_clazz, "hasNext", "()Z");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: Iterator#next
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getNextMethod(JNIEnv* env) {
+ jclass jiterator_clazz = getIteratorClass(env);
+ if(jiterator_clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jiterator_clazz, "next", "()Ljava/lang/Object;");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: ArrayList constructor
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getArrayListConstructorMethodId(JNIEnv* env) {
+ jclass jarray_list_clazz = getArrayListClass(env);
+ if(jarray_list_clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+ static jmethodID mid =
+ env->GetMethodID(jarray_list_clazz, "<init>", "(I)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: List#add
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getListAddMethodId(JNIEnv* env) {
+ jclass jlist_clazz = getListClass(env);
+ if(jlist_clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jlist_clazz, "add", "(Ljava/lang/Object;)Z");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
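+
+// Usage sketch (illustrative only; `jlist` is a hypothetical local reference
+// to a java.util.List): the method IDs above are enough to walk a list from
+// native code; per-call error handling is omitted for brevity:
+//
+//   jobject jit = env->CallObjectMethod(jlist, ListJni::getIteratorMethod(env));
+//   while (env->CallBooleanMethod(jit, ListJni::getHasNextMethod(env)) == JNI_TRUE) {
+//     jobject jelem = env->CallObjectMethod(jit, ListJni::getNextMethod(env));
+//     // use jelem, then drop the local reference
+//     env->DeleteLocalRef(jelem);
+//   }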
+
+// The portal class for java.lang.Byte
+class ByteJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class java.lang.Byte
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/lang/Byte");
+ }
+
+ /**
+ * Get the Java Class byte[]
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getArrayJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "[B");
+ }
+
+ /**
+ * Creates a new 2-dimensional Java Byte Array byte[][]
+ *
+ * @param env A pointer to the Java environment
+ * @param len The size of the first dimension
+ *
+ * @return A reference to the Java byte[][] or nullptr if an exception occurs
+ */
+ static jobjectArray new2dByteArray(JNIEnv* env, const jsize len) {
+ jclass clazz = getArrayJClass(env);
+ if(clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ return env->NewObjectArray(len, clazz, nullptr);
+ }
+
+ /**
+ * Get the Java Method: Byte#byteValue
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getByteValueMethod(JNIEnv* env) {
+ jclass clazz = getJClass(env);
+ if(clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(clazz, "byteValue", "()B");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Calls the Java Method: Byte#valueOf, returning a constructed Byte jobject
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A constructed Byte object, or nullptr if the class or method id
+ * could not be retrieved or an exception occurred
+ */
+ static jobject valueOf(JNIEnv* env, jbyte jprimitive_byte) {
+ jclass clazz = getJClass(env);
+ if (clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetStaticMethodID(clazz, "valueOf", "(B)Ljava/lang/Byte;");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ const jobject jbyte_obj =
+ env->CallStaticObjectMethod(clazz, mid, jprimitive_byte);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jbyte_obj;
+ }
+
+};
+
+// The portal class for java.nio.ByteBuffer
+class ByteBufferJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class java.nio.ByteBuffer
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/nio/ByteBuffer");
+ }
+
+ /**
+ * Get the Java Method: ByteBuffer#allocate
+ *
+ * @param env A pointer to the Java environment
+ * @param jbytebuffer_clazz an optional reference to the java.nio.ByteBuffer
+ * class, or nullptr to look it up
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getAllocateMethodId(JNIEnv* env,
+ jclass jbytebuffer_clazz = nullptr) {
+ const jclass jclazz =
+ jbytebuffer_clazz == nullptr ? getJClass(env) : jbytebuffer_clazz;
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetStaticMethodID(
+ jclazz, "allocate", "(I)Ljava/nio/ByteBuffer;");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: ByteBuffer#array
+ *
+ * @param env A pointer to the Java environment
+ * @param jbytebuffer_clazz an optional reference to the java.nio.ByteBuffer
+ * class, or nullptr to look it up
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getArrayMethodId(JNIEnv* env,
+ jclass jbytebuffer_clazz = nullptr) {
+ const jclass jclazz =
+ jbytebuffer_clazz == nullptr ? getJClass(env) : jbytebuffer_clazz;
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "array", "()[B");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ static jobject construct(
+ JNIEnv* env, const bool direct, const size_t capacity,
+ jclass jbytebuffer_clazz = nullptr) {
+ return constructWith(env, direct, nullptr, capacity, jbytebuffer_clazz);
+ }
+
+ static jobject constructWith(
+ JNIEnv* env, const bool direct, const char* buf, const size_t capacity,
+ jclass jbytebuffer_clazz = nullptr) {
+ if (direct) {
+ bool allocated = false;
+ if (buf == nullptr) {
+ buf = new char[capacity];
+ allocated = true;
+ }
+ jobject jbuf = env->NewDirectByteBuffer(const_cast<char*>(buf), static_cast<jlong>(capacity));
+ if (jbuf == nullptr) {
+ // exception occurred
+ if (allocated) {
+ delete[] static_cast<const char*>(buf);
+ }
+ return nullptr;
+ }
+ return jbuf;
+ } else {
+ const jclass jclazz =
+ jbytebuffer_clazz == nullptr ? getJClass(env) : jbytebuffer_clazz;
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+ const jmethodID jmid_allocate = getAllocateMethodId(env, jbytebuffer_clazz);
+ if (jmid_allocate == nullptr) {
+ // exception occurred accessing class, or NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+ const jobject jbuf = env->CallStaticObjectMethod(
+ jclazz, jmid_allocate, static_cast<jint>(capacity));
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ return nullptr;
+ }
+
+ // copy the initial buffer contents, if provided
+ if (buf != nullptr) {
+ jbyteArray jarray = array(env, jbuf, jbytebuffer_clazz);
+ if (jarray == nullptr) {
+ // exception occurred
+ env->DeleteLocalRef(jbuf);
+ return nullptr;
+ }
+
+ jboolean is_copy = JNI_FALSE;
+ jbyte* ja = reinterpret_cast<jbyte*>(
+ env->GetPrimitiveArrayCritical(jarray, &is_copy));
+ if (ja == nullptr) {
+ // exception occurred
+ env->DeleteLocalRef(jarray);
+ env->DeleteLocalRef(jbuf);
+ return nullptr;
+ }
+
+ memcpy(ja, const_cast<char*>(buf), capacity);
+
+ env->ReleasePrimitiveArrayCritical(jarray, ja, 0);
+
+ env->DeleteLocalRef(jarray);
+ }
+
+ return jbuf;
+ }
+ }
+
+ static jbyteArray array(JNIEnv* env, const jobject& jbyte_buffer,
+ jclass jbytebuffer_clazz = nullptr) {
+ const jmethodID mid = getArrayMethodId(env, jbytebuffer_clazz);
+ if (mid == nullptr) {
+ // exception occurred accessing class, or NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+ const jobject jarray = env->CallObjectMethod(jbyte_buffer, mid);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ return nullptr;
+ }
+ return static_cast<jbyteArray>(jarray);
+ }
+};
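+
+// Usage sketch (illustrative only; `buf`/`len` are a hypothetical native
+// buffer): constructWith() either wraps the bytes in a direct ByteBuffer or
+// copies them into a newly allocated heap ByteBuffer:
+//
+//   jobject jbb = ByteBufferJni::constructWith(env, /*direct=*/false, buf, len);
+//   if (jbb == nullptr) {
+//     return;  // a Java exception is pending
+//   }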
+
+// The portal class for java.lang.Integer
+class IntegerJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class java.lang.Integer
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/lang/Integer");
+ }
+
+ static jobject valueOf(JNIEnv* env, jint jprimitive_int) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid =
+ env->GetStaticMethodID(jclazz, "valueOf", "(I)Ljava/lang/Integer;");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ const jobject jinteger_obj =
+ env->CallStaticObjectMethod(jclazz, mid, jprimitive_int);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jinteger_obj;
+ }
+};
+
+// The portal class for java.lang.Long
+class LongJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class java.lang.Long
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/lang/Long");
+ }
+
+ static jobject valueOf(JNIEnv* env, jlong jprimitive_long) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid =
+ env->GetStaticMethodID(jclazz, "valueOf", "(J)Ljava/lang/Long;");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ const jobject jlong_obj =
+ env->CallStaticObjectMethod(jclazz, mid, jprimitive_long);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jlong_obj;
+ }
+};
+
+// The portal class for java.lang.StringBuilder
+class StringBuilderJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class java.lang.StringBuilder
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/lang/StringBuilder");
+ }
+
+ /**
+ * Get the Java Method: StringBuilder#append
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getListAddMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "append",
+ "(Ljava/lang/String;)Ljava/lang/StringBuilder;");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Appends a C-style string to a StringBuilder
+ *
+ * @param env A pointer to the Java environment
+ * @param jstring_builder Reference to a java.lang.StringBuilder
+ * @param c_str A C-style string to append to the StringBuilder
+ *
+ * @return A reference to the updated StringBuilder, or a nullptr if
+ * an exception occurs
+ */
+ static jobject append(JNIEnv* env, jobject jstring_builder,
+ const char* c_str) {
+ jmethodID mid = getListAddMethodId(env);
+ if(mid == nullptr) {
+ // exception occurred accessing class or method
+ return nullptr;
+ }
+
+ jstring new_value_str = env->NewStringUTF(c_str);
+ if(new_value_str == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jresult_string_builder =
+ env->CallObjectMethod(jstring_builder, mid, new_value_str);
+ if(env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(new_value_str);
+ return nullptr;
+ }
+
+ return jresult_string_builder;
+ }
+};
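+
+// Usage sketch (illustrative only; `jsb` is a hypothetical local reference to
+// a java.lang.StringBuilder): append() builds a Java-side string from native
+// code:
+//
+//   if (StringBuilderJni::append(env, jsb, "rocksdb") == nullptr) {
+//     return;  // a Java exception is pending
+//   }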
+
+// various utility functions for working with RocksDB and JNI
+class JniUtil {
+ public:
+ /**
+ * Detect if jlong overflows size_t
+ *
+ * @param jvalue the jlong value
+ *
+ * @return Status::OK() if the jlong fits in a size_t,
+ * Status::InvalidArgument otherwise
+ */
+ inline static Status check_if_jlong_fits_size_t(const jlong& jvalue) {
+ Status s = Status::OK();
+ if (static_cast<uint64_t>(jvalue) > std::numeric_limits<size_t>::max()) {
+ s = Status::InvalidArgument(Slice("jlong overflows 32 bit value."));
+ }
+ return s;
+ }
+
+ /**
+ * Obtains a reference to the JNIEnv from
+ * the JVM
+ *
+ * If the current thread is not attached to the JavaVM
+ * then it will be attached so as to retrieve the JNIEnv
+ *
+ * If a thread is attached, it must later be manually
+ * released by calling JavaVM::DetachCurrentThread.
+ * This can be handled by always matching calls to this
+ * function with calls to {@link JniUtil::releaseJniEnv(JavaVM*, jboolean)}
+ *
+ * @param jvm (IN) A pointer to the JavaVM instance
+ * @param attached (OUT) A pointer to a boolean which
+ * will be set to JNI_TRUE if we had to attach the thread
+ *
+ * @return A pointer to the JNIEnv or nullptr if a fatal error
+ * occurs and the JNIEnv cannot be retrieved
+ */
+ static JNIEnv* getJniEnv(JavaVM* jvm, jboolean* attached) {
+ assert(jvm != nullptr);
+
+ JNIEnv *env;
+ const jint env_rs = jvm->GetEnv(reinterpret_cast<void**>(&env),
+ JNI_VERSION_1_6);
+
+ if(env_rs == JNI_OK) {
+ // current thread is already attached, return the JNIEnv
+ *attached = JNI_FALSE;
+ return env;
+ } else if(env_rs == JNI_EDETACHED) {
+ // current thread is not attached, attempt to attach
+ const jint rs_attach = jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), NULL);
+ if(rs_attach == JNI_OK) {
+ *attached = JNI_TRUE;
+ return env;
+ } else {
+ // error, could not attach the thread
+ std::cerr << "JniUtil::getJniEnv - Fatal: could not attach current thread to JVM!" << std::endl;
+ return nullptr;
+ }
+ } else if(env_rs == JNI_EVERSION) {
+ // error, JDK does not support JNI_VERSION_1_6+
+ std::cerr << "JniUtil::getJniEnv - Fatal: JDK does not support JNI_VERSION_1_6" << std::endl;
+ return nullptr;
+ } else {
+ std::cerr << "JniUtil::getJniEnv - Fatal: Unknown error: env_rs=" << env_rs << std::endl;
+ return nullptr;
+ }
+ }
+
+ /**
+ * Counterpart to {@link JniUtil::getJniEnv(JavaVM*, jboolean*)}
+ *
+ * Detaches the current thread from the JVM if it was previously
+ * attached
+ *
+ * @param jvm (IN) A pointer to the JavaVM instance
+ * @param attached (IN) JNI_TRUE if we previously had to attach the thread
+ * to the JavaVM to get the JNIEnv
+ */
+ static void releaseJniEnv(JavaVM* jvm, jboolean& attached) {
+ assert(jvm != nullptr);
+ if(attached == JNI_TRUE) {
+ const jint rs_detach = jvm->DetachCurrentThread();
+ assert(rs_detach == JNI_OK);
+ if(rs_detach != JNI_OK) {
+ std::cerr << "JniUtil::getJniEnv - Warn: Unable to detach current thread from JVM!" << std::endl;
+ }
+ }
+ }
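+
+ // Usage sketch (illustrative only): getJniEnv/releaseJniEnv are intended to
+ // be used as a matched pair from native callback threads, e.g.
+ //
+ //   jboolean attached_thread = JNI_FALSE;
+ //   JNIEnv* env = JniUtil::getJniEnv(jvm, &attached_thread);
+ //   if (env != nullptr) {
+ //     // ... call back into Java through env ...
+ //     JniUtil::releaseJniEnv(jvm, attached_thread);
+ //   }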
+
+ /**
+ * Copies a Java String[] to a C++ std::vector<std::string>
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jss (IN) The Java String array to copy
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError or ArrayIndexOutOfBoundsException
+ * exception occurs
+ *
+ * @return A std::vector<std::string> containing copies of the Java strings
+ */
+ static std::vector<std::string> copyStrings(JNIEnv* env,
+ jobjectArray jss, jboolean* has_exception) {
+ return ROCKSDB_NAMESPACE::JniUtil::copyStrings(
+ env, jss, env->GetArrayLength(jss), has_exception);
+ }
+
+ /**
+ * Copies a Java String[] to a C++ std::vector<std::string>
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jss (IN) The Java String array to copy
+ * @param jss_len (IN) The length of the Java String array to copy
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError or ArrayIndexOutOfBoundsException
+ * exception occurs
+ *
+ * @return A std::vector<std::string> containing copies of the Java strings
+ */
+ static std::vector<std::string> copyStrings(JNIEnv* env,
+ jobjectArray jss, const jsize jss_len, jboolean* has_exception) {
+ std::vector<std::string> strs;
+ strs.reserve(jss_len);
+ for (jsize i = 0; i < jss_len; i++) {
+ jobject js = env->GetObjectArrayElement(jss, i);
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ *has_exception = JNI_TRUE;
+ return strs;
+ }
+
+ jstring jstr = static_cast<jstring>(js);
+ const char* str = env->GetStringUTFChars(jstr, nullptr);
+ if(str == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(js);
+ *has_exception = JNI_TRUE;
+ return strs;
+ }
+
+ strs.push_back(std::string(str));
+
+ env->ReleaseStringUTFChars(jstr, str);
+ env->DeleteLocalRef(js);
+ }
+
+ *has_exception = JNI_FALSE;
+ return strs;
+ }
+
+ /**
+ * Copies a jstring to a C-style null-terminated byte string,
+ * releasing the UTF-8 characters obtained from the jstring
+ *
+ * The jstring is copied as UTF-8
+ *
+ * If an exception occurs, then JNIEnv::ExceptionCheck()
+ * will have been called
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param js (IN) The java string to copy
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError exception occurs
+ *
+ * @return A pointer to the copied string, or a
+ * nullptr if has_exception == JNI_TRUE
+ */
+ static std::unique_ptr<char[]> copyString(JNIEnv* env, jstring js,
+ jboolean* has_exception) {
+ const char *utf = env->GetStringUTFChars(js, nullptr);
+ if(utf == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ExceptionCheck();
+ *has_exception = JNI_TRUE;
+ return nullptr;
+ } else if(env->ExceptionCheck()) {
+ // exception thrown
+ env->ReleaseStringUTFChars(js, utf);
+ *has_exception = JNI_TRUE;
+ return nullptr;
+ }
+
+ const jsize utf_len = env->GetStringUTFLength(js);
+ std::unique_ptr<char[]> str(new char[utf_len + 1]); // Note: + 1 is needed for the c_str null terminator
+ std::strcpy(str.get(), utf);
+ env->ReleaseStringUTFChars(js, utf);
+ *has_exception = JNI_FALSE;
+ return str;
+ }
+
+ /**
+ * Copies a jstring to a std::string,
+ * releasing the UTF-8 characters obtained from the jstring
+ *
+ * If an exception occurs, then JNIEnv::ExceptionCheck()
+ * will have been called
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param js (IN) The java string to copy
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError exception occurs
+ *
+ * @return A std::string copy of the jstring, or an
+ * empty std::string if has_exception == JNI_TRUE
+ */
+ static std::string copyStdString(JNIEnv* env, jstring js,
+ jboolean* has_exception) {
+ const char *utf = env->GetStringUTFChars(js, nullptr);
+ if(utf == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ExceptionCheck();
+ *has_exception = JNI_TRUE;
+ return std::string();
+ } else if(env->ExceptionCheck()) {
+ // exception thrown
+ env->ReleaseStringUTFChars(js, utf);
+ *has_exception = JNI_TRUE;
+ return std::string();
+ }
+
+ std::string name(utf);
+ env->ReleaseStringUTFChars(js, utf);
+ *has_exception = JNI_FALSE;
+ return name;
+ }
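+
+ // Usage sketch (illustrative only; `jname` is a hypothetical jstring
+ // argument): copyStdString is the usual way a JNI entry point turns a
+ // jstring into a std::string:
+ //
+ //   jboolean has_exception = JNI_FALSE;
+ //   std::string name = JniUtil::copyStdString(env, jname, &has_exception);
+ //   if (has_exception == JNI_TRUE) {
+ //     return;  // an OutOfMemoryError is pending
+ //   }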
+
+ /**
+ * Copies bytes from a std::string to a jbyteArray
+ *
+ * @param env A pointer to the java environment
+ * @param bytes The bytes to copy
+ *
+ * @return the Java byte[], or nullptr if an exception occurs
+ *
+ * @throws RocksDBException thrown if the memory size to copy exceeds
+ * the Java array size limitation
+ */
+ static jbyteArray copyBytes(JNIEnv* env, std::string bytes) {
+ return createJavaByteArrayWithSizeCheck(env, bytes.c_str(), bytes.size());
+ }
+
+ /**
+ * Given a Java byte[][] which is an array of java.lang.Strings
+ * where each String is a byte[], the passed function `string_fn`
+ * will be called on each String, and each result is collected by
+ * calling the passed function `collector_fn`
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jbyte_strings (IN) A Java array of Strings expressed as bytes
+ * @param string_fn (IN) A transform function to call for each String
+ * @param collector_fn (IN) A collector which is called for the result
+ * of each `string_fn`
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an ArrayIndexOutOfBoundsException or OutOfMemoryError
+ * exception occurs
+ */
+ template <typename T> static void byteStrings(JNIEnv* env,
+ jobjectArray jbyte_strings,
+ std::function<T(const char*, const size_t)> string_fn,
+ std::function<void(size_t, T)> collector_fn,
+ jboolean *has_exception) {
+ const jsize jlen = env->GetArrayLength(jbyte_strings);
+
+ for(jsize i = 0; i < jlen; i++) {
+ jobject jbyte_string_obj = env->GetObjectArrayElement(jbyte_strings, i);
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ *has_exception = JNI_TRUE; // signal error
+ return;
+ }
+
+ jbyteArray jbyte_string_ary =
+ reinterpret_cast<jbyteArray>(jbyte_string_obj);
+ T result = byteString(env, jbyte_string_ary, string_fn, has_exception);
+
+ env->DeleteLocalRef(jbyte_string_obj);
+
+ if(*has_exception == JNI_TRUE) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ collector_fn(i, result);
+ }
+
+ *has_exception = JNI_FALSE;
+ }
+
+ /**
+ * Given a Java String which is expressed as a Java Byte Array byte[],
+ * the passed function `string_fn` will be called on the String
+ * and the result returned
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jbyte_string_ary (IN) A Java String expressed in bytes
+ * @param string_fn (IN) A transform function to call on the String
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError exception occurs
+ */
+ template <typename T> static T byteString(JNIEnv* env,
+ jbyteArray jbyte_string_ary,
+ std::function<T(const char*, const size_t)> string_fn,
+ jboolean* has_exception) {
+ const jsize jbyte_string_len = env->GetArrayLength(jbyte_string_ary);
+ return byteString<T>(env, jbyte_string_ary, jbyte_string_len, string_fn,
+ has_exception);
+ }
+
+ /**
+ * Given a Java String which is expressed as a Java Byte Array byte[],
+ * the passed function `string_fn` will be called on the String
+ * and the result returned
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jbyte_string_ary (IN) A Java String expressed in bytes
+ * @param jbyte_string_len (IN) The length of the Java String
+ * expressed in bytes
+ * @param string_fn (IN) A transform function to call on the String
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError exception occurs
+ */
+ template <typename T> static T byteString(JNIEnv* env,
+ jbyteArray jbyte_string_ary, const jsize jbyte_string_len,
+ std::function<T(const char*, const size_t)> string_fn,
+ jboolean* has_exception) {
+ jbyte* jbyte_string =
+ env->GetByteArrayElements(jbyte_string_ary, nullptr);
+ if(jbyte_string == nullptr) {
+ // exception thrown: OutOfMemoryError
+ *has_exception = JNI_TRUE;
+ return nullptr; // signal error
+ }
+
+ T result =
+ string_fn(reinterpret_cast<char *>(jbyte_string), jbyte_string_len);
+
+ env->ReleaseByteArrayElements(jbyte_string_ary, jbyte_string, JNI_ABORT);
+
+ *has_exception = JNI_FALSE;
+ return result;
+ }
+
+ /**
+ * Converts a std::vector<string> to a Java byte[][] where each Java String
+ * is expressed as a Java Byte Array byte[].
+ *
+ * @param env A pointer to the java environment
+ * @param strings A vector of Strings
+ *
+ * @return A Java array of Strings expressed as bytes,
+ * or nullptr if an exception is thrown
+ */
+ static jobjectArray stringsBytes(JNIEnv* env, std::vector<std::string> strings) {
+ jclass jcls_ba = ByteJni::getArrayJClass(env);
+ if(jcls_ba == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const jsize len = static_cast<jsize>(strings.size());
+
+ jobjectArray jbyte_strings = env->NewObjectArray(len, jcls_ba, nullptr);
+ if(jbyte_strings == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ for (jsize i = 0; i < len; i++) {
+ std::string *str = &strings[i];
+ const jsize str_len = static_cast<jsize>(str->size());
+
+ jbyteArray jbyte_string_ary = env->NewByteArray(str_len);
+ if(jbyte_string_ary == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jbyte_strings);
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(
+ jbyte_string_ary, 0, str_len,
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(str->c_str())));
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jbyte_string_ary);
+ env->DeleteLocalRef(jbyte_strings);
+ return nullptr;
+ }
+
+ env->SetObjectArrayElement(jbyte_strings, i, jbyte_string_ary);
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ // or ArrayStoreException
+ env->DeleteLocalRef(jbyte_string_ary);
+ env->DeleteLocalRef(jbyte_strings);
+ return nullptr;
+ }
+
+ env->DeleteLocalRef(jbyte_string_ary);
+ }
+
+ return jbyte_strings;
+ }
+
+ /**
+ * Converts a std::vector<std::string> to a Java String[].
+ *
+ * @param env A pointer to the java environment
+ * @param strings A vector of Strings
+ *
+ * @return A Java array of Strings,
+ * or nullptr if an exception is thrown
+ */
+ static jobjectArray toJavaStrings(JNIEnv* env,
+ const std::vector<std::string>* strings) {
+ jclass jcls_str = env->FindClass("java/lang/String");
+ if(jcls_str == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const jsize len = static_cast<jsize>(strings->size());
+
+ jobjectArray jstrings = env->NewObjectArray(len, jcls_str, nullptr);
+ if(jstrings == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ for (jsize i = 0; i < len; i++) {
+ const std::string *str = &((*strings)[i]);
+ jstring js = ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, str);
+ if (js == nullptr) {
+ env->DeleteLocalRef(jstrings);
+ return nullptr;
+ }
+
+ env->SetObjectArrayElement(jstrings, i, js);
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ // or ArrayStoreException
+ env->DeleteLocalRef(js);
+ env->DeleteLocalRef(jstrings);
+ return nullptr;
+ }
+ }
+
+ return jstrings;
+ }
+
+ /**
+ * Creates a Java UTF String from a C++ std::string
+ *
+ * @param env A pointer to the java environment
+ * @param string the C++ std::string
+ * @param treat_empty_as_null true if empty strings should be treated as null
+ *
+ * @return the Java UTF string, or nullptr if the provided string
+ * is null (or empty and treat_empty_as_null is set), or if an
+ * exception occurs allocating the Java String.
+ */
+ static jstring toJavaString(JNIEnv* env, const std::string* string,
+ const bool treat_empty_as_null = false) {
+ if (string == nullptr) {
+ return nullptr;
+ }
+
+ if (treat_empty_as_null && string->empty()) {
+ return nullptr;
+ }
+
+ return env->NewStringUTF(string->c_str());
+ }
+
+ /**
+ * Copies bytes to a new jbyteArray, checking the Java array size limitation.
+ *
+ * @param bytes pointer to memory to copy to a new jByteArray
+ * @param size number of bytes to copy
+ *
+ * @return the Java byte[], or nullptr if an exception occurs
+ *
+ * @throws RocksDBException thrown if the memory size to copy exceeds
+ * the Java array size limitation (to avoid overflow)
+ */
+ static jbyteArray createJavaByteArrayWithSizeCheck(JNIEnv* env, const char* bytes, const size_t size) {
+ // The limit for a Java array size is VM-specific.
+ // In general it cannot exceed Integer.MAX_VALUE (2^31 - 1);
+ // the current HotSpot VM limit is Integer.MAX_VALUE - 5 (2^31 - 1 - 5).
+ // This means the call to env->NewByteArray below can still fail with
+ // OutOfMemoryError("Requested array size exceeds VM limit") from the VM
+ static const size_t MAX_JARRAY_SIZE = (static_cast<size_t>(1)) << 31;
+ if(size > MAX_JARRAY_SIZE) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "Requested array size exceeds VM limit");
+ return nullptr;
+ }
+
+ const jsize jlen = static_cast<jsize>(size);
+ jbyteArray jbytes = env->NewByteArray(jlen);
+ if(jbytes == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(jbytes, 0, jlen,
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes)));
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jbytes);
+ return nullptr;
+ }
+
+ return jbytes;
+ }
+
+ /**
+ * Copies bytes from a ROCKSDB_NAMESPACE::Slice to a jbyteArray
+ *
+ * @param env A pointer to the java environment
+ * @param bytes The bytes to copy
+ *
+ * @return the Java byte[] or nullptr if an exception occurs
+ *
+ * @throws RocksDBException thrown if the memory size to copy exceeds
+ * the Java array size limitation
+ */
+ static jbyteArray copyBytes(JNIEnv* env, const Slice& bytes) {
+ return createJavaByteArrayWithSizeCheck(env, bytes.data(), bytes.size());
+ }
+
+ /*
+ * Helper for operations on a key and value
+ * for example WriteBatch->Put
+ *
+ * TODO(AR) could be used for RocksDB->Put etc.
+ */
+ static std::unique_ptr<ROCKSDB_NAMESPACE::Status> kv_op(
+ std::function<ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Slice,
+ ROCKSDB_NAMESPACE::Slice)>
+ op,
+ JNIEnv* env, jobject /*jobj*/, jbyteArray jkey, jint jkey_len,
+ jbyteArray jvalue, jint jvalue_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if(env->ExceptionCheck()) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ jbyte* value = env->GetByteArrayElements(jvalue, nullptr);
+ if(env->ExceptionCheck()) {
+ // exception thrown: OutOfMemoryError
+ if(key != nullptr) {
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ }
+ return nullptr;
+ }
+
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+ jkey_len);
+ ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast<char*>(value),
+ jvalue_len);
+
+ auto status = op(key_slice, value_slice);
+
+ if(value != nullptr) {
+ env->ReleaseByteArrayElements(jvalue, value, JNI_ABORT);
+ }
+ if(key != nullptr) {
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ }
+
+ return std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(status));
+ }
+
+ /*
+ * Helper for operations on a key
+ * for example WriteBatch->Delete
+ *
+ * TODO(AR) could be used for RocksDB->Delete etc.
+ */
+ static std::unique_ptr<ROCKSDB_NAMESPACE::Status> k_op(
+ std::function<ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Slice)> op,
+ JNIEnv* env, jobject /*jobj*/, jbyteArray jkey, jint jkey_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if(env->ExceptionCheck()) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+ jkey_len);
+
+ auto status = op(key_slice);
+
+ if(key != nullptr) {
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ }
+
+ return std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(status));
+ }
+
+ /*
+ * Helper for operations on a value
+ * for example WriteBatchWithIndex->GetFromBatch
+ */
+ static jbyteArray v_op(std::function<ROCKSDB_NAMESPACE::Status(
+ ROCKSDB_NAMESPACE::Slice, std::string*)>
+ op,
+ JNIEnv* env, jbyteArray jkey, jint jkey_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if(env->ExceptionCheck()) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+ jkey_len);
+
+ std::string value;
+ ROCKSDB_NAMESPACE::Status s = op(key_slice, &value);
+
+ if(key != nullptr) {
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ }
+
+ if (s.IsNotFound()) {
+ return nullptr;
+ }
+
+ if (s.ok()) {
+ jbyteArray jret_value =
+ env->NewByteArray(static_cast<jsize>(value.size()));
+ if(jret_value == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(jret_value, 0, static_cast<jsize>(value.size()),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value.c_str())));
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ if(jret_value != nullptr) {
+ env->DeleteLocalRef(jret_value);
+ }
+ return nullptr;
+ }
+
+ return jret_value;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
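+
+ /*
+ * Illustrative sketch (assumption, not the shipped binding): v_op can back
+ * a getter such as WriteBatchWithIndex#getFromBatch, where the wrapped
+ * operation fills a std::string value for the key:
+ *
+ *   auto getter = [&wbwi, &dbopt](ROCKSDB_NAMESPACE::Slice key,
+ *                                 std::string* value) {
+ *     return wbwi->GetFromBatch(dbopt, key, value);
+ *   };
+ *   jbyteArray jresult =
+ *       ROCKSDB_NAMESPACE::JniUtil::v_op(getter, env, jkey, jkey_len);
+ *   // jresult is nullptr on NotFound, or if a Java exception is pending
+ */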
+
+ /**
+ * Creates a vector<T*> of C++ pointers from
+ * a Java array of C++ pointer addresses.
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jptrs (IN) A Java array of C++ pointer addresses
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an ArrayIndexOutOfBoundsException or OutOfMemoryError
+ * exception occurs.
+ *
+ * @return A vector of C++ pointers.
+ */
+ template<typename T> static std::vector<T*> fromJPointers(
+ JNIEnv* env, jlongArray jptrs, jboolean *has_exception) {
+ const jsize jptrs_len = env->GetArrayLength(jptrs);
+ std::vector<T*> ptrs;
+ jlong* jptr = env->GetLongArrayElements(jptrs, nullptr);
+ if (jptr == nullptr) {
+ // exception thrown: OutOfMemoryError
+ *has_exception = JNI_TRUE;
+ return ptrs;
+ }
+ ptrs.reserve(jptrs_len);
+ for (jsize i = 0; i < jptrs_len; i++) {
+ ptrs.push_back(reinterpret_cast<T*>(jptr[i]));
+ }
+ env->ReleaseLongArrayElements(jptrs, jptr, JNI_ABORT);
+ return ptrs;
+ }
+
+ /**
+ * Creates a Java array of C++ pointer addresses
+ * from a vector of C++ pointers.
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param pointers (IN) A vector of C++ pointers
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an ArrayIndexOutOfBoundsException or OutOfMemoryError
+ * exception occurs
+ *
+ * @return Java array of C++ pointer addresses.
+ */
+ template<typename T> static jlongArray toJPointers(JNIEnv* env,
+ const std::vector<T*> &pointers,
+ jboolean *has_exception) {
+ const jsize len = static_cast<jsize>(pointers.size());
+ std::unique_ptr<jlong[]> results(new jlong[len]);
+ std::transform(pointers.begin(), pointers.end(), results.get(), [](T* pointer) -> jlong {
+ return reinterpret_cast<jlong>(pointer);
+ });
+
+ jlongArray jpointers = env->NewLongArray(len);
+ if (jpointers == nullptr) {
+ // exception thrown: OutOfMemoryError
+ *has_exception = JNI_TRUE;
+ return nullptr;
+ }
+
+ env->SetLongArrayRegion(jpointers, 0, len, results.get());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ *has_exception = JNI_TRUE;
+ env->DeleteLocalRef(jpointers);
+ return nullptr;
+ }
+
+ *has_exception = JNI_FALSE;
+
+ return jpointers;
+ }
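+
+ /*
+ * Illustrative round trip (names are assumptions): marshalling
+ * ColumnFamilyHandle pointers between a Java long[] and C++:
+ *
+ *   jboolean has_exception = JNI_FALSE;
+ *   std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> handles =
+ *       ROCKSDB_NAMESPACE::JniUtil::fromJPointers<
+ *           ROCKSDB_NAMESPACE::ColumnFamilyHandle>(env, jhandles,
+ *                                                  &has_exception);
+ *   if (has_exception == JNI_TRUE) {
+ *     return;  // a Java exception is already pending
+ *   }
+ *   // ... use handles, then hand the addresses back to Java ...
+ *   jlongArray jptrs = ROCKSDB_NAMESPACE::JniUtil::toJPointers(
+ *       env, handles, &has_exception);
+ */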
+
+ /*
+ * Helper for operations on a key and value
+ * for example WriteBatch->Put
+ *
+ * TODO(AR) could be extended to cover returning ROCKSDB_NAMESPACE::Status
+ * from `op` and used for RocksDB->Put etc.
+ */
+ static void kv_op_direct(std::function<void(ROCKSDB_NAMESPACE::Slice&,
+ ROCKSDB_NAMESPACE::Slice&)>
+ op,
+ JNIEnv* env, jobject jkey, jint jkey_off,
+ jint jkey_len, jobject jval, jint jval_off,
+ jint jval_len) {
+ char* key = reinterpret_cast<char*>(env->GetDirectBufferAddress(jkey));
+ if (key == nullptr ||
+ env->GetDirectBufferCapacity(jkey) < (jkey_off + jkey_len)) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "Invalid key argument");
+ return;
+ }
+
+ char* value = reinterpret_cast<char*>(env->GetDirectBufferAddress(jval));
+ if (value == nullptr ||
+ env->GetDirectBufferCapacity(jval) < (jval_off + jval_len)) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "Invalid value argument");
+ return;
+ }
+
+ key += jkey_off;
+ value += jval_off;
+
+ ROCKSDB_NAMESPACE::Slice key_slice(key, jkey_len);
+ ROCKSDB_NAMESPACE::Slice value_slice(value, jval_len);
+
+ op(key_slice, value_slice);
+ }
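+
+ /*
+ * Illustrative sketch (assumption): a direct-ByteBuffer put avoids array
+ * pinning entirely; note the op returns void, so any Status from the
+ * wrapped call is currently discarded (see the TODO above):
+ *
+ *   auto put = [&wb](ROCKSDB_NAMESPACE::Slice& key,
+ *                    ROCKSDB_NAMESPACE::Slice& value) {
+ *     wb->Put(key, value);
+ *   };
+ *   ROCKSDB_NAMESPACE::JniUtil::kv_op_direct(
+ *       put, env, jkey, jkey_off, jkey_len, jval, jval_off, jval_len);
+ *
+ * k_op_direct below is the single-key analogue.
+ */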
+
+ /*
+ * Helper for operations on a key
+ * for example WriteBatch->Delete
+ *
+ * TODO(AR) could be extended to cover returning ROCKSDB_NAMESPACE::Status
+ * from `op` and used for RocksDB->Delete etc.
+ */
+ static void k_op_direct(std::function<void(ROCKSDB_NAMESPACE::Slice&)> op,
+ JNIEnv* env, jobject jkey, jint jkey_off,
+ jint jkey_len) {
+ char* key = reinterpret_cast<char*>(env->GetDirectBufferAddress(jkey));
+ if (key == nullptr ||
+ env->GetDirectBufferCapacity(jkey) < (jkey_off + jkey_len)) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "Invalid key argument");
+ return;
+ }
+
+ key += jkey_off;
+
+ ROCKSDB_NAMESPACE::Slice key_slice(key, jkey_len);
+
+ op(key_slice);
+ }
+
+ template <class T>
+ static jint copyToDirect(JNIEnv* env, T& source, jobject jtarget,
+ jint jtarget_off, jint jtarget_len) {
+ char* target =
+ reinterpret_cast<char*>(env->GetDirectBufferAddress(jtarget));
+ if (target == nullptr ||
+ env->GetDirectBufferCapacity(jtarget) < (jtarget_off + jtarget_len)) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "Invalid target argument");
+ return 0;
+ }
+
+ target += jtarget_off;
+
+ const jint cvalue_len = static_cast<jint>(source.size());
+ const jint length = std::min(jtarget_len, cvalue_len);
+
+ memcpy(target, source.data(), length);
+
+ return cvalue_len;
+ }
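+
+ /*
+ * Illustrative use (assumption): copying a pinned value into a caller
+ * supplied direct buffer; the return value is the full value size, so a
+ * result larger than jtarget_len signals truncation:
+ *
+ *   ROCKSDB_NAMESPACE::PinnableSlice pinned;
+ *   // ... populate pinned, e.g. via DB::Get ...
+ *   const jint full_len = ROCKSDB_NAMESPACE::JniUtil::copyToDirect(
+ *       env, pinned, jtarget, jtarget_off, jtarget_len);
+ */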
+};
+
+class MapJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class java.util.Map
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/util/Map");
+ }
+
+ /**
+ * Get the Java Method: Map#put
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getMapPutMethodId(JNIEnv* env) {
+ jclass jlist_clazz = getJClass(env);
+ if(jlist_clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jlist_clazz, "put", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
+
+class HashMapJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class java.util.HashMap
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/util/HashMap");
+ }
+
+ /**
+ * Create a new Java java.util.HashMap object.
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to a Java java.util.HashMap object, or
+ * nullptr if an exception occurs
+ */
+ static jobject construct(JNIEnv* env, const uint32_t initial_capacity = 16) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(I)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jhash_map = env->NewObject(jclazz, mid, static_cast<jint>(initial_capacity));
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jhash_map;
+ }
+
+ /**
+ * A function which maps a std::pair<K,V> to a std::pair<JK, JV>
+ *
+ * @return Either a pointer to a std::pair<jobject, jobject>, or nullptr
+ * if an error occurs during the mapping
+ */
+ template <typename K, typename V, typename JK, typename JV>
+ using FnMapKV = std::function<std::unique_ptr<std::pair<JK, JV>> (const std::pair<K, V>&)>;
+
+ // template <class I, typename K, typename V, typename K1, typename V1, typename std::enable_if<std::is_same<typename std::iterator_traits<I>::value_type, std::pair<const K,V>>::value, int32_t>::type = 0>
+ // static void putAll(JNIEnv* env, const jobject jhash_map, I iterator, const FnMapKV<const K,V,K1,V1> &fn_map_kv) {
+ /**
+ * Returns true if it succeeds, false if an error occurs
+ */
+ template<class iterator_type, typename K, typename V>
+ static bool putAll(JNIEnv* env, const jobject jhash_map, iterator_type iterator, iterator_type end, const FnMapKV<K, V, jobject, jobject> &fn_map_kv) {
+ const jmethodID jmid_put =
+ ROCKSDB_NAMESPACE::MapJni::getMapPutMethodId(env);
+ if (jmid_put == nullptr) {
+ return false;
+ }
+
+ for (auto it = iterator; it != end; ++it) {
+ const std::unique_ptr<std::pair<jobject, jobject>> result = fn_map_kv(*it);
+ if (result == nullptr) {
+ // an error occurred during fn_map_kv
+ return false;
+ }
+ env->CallObjectMethod(jhash_map, jmid_put, result->first, result->second);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(result->second);
+ env->DeleteLocalRef(result->first);
+ return false;
+ }
+
+ // release local references
+ env->DeleteLocalRef(result->second);
+ env->DeleteLocalRef(result->first);
+ }
+
+ return true;
+ }
+
+ /**
+ * Creates a java.util.Map<String, String> from a std::map<std::string, std::string>
+ *
+ * @param env A pointer to the Java environment
+ * @param map the C++ map
+ *
+ * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+ */
+ static jobject fromCppMap(JNIEnv* env, const std::map<std::string, std::string>* map) {
+ if (map == nullptr) {
+ return nullptr;
+ }
+
+ jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+ const std::string, const std::string, jobject, jobject>
+ fn_map_kv =
+ [env](const std::pair<const std::string, const std::string>& kv) {
+ jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &(kv.first), false);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jstring jvalue = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &(kv.second), true);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(
+ new std::pair<jobject, jobject>(
+ static_cast<jobject>(jkey),
+ static_cast<jobject>(jvalue)));
+ };
+
+ if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+ }
+
+ /**
+ * Creates a java.util.Map<String, Integer> from a std::map<std::string, uint32_t>
+ *
+ * @param env A pointer to the Java environment
+ * @param map the C++ map
+ *
+ * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+ */
+ static jobject fromCppMap(JNIEnv* env, const std::map<std::string, uint32_t>* map) {
+ if (map == nullptr) {
+ return nullptr;
+ }
+
+ jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+ const std::string, const uint32_t, jobject, jobject>
+ fn_map_kv =
+ [env](const std::pair<const std::string, const uint32_t>& kv) {
+ jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &(kv.first), false);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jobject jvalue = ROCKSDB_NAMESPACE::IntegerJni::valueOf(
+ env, static_cast<jint>(kv.second));
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(
+ new std::pair<jobject, jobject>(static_cast<jobject>(jkey),
+ jvalue));
+ };
+
+ if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+ }
+
+ /**
+ * Creates a java.util.Map<String, Long> from a std::map<std::string, uint64_t>
+ *
+ * @param env A pointer to the Java environment
+ * @param map the C++ map
+ *
+ * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+ */
+ static jobject fromCppMap(JNIEnv* env, const std::map<std::string, uint64_t>* map) {
+ if (map == nullptr) {
+ return nullptr;
+ }
+
+ jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+ const std::string, const uint64_t, jobject, jobject>
+ fn_map_kv =
+ [env](const std::pair<const std::string, const uint64_t>& kv) {
+ jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &(kv.first), false);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jobject jvalue = ROCKSDB_NAMESPACE::LongJni::valueOf(
+ env, static_cast<jlong>(kv.second));
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(
+ new std::pair<jobject, jobject>(static_cast<jobject>(jkey),
+ jvalue));
+ };
+
+ if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+ }
+
+ /**
+ * Creates a java.util.Map<Integer, Long> from a std::map<uint32_t, uint64_t>
+ *
+ * @param env A pointer to the Java environment
+ * @param map the C++ map
+ *
+ * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+ */
+ static jobject fromCppMap(JNIEnv* env, const std::map<uint32_t, uint64_t>* map) {
+ if (map == nullptr) {
+ return nullptr;
+ }
+
+ jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<const uint32_t, const uint64_t,
+ jobject, jobject>
+ fn_map_kv = [env](const std::pair<const uint32_t, const uint64_t>& kv) {
+ jobject jkey = ROCKSDB_NAMESPACE::IntegerJni::valueOf(
+ env, static_cast<jint>(kv.first));
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jobject jvalue = ROCKSDB_NAMESPACE::LongJni::valueOf(
+ env, static_cast<jlong>(kv.second));
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(
+ new std::pair<jobject, jobject>(static_cast<jobject>(jkey),
+ jvalue));
+ };
+
+ if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+ }
+};
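+
+/*
+ * Illustrative sketch (assumption): surfacing DB::GetMapProperty to Java
+ * through the HashMapJni helpers above:
+ *
+ *   std::map<std::string, std::string> property_map;
+ *   if (db->GetMapProperty("rocksdb.cfstats", &property_map)) {
+ *     jobject jmap =
+ *         ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, &property_map);
+ *     // jmap is nullptr if a Java exception is pending
+ *   }
+ */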
+
+// The portal class for org.rocksdb.RocksDB
+class RocksDBJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::DB*, RocksDBJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.RocksDB
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksDB");
+ }
+};
+
+// The portal class for org.rocksdb.Options
+class OptionsJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::Options*, OptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.Options
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/Options");
+ }
+};
+
+// The portal class for org.rocksdb.DBOptions
+class DBOptionsJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::DBOptions*, DBOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.DBOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/DBOptions");
+ }
+};
+
+// The portal class for org.rocksdb.ColumnFamilyOptions
+class ColumnFamilyOptionsJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::ColumnFamilyOptions*,
+ ColumnFamilyOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.ColumnFamilyOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/ColumnFamilyOptions");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.ColumnFamilyOptions object with the same
+ * properties as the provided C++ ROCKSDB_NAMESPACE::ColumnFamilyOptions
+ * object
+ *
+ * @param env A pointer to the Java environment
+ * @param cfoptions A pointer to a ROCKSDB_NAMESPACE::ColumnFamilyOptions object
+ *
+ * @return A reference to a Java org.rocksdb.ColumnFamilyOptions object, or
+ * nullptr if an exception occurs
+ */
+ static jobject construct(JNIEnv* env, const ColumnFamilyOptions* cfoptions) {
+ auto* cfo = new ROCKSDB_NAMESPACE::ColumnFamilyOptions(*cfoptions);
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(J)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jcfd = env->NewObject(jclazz, mid, reinterpret_cast<jlong>(cfo));
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jcfd;
+ }
+};
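+
+/*
+ * Illustrative sketch (assumption): construct() above lets native code hand
+ * a fully-populated options object up to a Java callback, e.g.:
+ *
+ *   jobject jcf_options =
+ *       ROCKSDB_NAMESPACE::ColumnFamilyOptionsJni::construct(env, &cf_options);
+ *   if (jcf_options == nullptr) {
+ *     return;  // a Java exception is already pending
+ *   }
+ */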
+
+// The portal class for org.rocksdb.WriteOptions
+class WriteOptionsJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::WriteOptions*,
+ WriteOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.WriteOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteOptions");
+ }
+};
+
+// The portal class for org.rocksdb.ReadOptions
+class ReadOptionsJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::ReadOptions*,
+ ReadOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.ReadOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/ReadOptions");
+ }
+};
+
+// The portal class for org.rocksdb.WriteBatch
+class WriteBatchJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::WriteBatch*, WriteBatchJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.WriteBatch
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteBatch");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.WriteBatch object
+ *
+ * @param env A pointer to the Java environment
+ * @param wb A pointer to a ROCKSDB_NAMESPACE::WriteBatch object
+ *
+ * @return A reference to a Java org.rocksdb.WriteBatch object, or
+ * nullptr if an exception occurs
+ */
+ static jobject construct(JNIEnv* env, const WriteBatch* wb) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(J)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jwb = env->NewObject(jclazz, mid, reinterpret_cast<jlong>(wb));
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jwb;
+ }
+};
+
+// The portal class for org.rocksdb.WriteBatch.Handler
+class WriteBatchHandlerJni
+ : public RocksDBNativeClass<
+ const ROCKSDB_NAMESPACE::WriteBatchHandlerJniCallback*,
+ WriteBatchHandlerJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.WriteBatch.Handler
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/WriteBatch$Handler");
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#put
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getPutCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "put", "(I[B[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#put
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getPutMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "put", "([B[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#merge
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getMergeCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "merge", "(I[B[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#merge
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getMergeMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "merge", "([B[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#delete
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getDeleteCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "delete", "(I[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#delete
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getDeleteMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "delete", "([B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#singleDelete
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getSingleDeleteCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "(I[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#singleDelete
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getSingleDeleteMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "([B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#deleteRange
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getDeleteRangeCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "(I[B[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#deleteRange
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getDeleteRangeMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "([B[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#logData
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getLogDataMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "logData", "([B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#putBlobIndex
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getPutBlobIndexCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "putBlobIndex", "(I[B[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#markBeginPrepare
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getMarkBeginPrepareMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "markBeginPrepare", "()V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#markEndPrepare
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getMarkEndPrepareMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "markEndPrepare", "([B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#markNoop
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getMarkNoopMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "markNoop", "(Z)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#markRollback
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getMarkRollbackMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "markRollback", "([B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#markCommit
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getMarkCommitMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "markCommit", "([B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#shouldContinue
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getContinueMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "shouldContinue", "()Z");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
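+
+/*
+ * Illustrative sketch (assumption): the WriteBatchHandlerJniCallback caches
+ * these method ids once and dispatches each record to the Java handler,
+ * e.g. for put (m_jcallback_obj, jkey and jvalue are assumed locals):
+ *
+ *   jmethodID jput_mid = WriteBatchHandlerJni::getPutMethodId(env);
+ *   if (jput_mid != nullptr) {
+ *     env->CallVoidMethod(m_jcallback_obj, jput_mid, jkey, jvalue);
+ *     if (env->ExceptionCheck()) {
+ *       // exception thrown in the Java handler; stop iterating the batch
+ *     }
+ *   }
+ */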
+
+class WriteBatchSavePointJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.WriteBatch.SavePoint
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/WriteBatch$SavePoint");
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.SavePoint constructor
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getConstructorMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JJJ)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Create a new Java org.rocksdb.WriteBatch.SavePoint object
+ *
+ * @param env A pointer to the Java environment
+ * @param save_point A reference to a ROCKSDB_NAMESPACE::WriteBatch::SavePoint
+ * object
+ *
+ * @return A reference to a Java org.rocksdb.WriteBatch.SavePoint object, or
+ * nullptr if an exception occurs
+ */
+ static jobject construct(JNIEnv* env, const SavePoint &save_point) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = getConstructorMethodId(env);
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jsave_point = env->NewObject(jclazz, mid,
+ static_cast<jlong>(save_point.size),
+ static_cast<jlong>(save_point.count),
+ static_cast<jlong>(save_point.content_flags));
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jsave_point;
+ }
+};
+
+// The portal class for org.rocksdb.WriteBatchWithIndex
+class WriteBatchWithIndexJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::WriteBatchWithIndex*,
+ WriteBatchWithIndexJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.WriteBatchWithIndex
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/WriteBatchWithIndex");
+ }
+};
+
+// The portal class for org.rocksdb.HistogramData
+class HistogramDataJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.HistogramData
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/HistogramData");
+ }
+
+ /**
+ * Get the Java Method: HistogramData constructor
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getConstructorMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "<init>", "(DDDDDDJJD)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
+
+// The portal class for org.rocksdb.BackupableDBOptions
+class BackupableDBOptionsJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::BackupableDBOptions*,
+ BackupableDBOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.BackupableDBOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/BackupableDBOptions");
+ }
+};
+
+// The portal class for org.rocksdb.BackupEngine
+class BackupEngineJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::BackupEngine*,
+ BackupEngineJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.BackupEngine
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/BackupEngine");
+ }
+};
+
+// The portal class for org.rocksdb.RocksIterator
+class IteratorJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::Iterator*, IteratorJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.RocksIterator
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksIterator");
+ }
+};
+
+// The portal class for org.rocksdb.Filter
+class FilterJni
+ : public RocksDBNativeClass<
+ std::shared_ptr<ROCKSDB_NAMESPACE::FilterPolicy>*, FilterJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.Filter
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/Filter");
+ }
+};
+
+// The portal class for org.rocksdb.ColumnFamilyHandle
+class ColumnFamilyHandleJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ ColumnFamilyHandleJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.ColumnFamilyHandle
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/ColumnFamilyHandle");
+ }
+};
+
+// The portal class for org.rocksdb.FlushOptions
+class FlushOptionsJni
+ : public RocksDBNativeClass<ROCKSDB_NAMESPACE::FlushOptions*,
+ FlushOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.FlushOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/FlushOptions");
+ }
+};
+
+// The portal class for org.rocksdb.ComparatorOptions
+class ComparatorOptionsJni
+ : public RocksDBNativeClass<
+ ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*,
+ ComparatorOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.ComparatorOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/ComparatorOptions");
+ }
+};
+
+// The portal class for org.rocksdb.AbstractCompactionFilterFactory
+class AbstractCompactionFilterFactoryJni
+ : public RocksDBNativeClass<
+ const ROCKSDB_NAMESPACE::CompactionFilterFactoryJniCallback*,
+ AbstractCompactionFilterFactoryJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.AbstractCompactionFilterFactory
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/AbstractCompactionFilterFactory");
+ }
+
+ /**
+ * Get the Java Method: AbstractCompactionFilterFactory#name
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getNameMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "name", "()Ljava/lang/String;");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: AbstractCompactionFilterFactory#createCompactionFilter
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getCreateCompactionFilterMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz,
+ "createCompactionFilter",
+ "(ZZ)J");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
+
+// The portal class for org.rocksdb.AbstractTransactionNotifier
+class AbstractTransactionNotifierJni
+ : public RocksDBNativeClass<
+ const ROCKSDB_NAMESPACE::TransactionNotifierJniCallback*,
+ AbstractTransactionNotifierJni> {
+ public:
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/AbstractTransactionNotifier");
+ }
+
+ // Get the java method `snapshotCreated`
+ // of org.rocksdb.AbstractTransactionNotifier.
+ static jmethodID getSnapshotCreatedMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "snapshotCreated", "(J)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
+
+// The portal class for org.rocksdb.AbstractComparatorJniBridge
+class AbstractComparatorJniBridge : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.AbstractComparatorJniBridge
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env,
+ "org/rocksdb/AbstractComparatorJniBridge");
+ }
+
+ /**
+ * Get the Java Method: Comparator#compareInternal
+ *
+ * @param env A pointer to the Java environment
+ * @param jclazz the AbstractComparatorJniBridge class
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getCompareInternalMethodId(JNIEnv* env, jclass jclazz) {
+ static jmethodID mid =
+ env->GetStaticMethodID(jclazz, "compareInternal",
+ "(Lorg/rocksdb/AbstractComparator;Ljava/nio/ByteBuffer;ILjava/nio/ByteBuffer;I)I");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: Comparator#findShortestSeparatorInternal
+ *
+ * @param env A pointer to the Java environment
+ * @param jclazz the AbstractComparatorJniBridge class
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getFindShortestSeparatorInternalMethodId(JNIEnv* env, jclass jclazz) {
+ static jmethodID mid =
+ env->GetStaticMethodID(jclazz, "findShortestSeparatorInternal",
+ "(Lorg/rocksdb/AbstractComparator;Ljava/nio/ByteBuffer;ILjava/nio/ByteBuffer;I)I");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: Comparator#findShortSuccessorInternal
+ *
+ * @param env A pointer to the Java environment
+ * @param jclazz the AbstractComparatorJniBridge class
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getFindShortSuccessorInternalMethodId(JNIEnv* env, jclass jclazz) {
+ static jmethodID mid =
+ env->GetStaticMethodID(jclazz, "findShortSuccessorInternal",
+ "(Lorg/rocksdb/AbstractComparator;Ljava/nio/ByteBuffer;I)I");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
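+
+/*
+ * Illustrative sketch (assumption): the ComparatorJniCallback invokes the
+ * static bridge method with cached ByteBuffers; the m_* names below are
+ * assumed cached references, not declarations from this header:
+ *
+ *   jint result = env->CallStaticIntMethod(
+ *       m_abstract_comparator_jni_bridge_clazz, m_jcompare_mid,
+ *       m_jcomparator, jbuf_a, static_cast<jint>(a.size()),
+ *       jbuf_b, static_cast<jint>(b.size()));
+ *   if (env->ExceptionCheck()) {
+ *     // exception in the Java comparator; handle before trusting result
+ *   }
+ */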
+
+// The portal class for org.rocksdb.AbstractComparator
+class AbstractComparatorJni
+ : public RocksDBNativeClass<const ROCKSDB_NAMESPACE::ComparatorJniCallback*,
+ AbstractComparatorJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.AbstractComparator
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/AbstractComparator");
+ }
+
+ /**
+ * Get the Java Method: Comparator#name
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getNameMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "name", "()Ljava/lang/String;");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
+
+// The portal class for org.rocksdb.AbstractSlice
+class AbstractSliceJni
+ : public NativeRocksMutableObject<const ROCKSDB_NAMESPACE::Slice*,
+ AbstractSliceJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.AbstractSlice
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/AbstractSlice");
+ }
+};
+
+// The portal class for org.rocksdb.Slice
+class SliceJni
+ : public NativeRocksMutableObject<const ROCKSDB_NAMESPACE::Slice*,
+ AbstractSliceJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.Slice
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/Slice");
+ }
+
+ /**
+ * Constructs a Slice object
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to a Java Slice object, or a nullptr if an
+ * exception occurs
+ */
+ static jobject construct0(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "<init>", "()V");
+ if(mid == nullptr) {
+ // exception occurred accessing method
+ return nullptr;
+ }
+
+ jobject jslice = env->NewObject(jclazz, mid);
+ if(env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jslice;
+ }
+};
+
+// The portal class for org.rocksdb.DirectSlice
+class DirectSliceJni
+ : public NativeRocksMutableObject<const ROCKSDB_NAMESPACE::Slice*,
+ AbstractSliceJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.DirectSlice
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/DirectSlice");
+ }
+
+ /**
+ * Constructs a DirectSlice object
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to a Java DirectSlice object, or a nullptr if an
+ * exception occurs
+ */
+ static jobject construct0(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "<init>", "()V");
+ if(mid == nullptr) {
+ // exception occurred accessing method
+ return nullptr;
+ }
+
+ jobject jdirect_slice = env->NewObject(jclazz, mid);
+ if(env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jdirect_slice;
+ }
+};
+
+// The portal class for org.rocksdb.BackupInfo
+class BackupInfoJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.BackupInfo
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/BackupInfo");
+ }
+
+ /**
+ * Constructs a BackupInfo object
+ *
+ * @param env A pointer to the Java environment
+ * @param backup_id id of the backup
+ * @param timestamp timestamp of the backup
+ * @param size size of the backup
+ * @param number_files number of files related to the backup
+ * @param app_metadata application specific metadata
+ *
+ * @return A reference to a Java BackupInfo object, or a nullptr if an
+ * exception occurs
+ */
+ static jobject construct0(JNIEnv* env, uint32_t backup_id, int64_t timestamp,
+ uint64_t size, uint32_t number_files,
+ const std::string& app_metadata) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "<init>", "(IJJILjava/lang/String;)V");
+ if(mid == nullptr) {
+ // exception occurred accessing method
+ return nullptr;
+ }
+
+ jstring japp_metadata = nullptr;
+ // comparing a std::string against nullptr is undefined behaviour;
+ // only build the Java string when metadata is present
+ if (!app_metadata.empty()) {
+ japp_metadata = env->NewStringUTF(app_metadata.c_str());
+ if (japp_metadata == nullptr) {
+ // exception occurred creating java string
+ return nullptr;
+ }
+ }
+
+ jobject jbackup_info = env->NewObject(jclazz, mid, backup_id, timestamp,
+ size, number_files, japp_metadata);
+ if(env->ExceptionCheck()) {
+ env->DeleteLocalRef(japp_metadata);
+ return nullptr;
+ }
+
+ return jbackup_info;
+ }
+};
+
+class BackupInfoListJni {
+ public:
+ /**
+ * Converts a C++ std::vector<BackupInfo> object to
+ * a Java ArrayList<org.rocksdb.BackupInfo> object
+ *
+ * @param env A pointer to the Java environment
+ * @param backup_infos A vector of BackupInfo
+ *
+ * @return Either a reference to a Java ArrayList object, or a nullptr
+ * if an exception occurs
+ */
+ static jobject getBackupInfo(JNIEnv* env,
+ std::vector<BackupInfo> backup_infos) {
+ jclass jarray_list_clazz =
+ ROCKSDB_NAMESPACE::ListJni::getArrayListClass(env);
+ if(jarray_list_clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID cstr_mid =
+ ROCKSDB_NAMESPACE::ListJni::getArrayListConstructorMethodId(env);
+ if(cstr_mid == nullptr) {
+ // exception occurred accessing method
+ return nullptr;
+ }
+
+ jmethodID add_mid = ROCKSDB_NAMESPACE::ListJni::getListAddMethodId(env);
+ if(add_mid == nullptr) {
+ // exception occurred accessing method
+ return nullptr;
+ }
+
+ // create java list
+ jobject jbackup_info_handle_list =
+ env->NewObject(jarray_list_clazz, cstr_mid, static_cast<jint>(backup_infos.size()));
+ if(env->ExceptionCheck()) {
+ // exception occurred constructing object
+ return nullptr;
+ }
+
+ // insert in java list
+ auto end = backup_infos.end();
+ for (auto it = backup_infos.begin(); it != end; ++it) {
+ auto backup_info = *it;
+
+ jobject obj = ROCKSDB_NAMESPACE::BackupInfoJni::construct0(
+ env, backup_info.backup_id, backup_info.timestamp, backup_info.size,
+ backup_info.number_files, backup_info.app_metadata);
+ if(env->ExceptionCheck()) {
+ // exception occurred constructing object
+ if(obj != nullptr) {
+ env->DeleteLocalRef(obj);
+ }
+ if(jbackup_info_handle_list != nullptr) {
+ env->DeleteLocalRef(jbackup_info_handle_list);
+ }
+ return nullptr;
+ }
+
+ jboolean rs =
+ env->CallBooleanMethod(jbackup_info_handle_list, add_mid, obj);
+ if(env->ExceptionCheck() || rs == JNI_FALSE) {
+ // exception occurred calling method, or could not add
+ if(obj != nullptr) {
+ env->DeleteLocalRef(obj);
+ }
+ if(jbackup_info_handle_list != nullptr) {
+ env->DeleteLocalRef(jbackup_info_handle_list);
+ }
+ return nullptr;
+ }
+ }
+
+ return jbackup_info_handle_list;
+ }
+};
+
+// The portal class for org.rocksdb.WBWIRocksIterator
+class WBWIRocksIteratorJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.WBWIRocksIterator
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/WBWIRocksIterator");
+ }
+
+ /**
+ * Get the Java Field: WBWIRocksIterator#entry
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Field ID or nullptr if the class or field id could not
+ * be retrieved
+ */
+ static jfieldID getWriteEntryField(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jfieldID fid =
+ env->GetFieldID(jclazz, "entry",
+ "Lorg/rocksdb/WBWIRocksIterator$WriteEntry;");
+ assert(fid != nullptr);
+ return fid;
+ }
+
+ /**
+ * Gets the value of the WBWIRocksIterator#entry
+ *
+ * @param env A pointer to the Java environment
+ * @param jwbwi_rocks_iterator A reference to a WBWIIterator
+ *
+ * @return A reference to a Java WBWIRocksIterator.WriteEntry object, or
+ * a nullptr if an exception occurs
+ */
+ static jobject getWriteEntry(JNIEnv* env, jobject jwbwi_rocks_iterator) {
+ assert(jwbwi_rocks_iterator != nullptr);
+
+ jfieldID jwrite_entry_field = getWriteEntryField(env);
+ if(jwrite_entry_field == nullptr) {
+ // exception occurred accessing the field
+ return nullptr;
+ }
+
+ jobject jwe = env->GetObjectField(jwbwi_rocks_iterator, jwrite_entry_field);
+ assert(jwe != nullptr);
+ return jwe;
+ }
+};
+
+// The portal class for org.rocksdb.WBWIRocksIterator.WriteType
+class WriteTypeJni : public JavaClass {
+ public:
+ /**
+ * Get the PUT enum field value of WBWIRocksIterator.WriteType
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject PUT(JNIEnv* env) {
+ return getEnum(env, "PUT");
+ }
+
+ /**
+ * Get the MERGE enum field value of WBWIRocksIterator.WriteType
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject MERGE(JNIEnv* env) {
+ return getEnum(env, "MERGE");
+ }
+
+ /**
+ * Get the DELETE enum field value of WBWIRocksIterator.WriteType
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject DELETE(JNIEnv* env) {
+ return getEnum(env, "DELETE");
+ }
+
+ /**
+ * Get the LOG enum field value of WBWIRocksIterator.WriteType
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject LOG(JNIEnv* env) {
+ return getEnum(env, "LOG");
+ }
+
+ // Returns the equivalent org.rocksdb.WBWIRocksIterator.WriteType for the
+ // provided C++ ROCKSDB_NAMESPACE::WriteType enum
+ static jbyte toJavaWriteType(const ROCKSDB_NAMESPACE::WriteType& writeType) {
+ switch (writeType) {
+ case ROCKSDB_NAMESPACE::WriteType::kPutRecord:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::WriteType::kMergeRecord:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::WriteType::kDeleteRecord:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::WriteType::kSingleDeleteRecord:
+ return 0x3;
+ case ROCKSDB_NAMESPACE::WriteType::kDeleteRangeRecord:
+ return 0x4;
+ case ROCKSDB_NAMESPACE::WriteType::kLogDataRecord:
+ return 0x5;
+ case ROCKSDB_NAMESPACE::WriteType::kXIDRecord:
+ return 0x6;
+ default:
+ return 0x7F; // undefined
+ }
+ }
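+
+ /*
+ * Illustrative mapping (assumption): a WBWIIterator entry can be surfaced
+ * to Java by pairing toJavaWriteType with the matching enum object above:
+ *
+ *   jobject jwrite_type;
+ *   switch (entry.type) {
+ *     case ROCKSDB_NAMESPACE::WriteType::kPutRecord:
+ *       jwrite_type = WriteTypeJni::PUT(env);
+ *       break;
+ *     case ROCKSDB_NAMESPACE::WriteType::kMergeRecord:
+ *       jwrite_type = WriteTypeJni::MERGE(env);
+ *       break;
+ *     default:
+ *       jwrite_type = WriteTypeJni::LOG(env);
+ *   }
+ */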
+
+ private:
+ /**
+ * Get the Java Class org.rocksdb.WBWIRocksIterator.WriteType
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/WBWIRocksIterator$WriteType");
+ }
+
+ /**
+ * Get an enum field of org.rocksdb.WBWIRocksIterator.WriteType
+ *
+ * @param env A pointer to the Java environment
+ * @param name The name of the enum field
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject getEnum(JNIEnv* env, const char name[]) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jfieldID jfid =
+ env->GetStaticFieldID(jclazz, name,
+ "Lorg/rocksdb/WBWIRocksIterator$WriteType;");
+ if(env->ExceptionCheck()) {
+ // exception occurred while getting field
+ return nullptr;
+ } else if(jfid == nullptr) {
+ return nullptr;
+ }
+
+ jobject jwrite_type = env->GetStaticObjectField(jclazz, jfid);
+ assert(jwrite_type != nullptr);
+ return jwrite_type;
+ }
+};
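+
+// Usage sketch (illustrative only, not part of this header): a JNI method
+// exposing the current WBWIIterator entry could convert the C++ write type
+// to its Java byte representation as follows, where "it" is assumed to be
+// a valid ROCKSDB_NAMESPACE::WBWIIterator*:
+//
+//   ROCKSDB_NAMESPACE::WriteEntry we = it->Entry();
+//   const jbyte jwrite_type = WriteTypeJni::toJavaWriteType(we.type);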
+
+// The portal class for org.rocksdb.WBWIRocksIterator.WriteEntry
+class WriteEntryJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.WBWIRocksIterator.WriteEntry
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/WBWIRocksIterator$WriteEntry");
+ }
+};
+
+// The portal class for org.rocksdb.InfoLogLevel
+class InfoLogLevelJni : public JavaClass {
+ public:
+ /**
+ * Get the DEBUG_LEVEL enum field value of InfoLogLevel
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject DEBUG_LEVEL(JNIEnv* env) {
+ return getEnum(env, "DEBUG_LEVEL");
+ }
+
+ /**
+ * Get the INFO_LEVEL enum field value of InfoLogLevel
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject INFO_LEVEL(JNIEnv* env) {
+ return getEnum(env, "INFO_LEVEL");
+ }
+
+ /**
+ * Get the WARN_LEVEL enum field value of InfoLogLevel
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject WARN_LEVEL(JNIEnv* env) {
+ return getEnum(env, "WARN_LEVEL");
+ }
+
+ /**
+ * Get the ERROR_LEVEL enum field value of InfoLogLevel
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject ERROR_LEVEL(JNIEnv* env) {
+ return getEnum(env, "ERROR_LEVEL");
+ }
+
+ /**
+ * Get the FATAL_LEVEL enum field value of InfoLogLevel
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject FATAL_LEVEL(JNIEnv* env) {
+ return getEnum(env, "FATAL_LEVEL");
+ }
+
+ /**
+ * Get the HEADER_LEVEL enum field value of InfoLogLevel
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject HEADER_LEVEL(JNIEnv* env) {
+ return getEnum(env, "HEADER_LEVEL");
+ }
+
+ private:
+ /**
+ * Get the Java Class org.rocksdb.InfoLogLevel
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/InfoLogLevel");
+ }
+
+ /**
+ * Get an enum field of org.rocksdb.InfoLogLevel
+ *
+ * @param env A pointer to the Java environment
+ * @param name The name of the enum field
+ *
+ * @return A reference to the enum field value or a nullptr if
+ * the enum field value could not be retrieved
+ */
+ static jobject getEnum(JNIEnv* env, const char name[]) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jfieldID jfid =
+ env->GetStaticFieldID(jclazz, name, "Lorg/rocksdb/InfoLogLevel;");
+ if(env->ExceptionCheck()) {
+ // exception occurred while getting field
+ return nullptr;
+ } else if(jfid == nullptr) {
+ return nullptr;
+ }
+
+ jobject jinfo_log_level = env->GetStaticObjectField(jclazz, jfid);
+ assert(jinfo_log_level != nullptr);
+ return jinfo_log_level;
+ }
+};
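+
+// Usage sketch (illustrative only, not part of this header): selecting the
+// Java enum constant for a C++ log level before invoking Logger#log (see
+// LoggerJni below); "log_level" is an assumed local of type
+// ROCKSDB_NAMESPACE::InfoLogLevel:
+//
+//   jobject jlevel;
+//   switch (log_level) {
+//     case ROCKSDB_NAMESPACE::InfoLogLevel::WARN_LEVEL:
+//       jlevel = InfoLogLevelJni::WARN_LEVEL(env);
+//       break;
+//     // ... the other levels map to their like-named accessors
+//     default:
+//       jlevel = InfoLogLevelJni::INFO_LEVEL(env);
+//   }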
+
+// The portal class for org.rocksdb.Logger
+class LoggerJni
+ : public RocksDBNativeClass<
+ std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*, LoggerJni> {
+ public:
+ /**
+ * Get the Java Class org/rocksdb/Logger
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/Logger");
+ }
+
+ /**
+ * Get the Java Method: Logger#log
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ *     be retrieved
+ */
+ static jmethodID getLogMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "log",
+ "(Lorg/rocksdb/InfoLogLevel;Ljava/lang/String;)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
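+
+// Usage sketch (illustrative only, not part of this header): a native
+// callback such as ROCKSDB_NAMESPACE::LoggerJniCallback could forward a
+// message to the Java side roughly like this; "m_jlogger" (a global
+// reference to the Java Logger object) and "jlevel" are assumed to be
+// provided by the callback:
+//
+//   jmethodID mid = LoggerJni::getLogMethodId(env);
+//   if (mid != nullptr) {
+//     jstring jmsg = env->NewStringUTF(msg);
+//     if (jmsg != nullptr) {
+//       env->CallVoidMethod(m_jlogger, mid, jlevel, jmsg);
+//       env->DeleteLocalRef(jmsg);
+//     }
+//   }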
+
+// The portal class for org.rocksdb.TransactionLogIterator.BatchResult
+class BatchResultJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.TransactionLogIterator.BatchResult
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env,
+ "org/rocksdb/TransactionLogIterator$BatchResult");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.TransactionLogIterator.BatchResult object
+ * with the same properties as the provided C++ ROCKSDB_NAMESPACE::BatchResult
+ * object
+ *
+ * @param env A pointer to the Java environment
+ * @param batch_result The ROCKSDB_NAMESPACE::BatchResult object
+ *
+ * @return A reference to a Java
+ * org.rocksdb.TransactionLogIterator.BatchResult object,
+ *     or nullptr if an exception occurs
+ */
+ static jobject construct(JNIEnv* env,
+ ROCKSDB_NAMESPACE::BatchResult& batch_result) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(
+ jclazz, "<init>", "(JJ)V");
+ if(mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jbatch_result = env->NewObject(jclazz, mid,
+ batch_result.sequence, batch_result.writeBatchPtr.get());
+ if(jbatch_result == nullptr) {
+ // exception thrown: InstantiationException or OutOfMemoryError
+ return nullptr;
+ }
+
+ batch_result.writeBatchPtr.release();
+ return jbatch_result;
+ }
+};
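+
+// Note on ownership: on success, BatchResultJni::construct calls
+// batch_result.writeBatchPtr.release(), so the C++ WriteBatch is handed
+// over to the Java BatchResult object, which becomes responsible for
+// disposing of it. A caller must therefore not free the WriteBatch itself,
+// e.g. (illustrative sketch, "it" being an assumed TransactionLogIterator*):
+//
+//   ROCKSDB_NAMESPACE::BatchResult batch_result = it->GetBatch();
+//   jobject jbatch_result = BatchResultJni::construct(env, batch_result);
+//   // batch_result.writeBatchPtr is now empty if jbatch_result != nullptr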
+
+// The portal class for org.rocksdb.BottommostLevelCompaction
+class BottommostLevelCompactionJni {
+ public:
+ // Returns the equivalent org.rocksdb.BottommostLevelCompaction for the
+ // provided C++ ROCKSDB_NAMESPACE::BottommostLevelCompaction enum
+ static jint toJavaBottommostLevelCompaction(
+ const ROCKSDB_NAMESPACE::BottommostLevelCompaction&
+ bottommost_level_compaction) {
+ switch(bottommost_level_compaction) {
+ case ROCKSDB_NAMESPACE::BottommostLevelCompaction::kSkip:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::BottommostLevelCompaction::
+ kIfHaveCompactionFilter:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::BottommostLevelCompaction::kForce:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::BottommostLevelCompaction::kForceOptimized:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::BottommostLevelCompaction
+ // enum for the provided Java org.rocksdb.BottommostLevelCompaction
+ static ROCKSDB_NAMESPACE::BottommostLevelCompaction
+ toCppBottommostLevelCompaction(jint bottommost_level_compaction) {
+ switch(bottommost_level_compaction) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::BottommostLevelCompaction::kSkip;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::BottommostLevelCompaction::
+ kIfHaveCompactionFilter;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::BottommostLevelCompaction::kForce;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::BottommostLevelCompaction::kForceOptimized;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::BottommostLevelCompaction::
+ kIfHaveCompactionFilter;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.CompactionStopStyle
+class CompactionStopStyleJni {
+ public:
+ // Returns the equivalent org.rocksdb.CompactionStopStyle for the provided
+ // C++ ROCKSDB_NAMESPACE::CompactionStopStyle enum
+ static jbyte toJavaCompactionStopStyle(
+ const ROCKSDB_NAMESPACE::CompactionStopStyle& compaction_stop_style) {
+ switch(compaction_stop_style) {
+ case ROCKSDB_NAMESPACE::CompactionStopStyle::
+ kCompactionStopStyleSimilarSize:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::CompactionStopStyle::
+ kCompactionStopStyleTotalSize:
+ return 0x1;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::CompactionStopStyle enum for
+ // the provided Java org.rocksdb.CompactionStopStyle
+ static ROCKSDB_NAMESPACE::CompactionStopStyle toCppCompactionStopStyle(
+ jbyte jcompaction_stop_style) {
+ switch(jcompaction_stop_style) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::CompactionStopStyle::
+ kCompactionStopStyleSimilarSize;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::CompactionStopStyle::
+ kCompactionStopStyleTotalSize;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::CompactionStopStyle::
+ kCompactionStopStyleSimilarSize;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.CompressionType
+class CompressionTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.CompressionType for the provided
+ // C++ ROCKSDB_NAMESPACE::CompressionType enum
+ static jbyte toJavaCompressionType(
+ const ROCKSDB_NAMESPACE::CompressionType& compression_type) {
+ switch(compression_type) {
+ case ROCKSDB_NAMESPACE::CompressionType::kNoCompression:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::CompressionType::kSnappyCompression:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::CompressionType::kZlibCompression:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::CompressionType::kBZip2Compression:
+ return 0x3;
+ case ROCKSDB_NAMESPACE::CompressionType::kLZ4Compression:
+ return 0x4;
+ case ROCKSDB_NAMESPACE::CompressionType::kLZ4HCCompression:
+ return 0x5;
+ case ROCKSDB_NAMESPACE::CompressionType::kXpressCompression:
+ return 0x6;
+ case ROCKSDB_NAMESPACE::CompressionType::kZSTD:
+ return 0x7;
+ case ROCKSDB_NAMESPACE::CompressionType::kDisableCompressionOption:
+ default:
+ return 0x7F;
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::CompressionType enum for the
+ // provided Java org.rocksdb.CompressionType
+ static ROCKSDB_NAMESPACE::CompressionType toCppCompressionType(
+ jbyte jcompression_type) {
+ switch(jcompression_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::CompressionType::kNoCompression;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::CompressionType::kSnappyCompression;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::CompressionType::kZlibCompression;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::CompressionType::kBZip2Compression;
+ case 0x4:
+ return ROCKSDB_NAMESPACE::CompressionType::kLZ4Compression;
+ case 0x5:
+ return ROCKSDB_NAMESPACE::CompressionType::kLZ4HCCompression;
+ case 0x6:
+ return ROCKSDB_NAMESPACE::CompressionType::kXpressCompression;
+ case 0x7:
+ return ROCKSDB_NAMESPACE::CompressionType::kZSTD;
+ case 0x7F:
+ default:
+ return ROCKSDB_NAMESPACE::CompressionType::kDisableCompressionOption;
+ }
+ }
+};
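+
+// Usage sketch (illustrative only, not part of this header): the enum
+// portal classes are typically used in matching JNI setter/getter pairs;
+// here "jhandle" is assumed to hold a ROCKSDB_NAMESPACE::Options*:
+//
+//   auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+//   opts->compression =
+//       CompressionTypeJni::toCppCompressionType(jcompression_type);
+//   const jbyte jcurrent =
+//       CompressionTypeJni::toJavaCompressionType(opts->compression);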
+
+// The portal class for org.rocksdb.CompactionPriority
+class CompactionPriorityJni {
+ public:
+ // Returns the equivalent org.rocksdb.CompactionPriority for the provided
+ // C++ ROCKSDB_NAMESPACE::CompactionPri enum
+ static jbyte toJavaCompactionPriority(
+ const ROCKSDB_NAMESPACE::CompactionPri& compaction_priority) {
+ switch(compaction_priority) {
+ case ROCKSDB_NAMESPACE::CompactionPri::kByCompensatedSize:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::CompactionPri::kOldestLargestSeqFirst:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::CompactionPri::kOldestSmallestSeqFirst:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::CompactionPri::kMinOverlappingRatio:
+ return 0x3;
+ default:
+ return 0x0; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::CompactionPri enum for the
+ // provided Java org.rocksdb.CompactionPriority
+ static ROCKSDB_NAMESPACE::CompactionPri toCppCompactionPriority(
+ jbyte jcompaction_priority) {
+ switch(jcompaction_priority) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::CompactionPri::kByCompensatedSize;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::CompactionPri::kOldestLargestSeqFirst;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::CompactionPri::kOldestSmallestSeqFirst;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::CompactionPri::kMinOverlappingRatio;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::CompactionPri::kByCompensatedSize;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.AccessHint
+class AccessHintJni {
+ public:
+ // Returns the equivalent org.rocksdb.AccessHint for the provided
+ // C++ ROCKSDB_NAMESPACE::DBOptions::AccessHint enum
+ static jbyte toJavaAccessHint(
+ const ROCKSDB_NAMESPACE::DBOptions::AccessHint& access_hint) {
+ switch(access_hint) {
+ case ROCKSDB_NAMESPACE::DBOptions::AccessHint::NONE:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::DBOptions::AccessHint::NORMAL:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::DBOptions::AccessHint::SEQUENTIAL:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::DBOptions::AccessHint::WILLNEED:
+ return 0x3;
+ default:
+ // undefined/default
+ return 0x1;
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::DBOptions::AccessHint enum
+ // for the provided Java org.rocksdb.AccessHint
+ static ROCKSDB_NAMESPACE::DBOptions::AccessHint toCppAccessHint(
+ jbyte jaccess_hint) {
+ switch(jaccess_hint) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::DBOptions::AccessHint::NONE;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::DBOptions::AccessHint::NORMAL;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::DBOptions::AccessHint::SEQUENTIAL;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::DBOptions::AccessHint::WILLNEED;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::DBOptions::AccessHint::NORMAL;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.WALRecoveryMode
+class WALRecoveryModeJni {
+ public:
+ // Returns the equivalent org.rocksdb.WALRecoveryMode for the provided
+ // C++ ROCKSDB_NAMESPACE::WALRecoveryMode enum
+ static jbyte toJavaWALRecoveryMode(
+ const ROCKSDB_NAMESPACE::WALRecoveryMode& wal_recovery_mode) {
+ switch(wal_recovery_mode) {
+ case ROCKSDB_NAMESPACE::WALRecoveryMode::kTolerateCorruptedTailRecords:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::WALRecoveryMode::kAbsoluteConsistency:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::WALRecoveryMode::kPointInTimeRecovery:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::WALRecoveryMode::kSkipAnyCorruptedRecords:
+ return 0x3;
+ default:
+ // undefined/default
+ return 0x2;
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::WALRecoveryMode enum for the
+ // provided Java org.rocksdb.WALRecoveryMode
+ static ROCKSDB_NAMESPACE::WALRecoveryMode toCppWALRecoveryMode(
+ jbyte jwal_recovery_mode) {
+ switch(jwal_recovery_mode) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::WALRecoveryMode::
+ kTolerateCorruptedTailRecords;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::WALRecoveryMode::kAbsoluteConsistency;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::WALRecoveryMode::kPointInTimeRecovery;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::WALRecoveryMode::kSkipAnyCorruptedRecords;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::WALRecoveryMode::kPointInTimeRecovery;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.TickerType
+class TickerTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.TickerType for the provided
+ // C++ ROCKSDB_NAMESPACE::Tickers enum
+ static jbyte toJavaTickerType(const ROCKSDB_NAMESPACE::Tickers& tickers) {
+ switch(tickers) {
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_MISS:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_HIT:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_ADD:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_ADD_FAILURES:
+ return 0x3;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_MISS:
+ return 0x4;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_HIT:
+ return 0x5;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_ADD:
+ return 0x6;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_BYTES_INSERT:
+ return 0x7;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_BYTES_EVICT:
+ return 0x8;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_MISS:
+ return 0x9;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_HIT:
+ return 0xA;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_ADD:
+ return 0xB;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_BYTES_INSERT:
+ return 0xC;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_BYTES_EVICT:
+ return 0xD;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_MISS:
+ return 0xE;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_HIT:
+ return 0xF;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_ADD:
+ return 0x10;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_BYTES_INSERT:
+ return 0x11;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_BYTES_READ:
+ return 0x12;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_BYTES_WRITE:
+ return 0x13;
+ case ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_USEFUL:
+ return 0x14;
+ case ROCKSDB_NAMESPACE::Tickers::PERSISTENT_CACHE_HIT:
+ return 0x15;
+ case ROCKSDB_NAMESPACE::Tickers::PERSISTENT_CACHE_MISS:
+ return 0x16;
+ case ROCKSDB_NAMESPACE::Tickers::SIM_BLOCK_CACHE_HIT:
+ return 0x17;
+ case ROCKSDB_NAMESPACE::Tickers::SIM_BLOCK_CACHE_MISS:
+ return 0x18;
+ case ROCKSDB_NAMESPACE::Tickers::MEMTABLE_HIT:
+ return 0x19;
+ case ROCKSDB_NAMESPACE::Tickers::MEMTABLE_MISS:
+ return 0x1A;
+ case ROCKSDB_NAMESPACE::Tickers::GET_HIT_L0:
+ return 0x1B;
+ case ROCKSDB_NAMESPACE::Tickers::GET_HIT_L1:
+ return 0x1C;
+ case ROCKSDB_NAMESPACE::Tickers::GET_HIT_L2_AND_UP:
+ return 0x1D;
+ case ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_NEWER_ENTRY:
+ return 0x1E;
+ case ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_OBSOLETE:
+ return 0x1F;
+ case ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_RANGE_DEL:
+ return 0x20;
+ case ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_USER:
+ return 0x21;
+ case ROCKSDB_NAMESPACE::Tickers::COMPACTION_RANGE_DEL_DROP_OBSOLETE:
+ return 0x22;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_WRITTEN:
+ return 0x23;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_READ:
+ return 0x24;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_UPDATED:
+ return 0x25;
+ case ROCKSDB_NAMESPACE::Tickers::BYTES_WRITTEN:
+ return 0x26;
+ case ROCKSDB_NAMESPACE::Tickers::BYTES_READ:
+ return 0x27;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_SEEK:
+ return 0x28;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_NEXT:
+ return 0x29;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_PREV:
+ return 0x2A;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_SEEK_FOUND:
+ return 0x2B;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_NEXT_FOUND:
+ return 0x2C;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_PREV_FOUND:
+ return 0x2D;
+ case ROCKSDB_NAMESPACE::Tickers::ITER_BYTES_READ:
+ return 0x2E;
+ case ROCKSDB_NAMESPACE::Tickers::NO_FILE_CLOSES:
+ return 0x2F;
+ case ROCKSDB_NAMESPACE::Tickers::NO_FILE_OPENS:
+ return 0x30;
+ case ROCKSDB_NAMESPACE::Tickers::NO_FILE_ERRORS:
+ return 0x31;
+ case ROCKSDB_NAMESPACE::Tickers::STALL_L0_SLOWDOWN_MICROS:
+ return 0x32;
+ case ROCKSDB_NAMESPACE::Tickers::STALL_MEMTABLE_COMPACTION_MICROS:
+ return 0x33;
+ case ROCKSDB_NAMESPACE::Tickers::STALL_L0_NUM_FILES_MICROS:
+ return 0x34;
+ case ROCKSDB_NAMESPACE::Tickers::STALL_MICROS:
+ return 0x35;
+ case ROCKSDB_NAMESPACE::Tickers::DB_MUTEX_WAIT_MICROS:
+ return 0x36;
+ case ROCKSDB_NAMESPACE::Tickers::RATE_LIMIT_DELAY_MILLIS:
+ return 0x37;
+ case ROCKSDB_NAMESPACE::Tickers::NO_ITERATORS:
+ return 0x38;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_CALLS:
+ return 0x39;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_KEYS_READ:
+ return 0x3A;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_BYTES_READ:
+ return 0x3B;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_FILTERED_DELETES:
+ return 0x3C;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_MERGE_FAILURES:
+ return 0x3D;
+ case ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_PREFIX_CHECKED:
+ return 0x3E;
+ case ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_PREFIX_USEFUL:
+ return 0x3F;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_OF_RESEEKS_IN_ITERATION:
+ return 0x40;
+ case ROCKSDB_NAMESPACE::Tickers::GET_UPDATES_SINCE_CALLS:
+ return 0x41;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_MISS:
+ return 0x42;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_HIT:
+ return 0x43;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_ADD:
+ return 0x44;
+ case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_ADD_FAILURES:
+ return 0x45;
+ case ROCKSDB_NAMESPACE::Tickers::WAL_FILE_SYNCED:
+ return 0x46;
+ case ROCKSDB_NAMESPACE::Tickers::WAL_FILE_BYTES:
+ return 0x47;
+ case ROCKSDB_NAMESPACE::Tickers::WRITE_DONE_BY_SELF:
+ return 0x48;
+ case ROCKSDB_NAMESPACE::Tickers::WRITE_DONE_BY_OTHER:
+ return 0x49;
+ case ROCKSDB_NAMESPACE::Tickers::WRITE_TIMEDOUT:
+ return 0x4A;
+ case ROCKSDB_NAMESPACE::Tickers::WRITE_WITH_WAL:
+ return 0x4B;
+ case ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES:
+ return 0x4C;
+ case ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES:
+ return 0x4D;
+ case ROCKSDB_NAMESPACE::Tickers::FLUSH_WRITE_BYTES:
+ return 0x4E;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_DIRECT_LOAD_TABLE_PROPERTIES:
+ return 0x4F;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_ACQUIRES:
+ return 0x50;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_RELEASES:
+ return 0x51;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_CLEANUPS:
+ return 0x52;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_COMPRESSED:
+ return 0x53;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_DECOMPRESSED:
+ return 0x54;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_NOT_COMPRESSED:
+ return 0x55;
+ case ROCKSDB_NAMESPACE::Tickers::MERGE_OPERATION_TOTAL_TIME:
+ return 0x56;
+ case ROCKSDB_NAMESPACE::Tickers::FILTER_OPERATION_TOTAL_TIME:
+ return 0x57;
+ case ROCKSDB_NAMESPACE::Tickers::ROW_CACHE_HIT:
+ return 0x58;
+ case ROCKSDB_NAMESPACE::Tickers::ROW_CACHE_MISS:
+ return 0x59;
+ case ROCKSDB_NAMESPACE::Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES:
+ return 0x5A;
+ case ROCKSDB_NAMESPACE::Tickers::READ_AMP_TOTAL_READ_BYTES:
+ return 0x5B;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_RATE_LIMITER_DRAINS:
+ return 0x5C;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_ITER_SKIP:
+ return 0x5D;
+ case ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_KEYS_FOUND:
+ return 0x5E;
+ case ROCKSDB_NAMESPACE::Tickers::NO_ITERATOR_CREATED:
+      // -0x01 so that 0x5F stays reserved for TICKER_ENUM_MAX, whose value
+      // this newly-added ticker would otherwise have changed
+ return -0x01;
+ case ROCKSDB_NAMESPACE::Tickers::NO_ITERATOR_DELETED:
+ return 0x60;
+ case ROCKSDB_NAMESPACE::Tickers::COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE:
+ return 0x61;
+ case ROCKSDB_NAMESPACE::Tickers::COMPACTION_CANCELLED:
+ return 0x62;
+ case ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_FULL_POSITIVE:
+ return 0x63;
+ case ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_FULL_TRUE_POSITIVE:
+ return 0x64;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_PUT:
+ return 0x65;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_WRITE:
+ return 0x66;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_GET:
+ return 0x67;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_MULTIGET:
+ return 0x68;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_SEEK:
+ return 0x69;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_NEXT:
+ return 0x6A;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_PREV:
+ return 0x6B;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_KEYS_WRITTEN:
+ return 0x6C;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_KEYS_READ:
+ return 0x6D;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BYTES_WRITTEN:
+ return 0x6E;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BYTES_READ:
+ return 0x6F;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_INLINED:
+ return 0x70;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_INLINED_TTL:
+ return 0x71;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_BLOB:
+ return 0x72;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_BLOB_TTL:
+ return 0x73;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_BYTES_WRITTEN:
+ return 0x74;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_BYTES_READ:
+ return 0x75;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_SYNCED:
+ return 0x76;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EXPIRED_COUNT:
+ return 0x77;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EXPIRED_SIZE:
+ return 0x78;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EVICTED_COUNT:
+ return 0x79;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EVICTED_SIZE:
+ return 0x7A;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_FILES:
+ return 0x7B;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_NEW_FILES:
+ return 0x7C;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_FAILURES:
+ return 0x7D;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_OVERWRITTEN:
+ return 0x7E;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_EXPIRED:
+ return 0x7F;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_RELOCATED:
+ return -0x02;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_OVERWRITTEN:
+ return -0x03;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_EXPIRED:
+ return -0x04;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_RELOCATED:
+ return -0x05;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_NUM_FILES_EVICTED:
+ return -0x06;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_NUM_KEYS_EVICTED:
+ return -0x07;
+ case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_BYTES_EVICTED:
+ return -0x08;
+ case ROCKSDB_NAMESPACE::Tickers::TXN_PREPARE_MUTEX_OVERHEAD:
+ return -0x09;
+ case ROCKSDB_NAMESPACE::Tickers::TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD:
+ return -0x0A;
+ case ROCKSDB_NAMESPACE::Tickers::TXN_DUPLICATE_KEY_OVERHEAD:
+ return -0x0B;
+ case ROCKSDB_NAMESPACE::Tickers::TXN_SNAPSHOT_MUTEX_OVERHEAD:
+ return -0x0C;
+ case ROCKSDB_NAMESPACE::Tickers::TXN_GET_TRY_AGAIN:
+ return -0x0D;
+ case ROCKSDB_NAMESPACE::Tickers::TICKER_ENUM_MAX:
+ // 0x5F for backwards compatibility on current minor version.
+ return 0x5F;
+ default:
+ // undefined/default
+ return 0x0;
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::Tickers enum for the
+ // provided Java org.rocksdb.TickerType
+ static ROCKSDB_NAMESPACE::Tickers toCppTickers(jbyte jticker_type) {
+ switch(jticker_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_MISS;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_HIT;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_ADD;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_ADD_FAILURES;
+ case 0x4:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_MISS;
+ case 0x5:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_HIT;
+ case 0x6:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_ADD;
+ case 0x7:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_BYTES_INSERT;
+ case 0x8:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_BYTES_EVICT;
+ case 0x9:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_MISS;
+ case 0xA:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_HIT;
+ case 0xB:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_ADD;
+ case 0xC:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_BYTES_INSERT;
+ case 0xD:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_BYTES_EVICT;
+ case 0xE:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_MISS;
+ case 0xF:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_HIT;
+ case 0x10:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_ADD;
+ case 0x11:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_BYTES_INSERT;
+ case 0x12:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_BYTES_READ;
+ case 0x13:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_BYTES_WRITE;
+ case 0x14:
+ return ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_USEFUL;
+ case 0x15:
+ return ROCKSDB_NAMESPACE::Tickers::PERSISTENT_CACHE_HIT;
+ case 0x16:
+ return ROCKSDB_NAMESPACE::Tickers::PERSISTENT_CACHE_MISS;
+ case 0x17:
+ return ROCKSDB_NAMESPACE::Tickers::SIM_BLOCK_CACHE_HIT;
+ case 0x18:
+ return ROCKSDB_NAMESPACE::Tickers::SIM_BLOCK_CACHE_MISS;
+ case 0x19:
+ return ROCKSDB_NAMESPACE::Tickers::MEMTABLE_HIT;
+ case 0x1A:
+ return ROCKSDB_NAMESPACE::Tickers::MEMTABLE_MISS;
+ case 0x1B:
+ return ROCKSDB_NAMESPACE::Tickers::GET_HIT_L0;
+ case 0x1C:
+ return ROCKSDB_NAMESPACE::Tickers::GET_HIT_L1;
+ case 0x1D:
+ return ROCKSDB_NAMESPACE::Tickers::GET_HIT_L2_AND_UP;
+ case 0x1E:
+ return ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_NEWER_ENTRY;
+ case 0x1F:
+ return ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_OBSOLETE;
+ case 0x20:
+ return ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_RANGE_DEL;
+ case 0x21:
+ return ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_USER;
+ case 0x22:
+ return ROCKSDB_NAMESPACE::Tickers::COMPACTION_RANGE_DEL_DROP_OBSOLETE;
+ case 0x23:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_WRITTEN;
+ case 0x24:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_READ;
+ case 0x25:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_UPDATED;
+ case 0x26:
+ return ROCKSDB_NAMESPACE::Tickers::BYTES_WRITTEN;
+ case 0x27:
+ return ROCKSDB_NAMESPACE::Tickers::BYTES_READ;
+ case 0x28:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_SEEK;
+ case 0x29:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_NEXT;
+ case 0x2A:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_PREV;
+ case 0x2B:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_SEEK_FOUND;
+ case 0x2C:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_NEXT_FOUND;
+ case 0x2D:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_PREV_FOUND;
+ case 0x2E:
+ return ROCKSDB_NAMESPACE::Tickers::ITER_BYTES_READ;
+ case 0x2F:
+ return ROCKSDB_NAMESPACE::Tickers::NO_FILE_CLOSES;
+ case 0x30:
+ return ROCKSDB_NAMESPACE::Tickers::NO_FILE_OPENS;
+ case 0x31:
+ return ROCKSDB_NAMESPACE::Tickers::NO_FILE_ERRORS;
+ case 0x32:
+ return ROCKSDB_NAMESPACE::Tickers::STALL_L0_SLOWDOWN_MICROS;
+ case 0x33:
+ return ROCKSDB_NAMESPACE::Tickers::STALL_MEMTABLE_COMPACTION_MICROS;
+ case 0x34:
+ return ROCKSDB_NAMESPACE::Tickers::STALL_L0_NUM_FILES_MICROS;
+ case 0x35:
+ return ROCKSDB_NAMESPACE::Tickers::STALL_MICROS;
+ case 0x36:
+ return ROCKSDB_NAMESPACE::Tickers::DB_MUTEX_WAIT_MICROS;
+ case 0x37:
+ return ROCKSDB_NAMESPACE::Tickers::RATE_LIMIT_DELAY_MILLIS;
+ case 0x38:
+ return ROCKSDB_NAMESPACE::Tickers::NO_ITERATORS;
+ case 0x39:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_CALLS;
+ case 0x3A:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_KEYS_READ;
+ case 0x3B:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_BYTES_READ;
+ case 0x3C:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_FILTERED_DELETES;
+ case 0x3D:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_MERGE_FAILURES;
+ case 0x3E:
+ return ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_PREFIX_CHECKED;
+ case 0x3F:
+ return ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_PREFIX_USEFUL;
+ case 0x40:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_OF_RESEEKS_IN_ITERATION;
+ case 0x41:
+ return ROCKSDB_NAMESPACE::Tickers::GET_UPDATES_SINCE_CALLS;
+ case 0x42:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_MISS;
+ case 0x43:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_HIT;
+ case 0x44:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_ADD;
+ case 0x45:
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_ADD_FAILURES;
+ case 0x46:
+ return ROCKSDB_NAMESPACE::Tickers::WAL_FILE_SYNCED;
+ case 0x47:
+ return ROCKSDB_NAMESPACE::Tickers::WAL_FILE_BYTES;
+ case 0x48:
+ return ROCKSDB_NAMESPACE::Tickers::WRITE_DONE_BY_SELF;
+ case 0x49:
+ return ROCKSDB_NAMESPACE::Tickers::WRITE_DONE_BY_OTHER;
+ case 0x4A:
+ return ROCKSDB_NAMESPACE::Tickers::WRITE_TIMEDOUT;
+ case 0x4B:
+ return ROCKSDB_NAMESPACE::Tickers::WRITE_WITH_WAL;
+ case 0x4C:
+ return ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES;
+ case 0x4D:
+ return ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES;
+ case 0x4E:
+ return ROCKSDB_NAMESPACE::Tickers::FLUSH_WRITE_BYTES;
+ case 0x4F:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_DIRECT_LOAD_TABLE_PROPERTIES;
+ case 0x50:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_ACQUIRES;
+ case 0x51:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_RELEASES;
+ case 0x52:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_CLEANUPS;
+ case 0x53:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_COMPRESSED;
+ case 0x54:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_DECOMPRESSED;
+ case 0x55:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_NOT_COMPRESSED;
+ case 0x56:
+ return ROCKSDB_NAMESPACE::Tickers::MERGE_OPERATION_TOTAL_TIME;
+ case 0x57:
+ return ROCKSDB_NAMESPACE::Tickers::FILTER_OPERATION_TOTAL_TIME;
+ case 0x58:
+ return ROCKSDB_NAMESPACE::Tickers::ROW_CACHE_HIT;
+ case 0x59:
+ return ROCKSDB_NAMESPACE::Tickers::ROW_CACHE_MISS;
+ case 0x5A:
+ return ROCKSDB_NAMESPACE::Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES;
+ case 0x5B:
+ return ROCKSDB_NAMESPACE::Tickers::READ_AMP_TOTAL_READ_BYTES;
+ case 0x5C:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_RATE_LIMITER_DRAINS;
+ case 0x5D:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_ITER_SKIP;
+ case 0x5E:
+ return ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_KEYS_FOUND;
+ case -0x01:
+        // -0x01 so that 0x5F stays reserved for TICKER_ENUM_MAX, whose
+        // value this newly-added ticker would otherwise have changed
+ return ROCKSDB_NAMESPACE::Tickers::NO_ITERATOR_CREATED;
+ case 0x60:
+ return ROCKSDB_NAMESPACE::Tickers::NO_ITERATOR_DELETED;
+ case 0x61:
+ return ROCKSDB_NAMESPACE::Tickers::
+ COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE;
+ case 0x62:
+ return ROCKSDB_NAMESPACE::Tickers::COMPACTION_CANCELLED;
+ case 0x63:
+ return ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_FULL_POSITIVE;
+ case 0x64:
+ return ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_FULL_TRUE_POSITIVE;
+ case 0x65:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_PUT;
+ case 0x66:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_WRITE;
+ case 0x67:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_GET;
+ case 0x68:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_MULTIGET;
+ case 0x69:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_SEEK;
+ case 0x6A:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_NEXT;
+ case 0x6B:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_PREV;
+ case 0x6C:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_KEYS_WRITTEN;
+ case 0x6D:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_KEYS_READ;
+ case 0x6E:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BYTES_WRITTEN;
+ case 0x6F:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BYTES_READ;
+ case 0x70:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_INLINED;
+ case 0x71:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_INLINED_TTL;
+ case 0x72:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_BLOB;
+ case 0x73:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_BLOB_TTL;
+ case 0x74:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_BYTES_WRITTEN;
+ case 0x75:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_BYTES_READ;
+ case 0x76:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_SYNCED;
+ case 0x77:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EXPIRED_COUNT;
+ case 0x78:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EXPIRED_SIZE;
+ case 0x79:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EVICTED_COUNT;
+ case 0x7A:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EVICTED_SIZE;
+ case 0x7B:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_FILES;
+ case 0x7C:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_NEW_FILES;
+ case 0x7D:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_FAILURES;
+ case 0x7E:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_OVERWRITTEN;
+ case 0x7F:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_EXPIRED;
+ case -0x02:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_RELOCATED;
+ case -0x03:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_OVERWRITTEN;
+ case -0x04:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_EXPIRED;
+ case -0x05:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_RELOCATED;
+ case -0x06:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_NUM_FILES_EVICTED;
+ case -0x07:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_NUM_KEYS_EVICTED;
+ case -0x08:
+ return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_BYTES_EVICTED;
+ case -0x09:
+ return ROCKSDB_NAMESPACE::Tickers::TXN_PREPARE_MUTEX_OVERHEAD;
+ case -0x0A:
+ return ROCKSDB_NAMESPACE::Tickers::TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD;
+ case -0x0B:
+ return ROCKSDB_NAMESPACE::Tickers::TXN_DUPLICATE_KEY_OVERHEAD;
+ case -0x0C:
+ return ROCKSDB_NAMESPACE::Tickers::TXN_SNAPSHOT_MUTEX_OVERHEAD;
+ case -0x0D:
+ return ROCKSDB_NAMESPACE::Tickers::TXN_GET_TRY_AGAIN;
+ case 0x5F:
+ // 0x5F for backwards compatibility on current minor version.
+ return ROCKSDB_NAMESPACE::Tickers::TICKER_ENUM_MAX;
+
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_MISS;
+ }
+ }
+};
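+
+// Note: jbyte is a signed 8-bit type, so the positive code space ends at
+// 0x7F (used by BLOB_DB_GC_NUM_KEYS_EXPIRED); tickers added after that are
+// assigned negative byte values (-0x02 .. -0x0D), while -0x01 keeps
+// NO_ITERATOR_CREATED out of 0x5F, which stays reserved for
+// TICKER_ENUM_MAX. Usage sketch (illustrative only, "statistics" being an
+// assumed ROCKSDB_NAMESPACE::Statistics*):
+//
+//   const ROCKSDB_NAMESPACE::Tickers ticker =
+//       TickerTypeJni::toCppTickers(jticker_type);
+//   const uint64_t count = statistics->getTickerCount(ticker);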
+
+// The portal class for org.rocksdb.HistogramType
+class HistogramTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.HistogramType for the provided
+ // C++ ROCKSDB_NAMESPACE::Histograms enum
+ static jbyte toJavaHistogramsType(
+ const ROCKSDB_NAMESPACE::Histograms& histograms) {
+ switch(histograms) {
+ case ROCKSDB_NAMESPACE::Histograms::DB_GET:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::Histograms::DB_WRITE:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::Histograms::COMPACTION_TIME:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::Histograms::SUBCOMPACTION_SETUP_TIME:
+ return 0x3;
+ case ROCKSDB_NAMESPACE::Histograms::TABLE_SYNC_MICROS:
+ return 0x4;
+ case ROCKSDB_NAMESPACE::Histograms::COMPACTION_OUTFILE_SYNC_MICROS:
+ return 0x5;
+ case ROCKSDB_NAMESPACE::Histograms::WAL_FILE_SYNC_MICROS:
+ return 0x6;
+ case ROCKSDB_NAMESPACE::Histograms::MANIFEST_FILE_SYNC_MICROS:
+ return 0x7;
+ case ROCKSDB_NAMESPACE::Histograms::TABLE_OPEN_IO_MICROS:
+ return 0x8;
+ case ROCKSDB_NAMESPACE::Histograms::DB_MULTIGET:
+ return 0x9;
+ case ROCKSDB_NAMESPACE::Histograms::READ_BLOCK_COMPACTION_MICROS:
+ return 0xA;
+ case ROCKSDB_NAMESPACE::Histograms::READ_BLOCK_GET_MICROS:
+ return 0xB;
+ case ROCKSDB_NAMESPACE::Histograms::WRITE_RAW_BLOCK_MICROS:
+ return 0xC;
+ case ROCKSDB_NAMESPACE::Histograms::STALL_L0_SLOWDOWN_COUNT:
+ return 0xD;
+ case ROCKSDB_NAMESPACE::Histograms::STALL_MEMTABLE_COMPACTION_COUNT:
+ return 0xE;
+ case ROCKSDB_NAMESPACE::Histograms::STALL_L0_NUM_FILES_COUNT:
+ return 0xF;
+ case ROCKSDB_NAMESPACE::Histograms::HARD_RATE_LIMIT_DELAY_COUNT:
+ return 0x10;
+ case ROCKSDB_NAMESPACE::Histograms::SOFT_RATE_LIMIT_DELAY_COUNT:
+ return 0x11;
+ case ROCKSDB_NAMESPACE::Histograms::NUM_FILES_IN_SINGLE_COMPACTION:
+ return 0x12;
+ case ROCKSDB_NAMESPACE::Histograms::DB_SEEK:
+ return 0x13;
+ case ROCKSDB_NAMESPACE::Histograms::WRITE_STALL:
+ return 0x14;
+ case ROCKSDB_NAMESPACE::Histograms::SST_READ_MICROS:
+ return 0x15;
+ case ROCKSDB_NAMESPACE::Histograms::NUM_SUBCOMPACTIONS_SCHEDULED:
+ return 0x16;
+ case ROCKSDB_NAMESPACE::Histograms::BYTES_PER_READ:
+ return 0x17;
+ case ROCKSDB_NAMESPACE::Histograms::BYTES_PER_WRITE:
+ return 0x18;
+ case ROCKSDB_NAMESPACE::Histograms::BYTES_PER_MULTIGET:
+ return 0x19;
+ case ROCKSDB_NAMESPACE::Histograms::BYTES_COMPRESSED:
+ return 0x1A;
+ case ROCKSDB_NAMESPACE::Histograms::BYTES_DECOMPRESSED:
+ return 0x1B;
+ case ROCKSDB_NAMESPACE::Histograms::COMPRESSION_TIMES_NANOS:
+ return 0x1C;
+ case ROCKSDB_NAMESPACE::Histograms::DECOMPRESSION_TIMES_NANOS:
+ return 0x1D;
+ case ROCKSDB_NAMESPACE::Histograms::READ_NUM_MERGE_OPERANDS:
+ return 0x1E;
+      // 0x20 to skip 0x1F, which is reserved for HISTOGRAM_ENUM_MAX for
+      // backwards compatibility within the current minor version
+ case ROCKSDB_NAMESPACE::Histograms::FLUSH_TIME:
+ return 0x20;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_KEY_SIZE:
+ return 0x21;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_VALUE_SIZE:
+ return 0x22;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_WRITE_MICROS:
+ return 0x23;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_GET_MICROS:
+ return 0x24;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_MULTIGET_MICROS:
+ return 0x25;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_SEEK_MICROS:
+ return 0x26;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_NEXT_MICROS:
+ return 0x27;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_PREV_MICROS:
+ return 0x28;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_WRITE_MICROS:
+ return 0x29;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_READ_MICROS:
+ return 0x2A;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_SYNC_MICROS:
+ return 0x2B;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_GC_MICROS:
+ return 0x2C;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_COMPRESSION_MICROS:
+ return 0x2D;
+ case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_DECOMPRESSION_MICROS:
+ return 0x2E;
+ case ROCKSDB_NAMESPACE::Histograms::HISTOGRAM_ENUM_MAX:
+ // 0x1F for backwards compatibility on current minor version.
+ return 0x1F;
+
+ default:
+ // undefined/default
+ return 0x0;
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::Histograms enum for the
+  // provided Java org.rocksdb.HistogramType
+ static ROCKSDB_NAMESPACE::Histograms toCppHistograms(jbyte jhistograms_type) {
+ switch(jhistograms_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::Histograms::DB_GET;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::Histograms::DB_WRITE;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::Histograms::COMPACTION_TIME;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::Histograms::SUBCOMPACTION_SETUP_TIME;
+ case 0x4:
+ return ROCKSDB_NAMESPACE::Histograms::TABLE_SYNC_MICROS;
+ case 0x5:
+ return ROCKSDB_NAMESPACE::Histograms::COMPACTION_OUTFILE_SYNC_MICROS;
+ case 0x6:
+ return ROCKSDB_NAMESPACE::Histograms::WAL_FILE_SYNC_MICROS;
+ case 0x7:
+ return ROCKSDB_NAMESPACE::Histograms::MANIFEST_FILE_SYNC_MICROS;
+ case 0x8:
+ return ROCKSDB_NAMESPACE::Histograms::TABLE_OPEN_IO_MICROS;
+ case 0x9:
+ return ROCKSDB_NAMESPACE::Histograms::DB_MULTIGET;
+ case 0xA:
+ return ROCKSDB_NAMESPACE::Histograms::READ_BLOCK_COMPACTION_MICROS;
+ case 0xB:
+ return ROCKSDB_NAMESPACE::Histograms::READ_BLOCK_GET_MICROS;
+ case 0xC:
+ return ROCKSDB_NAMESPACE::Histograms::WRITE_RAW_BLOCK_MICROS;
+ case 0xD:
+ return ROCKSDB_NAMESPACE::Histograms::STALL_L0_SLOWDOWN_COUNT;
+ case 0xE:
+ return ROCKSDB_NAMESPACE::Histograms::STALL_MEMTABLE_COMPACTION_COUNT;
+ case 0xF:
+ return ROCKSDB_NAMESPACE::Histograms::STALL_L0_NUM_FILES_COUNT;
+ case 0x10:
+ return ROCKSDB_NAMESPACE::Histograms::HARD_RATE_LIMIT_DELAY_COUNT;
+ case 0x11:
+ return ROCKSDB_NAMESPACE::Histograms::SOFT_RATE_LIMIT_DELAY_COUNT;
+ case 0x12:
+ return ROCKSDB_NAMESPACE::Histograms::NUM_FILES_IN_SINGLE_COMPACTION;
+ case 0x13:
+ return ROCKSDB_NAMESPACE::Histograms::DB_SEEK;
+ case 0x14:
+ return ROCKSDB_NAMESPACE::Histograms::WRITE_STALL;
+ case 0x15:
+ return ROCKSDB_NAMESPACE::Histograms::SST_READ_MICROS;
+ case 0x16:
+ return ROCKSDB_NAMESPACE::Histograms::NUM_SUBCOMPACTIONS_SCHEDULED;
+ case 0x17:
+ return ROCKSDB_NAMESPACE::Histograms::BYTES_PER_READ;
+ case 0x18:
+ return ROCKSDB_NAMESPACE::Histograms::BYTES_PER_WRITE;
+ case 0x19:
+ return ROCKSDB_NAMESPACE::Histograms::BYTES_PER_MULTIGET;
+ case 0x1A:
+ return ROCKSDB_NAMESPACE::Histograms::BYTES_COMPRESSED;
+ case 0x1B:
+ return ROCKSDB_NAMESPACE::Histograms::BYTES_DECOMPRESSED;
+ case 0x1C:
+ return ROCKSDB_NAMESPACE::Histograms::COMPRESSION_TIMES_NANOS;
+ case 0x1D:
+ return ROCKSDB_NAMESPACE::Histograms::DECOMPRESSION_TIMES_NANOS;
+ case 0x1E:
+ return ROCKSDB_NAMESPACE::Histograms::READ_NUM_MERGE_OPERANDS;
+      // 0x20 to skip 0x1F, which is reserved for HISTOGRAM_ENUM_MAX for
+      // backwards compatibility within the current minor version
+ case 0x20:
+ return ROCKSDB_NAMESPACE::Histograms::FLUSH_TIME;
+ case 0x21:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_KEY_SIZE;
+ case 0x22:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_VALUE_SIZE;
+ case 0x23:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_WRITE_MICROS;
+ case 0x24:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_GET_MICROS;
+ case 0x25:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_MULTIGET_MICROS;
+ case 0x26:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_SEEK_MICROS;
+ case 0x27:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_NEXT_MICROS;
+ case 0x28:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_PREV_MICROS;
+ case 0x29:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_WRITE_MICROS;
+ case 0x2A:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_READ_MICROS;
+ case 0x2B:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_SYNC_MICROS;
+ case 0x2C:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_GC_MICROS;
+ case 0x2D:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_COMPRESSION_MICROS;
+ case 0x2E:
+ return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_DECOMPRESSION_MICROS;
+ case 0x1F:
+ // 0x1F for backwards compatibility on current minor version.
+ return ROCKSDB_NAMESPACE::Histograms::HISTOGRAM_ENUM_MAX;
+
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::Histograms::DB_GET;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.StatsLevel
+class StatsLevelJni {
+ public:
+ // Returns the equivalent org.rocksdb.StatsLevel for the provided
+ // C++ ROCKSDB_NAMESPACE::StatsLevel enum
+ static jbyte toJavaStatsLevel(
+ const ROCKSDB_NAMESPACE::StatsLevel& stats_level) {
+ switch(stats_level) {
+ case ROCKSDB_NAMESPACE::StatsLevel::kExceptDetailedTimers:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::StatsLevel::kExceptTimeForMutex:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::StatsLevel::kAll:
+ return 0x2;
+
+ default:
+ // undefined/default
+ return 0x0;
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::StatsLevel enum for the
+ // provided Java org.rocksdb.StatsLevel
+ static ROCKSDB_NAMESPACE::StatsLevel toCppStatsLevel(jbyte jstats_level) {
+ switch(jstats_level) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::StatsLevel::kExceptDetailedTimers;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::StatsLevel::kExceptTimeForMutex;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::StatsLevel::kAll;
+
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::StatsLevel::kExceptDetailedTimers;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.RateLimiterMode
+class RateLimiterModeJni {
+ public:
+ // Returns the equivalent org.rocksdb.RateLimiterMode for the provided
+ // C++ ROCKSDB_NAMESPACE::RateLimiter::Mode enum
+ static jbyte toJavaRateLimiterMode(
+ const ROCKSDB_NAMESPACE::RateLimiter::Mode& rate_limiter_mode) {
+ switch(rate_limiter_mode) {
+ case ROCKSDB_NAMESPACE::RateLimiter::Mode::kReadsOnly:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::RateLimiter::Mode::kWritesOnly:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::RateLimiter::Mode::kAllIo:
+ return 0x2;
+
+ default:
+ // undefined/default
+ return 0x1;
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::RateLimiter::Mode enum for
+ // the provided Java org.rocksdb.RateLimiterMode
+ static ROCKSDB_NAMESPACE::RateLimiter::Mode toCppRateLimiterMode(
+ jbyte jrate_limiter_mode) {
+ switch(jrate_limiter_mode) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::RateLimiter::Mode::kReadsOnly;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::RateLimiter::Mode::kWritesOnly;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::RateLimiter::Mode::kAllIo;
+
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::RateLimiter::Mode::kWritesOnly;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.MemoryUsageType
+class MemoryUsageTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.MemoryUsageType for the provided
+ // C++ ROCKSDB_NAMESPACE::MemoryUtil::UsageType enum
+ static jbyte toJavaMemoryUsageType(
+ const ROCKSDB_NAMESPACE::MemoryUtil::UsageType& usage_type) {
+ switch (usage_type) {
+ case ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kMemTableTotal:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kMemTableUnFlushed:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kTableReadersTotal:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kCacheTotal:
+ return 0x3;
+ default:
+ // undefined: use kNumUsageTypes
+ return 0x4;
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::MemoryUtil::UsageType enum for
+ // the provided Java org.rocksdb.MemoryUsageType
+ static ROCKSDB_NAMESPACE::MemoryUtil::UsageType toCppMemoryUsageType(
+ jbyte usage_type) {
+ switch (usage_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kMemTableTotal;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kMemTableUnFlushed;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kTableReadersTotal;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kCacheTotal;
+ default:
+ // undefined/default: use kNumUsageTypes
+ return ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kNumUsageTypes;
+ }
+ }
+};
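+
+// Usage sketch (illustrative only, not part of this header): converting
+// the keys of the map produced by
+// ROCKSDB_NAMESPACE::MemoryUtil::GetApproximateMemoryUsageByType so they
+// can be handed back to Java:
+//
+//   std::map<ROCKSDB_NAMESPACE::MemoryUtil::UsageType, uint64_t> usage;
+//   // ... populated via MemoryUtil::GetApproximateMemoryUsageByType ...
+//   for (const auto& entry : usage) {
+//     const jbyte jusage_type =
+//         MemoryUsageTypeJni::toJavaMemoryUsageType(entry.first);
+//     // entry.second (a byte count) is then stored under jusage_type
+//   }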
+
+// The portal class for org.rocksdb.Transaction
+class TransactionJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.Transaction
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env,
+ "org/rocksdb/Transaction");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.Transaction.WaitingTransactions object
+ *
+ * @param env A pointer to the Java environment
+ * @param jtransaction A Java org.rocksdb.Transaction object
+ * @param column_family_id The id of the column family
+ * @param key The key
+ * @param transaction_ids The transaction ids
+ *
+ * @return A reference to a Java
+ * org.rocksdb.Transaction.WaitingTransactions object,
+ *     or nullptr if an exception occurs
+ */
+ static jobject newWaitingTransactions(JNIEnv* env, jobject jtransaction,
+ const uint32_t column_family_id, const std::string &key,
+ const std::vector<TransactionID> &transaction_ids) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+    jmethodID mid = env->GetMethodID(
+        jclazz, "newWaitingTransactions",
+        "(JLjava/lang/String;[J)Lorg/rocksdb/Transaction$WaitingTransactions;");
+ if(mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jstring jkey = env->NewStringUTF(key.c_str());
+ if(jkey == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ const size_t len = transaction_ids.size();
+ jlongArray jtransaction_ids = env->NewLongArray(static_cast<jsize>(len));
+ if(jtransaction_ids == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jkey);
+ return nullptr;
+ }
+
+ jlong *body = env->GetLongArrayElements(jtransaction_ids, nullptr);
+ if(body == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jkey);
+ env->DeleteLocalRef(jtransaction_ids);
+ return nullptr;
+ }
+ for(size_t i = 0; i < len; ++i) {
+ body[i] = static_cast<jlong>(transaction_ids[i]);
+ }
+ env->ReleaseLongArrayElements(jtransaction_ids, body, 0);
+
+ jobject jwaiting_transactions = env->CallObjectMethod(jtransaction,
+ mid, static_cast<jlong>(column_family_id), jkey, jtransaction_ids);
+ if(env->ExceptionCheck()) {
+ // exception thrown: InstantiationException or OutOfMemoryError
+ env->DeleteLocalRef(jkey);
+ env->DeleteLocalRef(jtransaction_ids);
+ return nullptr;
+ }
+
+ return jwaiting_transactions;
+ }
+};
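+
+// Note: on the success path, newWaitingTransactions leaves the local
+// references jkey and jtransaction_ids to be reclaimed automatically when
+// the calling native method returns to the JVM; only the failure paths
+// delete them eagerly.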
+
+// The portal class for org.rocksdb.TransactionDB
+class TransactionDBJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.TransactionDB
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env,
+ "org/rocksdb/TransactionDB");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.TransactionDB.DeadlockInfo object
+ *
+ * @param env A pointer to the Java environment
+ * @param jtransaction_db A Java org.rocksdb.TransactionDB object
+ * @param transaction_id The id of the transaction
+ * @param column_family_id The id of the column family
+ * @param waiting_key The key upon which the transaction is waiting
+ * @param exclusive True if the lock is exclusive, false if it is shared
+ *
+ * @return A reference to a Java
+ *     org.rocksdb.TransactionDB.DeadlockInfo object,
+ *     or nullptr if an exception occurs
+ */
+ static jobject newDeadlockInfo(
+ JNIEnv* env, jobject jtransaction_db,
+ const ROCKSDB_NAMESPACE::TransactionID transaction_id,
+ const uint32_t column_family_id, const std::string& waiting_key,
+ const bool exclusive) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+    jmethodID mid = env->GetMethodID(
+        jclazz, "newDeadlockInfo",
+        "(JJLjava/lang/String;Z)Lorg/rocksdb/TransactionDB$DeadlockInfo;");
+ if(mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jstring jwaiting_key = env->NewStringUTF(waiting_key.c_str());
+ if(jwaiting_key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jdeadlock_info = env->CallObjectMethod(jtransaction_db,
+ mid, transaction_id, static_cast<jlong>(column_family_id),
+ jwaiting_key, exclusive);
+ if(env->ExceptionCheck()) {
+ // exception thrown: InstantiationException or OutOfMemoryError
+ env->DeleteLocalRef(jwaiting_key);
+ return nullptr;
+ }
+
+ return jdeadlock_info;
+ }
+};
+
+// The portal class for org.rocksdb.TxnDBWritePolicy
+class TxnDBWritePolicyJni {
+ public:
+ // Returns the equivalent org.rocksdb.TxnDBWritePolicy for the provided
+ // C++ ROCKSDB_NAMESPACE::TxnDBWritePolicy enum
+ static jbyte toJavaTxnDBWritePolicy(
+ const ROCKSDB_NAMESPACE::TxnDBWritePolicy& txndb_write_policy) {
+ switch (txndb_write_policy) {
+ case ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_COMMITTED:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_PREPARED:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_UNPREPARED:
+ return 0x2;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::TxnDBWritePolicy enum for the
+ // provided Java org.rocksdb.TxnDBWritePolicy
+ static ROCKSDB_NAMESPACE::TxnDBWritePolicy toCppTxnDBWritePolicy(
+ jbyte jtxndb_write_policy) {
+ switch (jtxndb_write_policy) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_COMMITTED;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_PREPARED;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_UNPREPARED;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_COMMITTED;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.TransactionDB.KeyLockInfo
+class KeyLockInfoJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.TransactionDB.KeyLockInfo
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env,
+ "org/rocksdb/TransactionDB$KeyLockInfo");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.TransactionDB.KeyLockInfo object
+ * with the same properties as the provided C++ ROCKSDB_NAMESPACE::KeyLockInfo
+ * object
+ *
+ * @param env A pointer to the Java environment
+ * @param key_lock_info The ROCKSDB_NAMESPACE::KeyLockInfo object
+ *
+ * @return A reference to a Java
+ * org.rocksdb.TransactionDB.KeyLockInfo object,
+ *     or nullptr if an exception occurs
+ */
+ static jobject construct(
+ JNIEnv* env, const ROCKSDB_NAMESPACE::KeyLockInfo& key_lock_info) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(
+ jclazz, "<init>", "(Ljava/lang/String;[JZ)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jstring jkey = env->NewStringUTF(key_lock_info.key.c_str());
+ if (jkey == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+    const jsize jtransaction_ids_len = static_cast<jsize>(key_lock_info.ids.size());
+    jlongArray jtransactions_ids = env->NewLongArray(jtransaction_ids_len);
+    if (jtransactions_ids == nullptr) {
+      // exception thrown: OutOfMemoryError
+      env->DeleteLocalRef(jkey);
+      return nullptr;
+    }
+    // copy the waiting transaction ids into the Java array
+    env->SetLongArrayRegion(
+        jtransactions_ids, 0, jtransaction_ids_len,
+        reinterpret_cast<const jlong*>(key_lock_info.ids.data()));
+
+ const jobject jkey_lock_info = env->NewObject(jclazz, mid,
+ jkey, jtransactions_ids, key_lock_info.exclusive);
+ if(jkey_lock_info == nullptr) {
+ // exception thrown: InstantiationException or OutOfMemoryError
+ env->DeleteLocalRef(jtransactions_ids);
+ env->DeleteLocalRef(jkey);
+ return nullptr;
+ }
+
+ return jkey_lock_info;
+ }
+};
+
+// The portal class for org.rocksdb.TransactionDB.DeadlockInfo
+class DeadlockInfoJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.TransactionDB.DeadlockInfo
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env,"org/rocksdb/TransactionDB$DeadlockInfo");
+ }
+};
+
+// The portal class for org.rocksdb.TransactionDB.DeadlockPath
+class DeadlockPathJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.TransactionDB.DeadlockPath
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env,
+ "org/rocksdb/TransactionDB$DeadlockPath");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.TransactionDB.DeadlockPath object
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to a Java
+ * org.rocksdb.TransactionDB.DeadlockPath object,
+   * or nullptr if an exception occurs
+ */
+ static jobject construct(JNIEnv* env,
+ const jobjectArray jdeadlock_infos, const bool limit_exceeded) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+    jmethodID mid = env->GetMethodID(
+        jclazz, "<init>", "([Lorg/rocksdb/TransactionDB$DeadlockInfo;Z)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ const jobject jdeadlock_path = env->NewObject(jclazz, mid,
+ jdeadlock_infos, limit_exceeded);
+ if(jdeadlock_path == nullptr) {
+ // exception thrown: InstantiationException or OutOfMemoryError
+ return nullptr;
+ }
+
+ return jdeadlock_path;
+ }
+};
+
+class AbstractTableFilterJni
+ : public RocksDBNativeClass<
+ const ROCKSDB_NAMESPACE::TableFilterJniCallback*,
+ AbstractTableFilterJni> {
+ public:
+ /**
+ * Get the Java Method: TableFilter#filter(TableProperties)
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getFilterMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "filter", "(Lorg/rocksdb/TableProperties;)Z");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ private:
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/TableFilter");
+ }
+};
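+
+// Usage sketch (assumed caller, not from this file): a native callback such
+// as TableFilterJniCallback would invoke the cached method id roughly as:
+//
+//   jmethodID mid = AbstractTableFilterJni::getFilterMethod(env);
+//   jboolean jfiltered =
+//       env->CallBooleanMethod(jtable_filter_obj, mid, jtable_properties);
+//
+// where jtable_filter_obj and jtable_properties are hypothetical local
+// references to the Java TableFilter and TableProperties objects.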
+
+class TablePropertiesJni : public JavaClass {
+ public:
+ /**
+ * Create a new Java org.rocksdb.TableProperties object.
+ *
+ * @param env A pointer to the Java environment
+   * @param table_properties A C++ table properties object
+   *
+   * @return A reference to a Java org.rocksdb.TableProperties object, or
+   * nullptr if an exception occurs
+ */
+ static jobject fromCppTableProperties(
+ JNIEnv* env, const ROCKSDB_NAMESPACE::TableProperties& table_properties) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JJJJJJJJJJJJJJJJJJJ[BLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/util/Map;Ljava/util/Map;Ljava/util/Map;)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jbyteArray jcolumn_family_name = ROCKSDB_NAMESPACE::JniUtil::copyBytes(
+ env, table_properties.column_family_name);
+ if (jcolumn_family_name == nullptr) {
+ // exception occurred creating java string
+ return nullptr;
+ }
+
+ jstring jfilter_policy_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &table_properties.filter_policy_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ return nullptr;
+ }
+
+ jstring jcomparator_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &table_properties.comparator_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ return nullptr;
+ }
+
+ jstring jmerge_operator_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &table_properties.merge_operator_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ return nullptr;
+ }
+
+ jstring jprefix_extractor_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &table_properties.prefix_extractor_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ return nullptr;
+ }
+
+ jstring jproperty_collectors_names =
+ ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &table_properties.property_collectors_names, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ env->DeleteLocalRef(jprefix_extractor_name);
+ return nullptr;
+ }
+
+ jstring jcompression_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &table_properties.compression_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ env->DeleteLocalRef(jprefix_extractor_name);
+ env->DeleteLocalRef(jproperty_collectors_names);
+ return nullptr;
+ }
+
+ // Map<String, String>
+ jobject juser_collected_properties =
+ ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(
+ env, &table_properties.user_collected_properties);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java map
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ env->DeleteLocalRef(jprefix_extractor_name);
+ env->DeleteLocalRef(jproperty_collectors_names);
+ env->DeleteLocalRef(jcompression_name);
+ return nullptr;
+ }
+
+ // Map<String, String>
+ jobject jreadable_properties = ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(
+ env, &table_properties.readable_properties);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java map
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ env->DeleteLocalRef(jprefix_extractor_name);
+ env->DeleteLocalRef(jproperty_collectors_names);
+ env->DeleteLocalRef(jcompression_name);
+ env->DeleteLocalRef(juser_collected_properties);
+ return nullptr;
+ }
+
+ // Map<String, Long>
+ jobject jproperties_offsets = ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(
+ env, &table_properties.properties_offsets);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java map
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ env->DeleteLocalRef(jprefix_extractor_name);
+ env->DeleteLocalRef(jproperty_collectors_names);
+ env->DeleteLocalRef(jcompression_name);
+ env->DeleteLocalRef(juser_collected_properties);
+ env->DeleteLocalRef(jreadable_properties);
+ return nullptr;
+ }
+
+ jobject jtable_properties = env->NewObject(jclazz, mid,
+ static_cast<jlong>(table_properties.data_size),
+ static_cast<jlong>(table_properties.index_size),
+ static_cast<jlong>(table_properties.index_partitions),
+ static_cast<jlong>(table_properties.top_level_index_size),
+ static_cast<jlong>(table_properties.index_key_is_user_key),
+ static_cast<jlong>(table_properties.index_value_is_delta_encoded),
+ static_cast<jlong>(table_properties.filter_size),
+ static_cast<jlong>(table_properties.raw_key_size),
+ static_cast<jlong>(table_properties.raw_value_size),
+ static_cast<jlong>(table_properties.num_data_blocks),
+ static_cast<jlong>(table_properties.num_entries),
+ static_cast<jlong>(table_properties.num_deletions),
+ static_cast<jlong>(table_properties.num_merge_operands),
+ static_cast<jlong>(table_properties.num_range_deletions),
+ static_cast<jlong>(table_properties.format_version),
+ static_cast<jlong>(table_properties.fixed_key_len),
+ static_cast<jlong>(table_properties.column_family_id),
+ static_cast<jlong>(table_properties.creation_time),
+ static_cast<jlong>(table_properties.oldest_key_time),
+ jcolumn_family_name,
+ jfilter_policy_name,
+ jcomparator_name,
+ jmerge_operator_name,
+ jprefix_extractor_name,
+ jproperty_collectors_names,
+ jcompression_name,
+ juser_collected_properties,
+ jreadable_properties,
+ jproperties_offsets
+ );
+
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jtable_properties;
+ }
+
+ private:
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/TableProperties");
+ }
+};
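+
+// Caller sketch (illustrative only): a JNI entry point returning table
+// properties to Java would typically follow the nullptr-propagation
+// convention used throughout this header:
+//
+//   jobject jprops = ROCKSDB_NAMESPACE::TablePropertiesJni::
+//       fromCppTableProperties(env, table_properties);
+//   if (jprops == nullptr) {
+//     return nullptr;  // a Java exception is already pending
+//   }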
+
+class ColumnFamilyDescriptorJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.ColumnFamilyDescriptor
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyDescriptor");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.ColumnFamilyDescriptor object with the same
+ * properties as the provided C++ ROCKSDB_NAMESPACE::ColumnFamilyDescriptor
+ * object
+ *
+ * @param env A pointer to the Java environment
+ * @param cfd A pointer to ROCKSDB_NAMESPACE::ColumnFamilyDescriptor object
+ *
+ * @return A reference to a Java org.rocksdb.ColumnFamilyDescriptor object, or
+   * nullptr if an exception occurs
+ */
+  static jobject construct(JNIEnv* env, ColumnFamilyDescriptor* cfd) {
+    jbyteArray jcf_name = JniUtil::copyBytes(env, cfd->name);
+    if (jcf_name == nullptr) {
+      // exception occurred creating java byte array
+      return nullptr;
+    }
+    jobject cfopts = ColumnFamilyOptionsJni::construct(env, &(cfd->options));
+    if (cfopts == nullptr) {
+      // exception occurred creating java object
+      env->DeleteLocalRef(jcf_name);
+      return nullptr;
+    }
+
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>",
+ "([BLorg/rocksdb/ColumnFamilyOptions;)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ env->DeleteLocalRef(jcf_name);
+ return nullptr;
+ }
+
+ jobject jcfd = env->NewObject(jclazz, mid, jcf_name, cfopts);
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jcf_name);
+ return nullptr;
+ }
+
+ return jcfd;
+ }
+
+ /**
+ * Get the Java Method: ColumnFamilyDescriptor#columnFamilyName
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getColumnFamilyNameMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "columnFamilyName", "()[B");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: ColumnFamilyDescriptor#columnFamilyOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getColumnFamilyOptionsMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "columnFamilyOptions", "()Lorg/rocksdb/ColumnFamilyOptions;");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
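+
+// Construction sketch (illustrative only): wrapping a C++ descriptor for
+// return to Java, assuming a descriptor named cfd:
+//
+//   jobject jcfd = ColumnFamilyDescriptorJni::construct(env, &cfd);
+//   if (jcfd == nullptr) {
+//     return nullptr;  // pending Java exception
+//   }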
+
+// The portal class for org.rocksdb.IndexType
+class IndexTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.IndexType for the provided
+ // C++ ROCKSDB_NAMESPACE::IndexType enum
+ static jbyte toJavaIndexType(
+ const ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType& index_type) {
+ switch (index_type) {
+ case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::kBinarySearch:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::kHashSearch:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::
+ kTwoLevelIndexSearch:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::
+ kBinarySearchWithFirstKey:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::IndexType enum for the
+ // provided Java org.rocksdb.IndexType
+ static ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType toCppIndexType(
+ jbyte jindex_type) {
+ switch (jindex_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::
+ kBinarySearch;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::
+ kHashSearch;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::
+ kTwoLevelIndexSearch;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::
+ kBinarySearchWithFirstKey;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::
+ kBinarySearch;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.DataBlockIndexType
+class DataBlockIndexTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.DataBlockIndexType for the provided
+ // C++ ROCKSDB_NAMESPACE::DataBlockIndexType enum
+ static jbyte toJavaDataBlockIndexType(
+ const ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType&
+ index_type) {
+ switch (index_type) {
+ case ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType::
+ kDataBlockBinarySearch:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType::
+ kDataBlockBinaryAndHash:
+ return 0x1;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::DataBlockIndexType enum for
+ // the provided Java org.rocksdb.DataBlockIndexType
+ static ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType
+ toCppDataBlockIndexType(jbyte jindex_type) {
+ switch (jindex_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType::
+ kDataBlockBinarySearch;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType::
+ kDataBlockBinaryAndHash;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType::
+ kDataBlockBinarySearch;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.ChecksumType
+class ChecksumTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.ChecksumType for the provided
+ // C++ ROCKSDB_NAMESPACE::ChecksumType enum
+ static jbyte toJavaChecksumType(
+ const ROCKSDB_NAMESPACE::ChecksumType& checksum_type) {
+ switch (checksum_type) {
+ case ROCKSDB_NAMESPACE::ChecksumType::kNoChecksum:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::ChecksumType::kCRC32c:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::ChecksumType::kxxHash:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::ChecksumType::kxxHash64:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::ChecksumType enum for the
+ // provided Java org.rocksdb.ChecksumType
+ static ROCKSDB_NAMESPACE::ChecksumType toCppChecksumType(
+ jbyte jchecksum_type) {
+ switch (jchecksum_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::ChecksumType::kNoChecksum;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::ChecksumType::kCRC32c;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::ChecksumType::kxxHash;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::ChecksumType::kxxHash64;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::ChecksumType::kCRC32c;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.Priority
+class PriorityJni {
+ public:
+ // Returns the equivalent org.rocksdb.Priority for the provided
+ // C++ ROCKSDB_NAMESPACE::Env::Priority enum
+ static jbyte toJavaPriority(
+ const ROCKSDB_NAMESPACE::Env::Priority& priority) {
+ switch (priority) {
+ case ROCKSDB_NAMESPACE::Env::Priority::BOTTOM:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::Env::Priority::LOW:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::Env::Priority::HIGH:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::Env::Priority::TOTAL:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+  // Returns the equivalent C++ ROCKSDB_NAMESPACE::Env::Priority enum for the
+ // provided Java org.rocksdb.Priority
+ static ROCKSDB_NAMESPACE::Env::Priority toCppPriority(jbyte jpriority) {
+ switch (jpriority) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::Env::Priority::BOTTOM;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::Env::Priority::LOW;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::Env::Priority::HIGH;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::Env::Priority::TOTAL;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::Env::Priority::LOW;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.ThreadType
+class ThreadTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.ThreadType for the provided
+ // C++ ROCKSDB_NAMESPACE::ThreadStatus::ThreadType enum
+ static jbyte toJavaThreadType(
+ const ROCKSDB_NAMESPACE::ThreadStatus::ThreadType& thread_type) {
+ switch (thread_type) {
+ case ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::HIGH_PRIORITY:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::LOW_PRIORITY:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::USER:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::BOTTOM_PRIORITY:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::ThreadStatus::ThreadType enum
+ // for the provided Java org.rocksdb.ThreadType
+ static ROCKSDB_NAMESPACE::ThreadStatus::ThreadType toCppThreadType(
+ jbyte jthread_type) {
+ switch (jthread_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::HIGH_PRIORITY;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::LOW_PRIORITY;
+ case 0x2:
+        return ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::USER;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::BOTTOM_PRIORITY;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::LOW_PRIORITY;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.OperationType
+class OperationTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.OperationType for the provided
+ // C++ ROCKSDB_NAMESPACE::ThreadStatus::OperationType enum
+ static jbyte toJavaOperationType(
+ const ROCKSDB_NAMESPACE::ThreadStatus::OperationType& operation_type) {
+ switch (operation_type) {
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_UNKNOWN:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_COMPACTION:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_FLUSH:
+ return 0x2;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::ThreadStatus::OperationType
+ // enum for the provided Java org.rocksdb.OperationType
+ static ROCKSDB_NAMESPACE::ThreadStatus::OperationType toCppOperationType(
+ jbyte joperation_type) {
+ switch (joperation_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_UNKNOWN;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_COMPACTION;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_FLUSH;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_UNKNOWN;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.OperationStage
+class OperationStageJni {
+ public:
+ // Returns the equivalent org.rocksdb.OperationStage for the provided
+ // C++ ROCKSDB_NAMESPACE::ThreadStatus::OperationStage enum
+ static jbyte toJavaOperationStage(
+ const ROCKSDB_NAMESPACE::ThreadStatus::OperationStage& operation_stage) {
+ switch (operation_stage) {
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::STAGE_UNKNOWN:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::STAGE_FLUSH_RUN:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_FLUSH_WRITE_L0:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_COMPACTION_PREPARE:
+ return 0x3;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_COMPACTION_RUN:
+ return 0x4;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_COMPACTION_PROCESS_KV:
+ return 0x5;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_COMPACTION_INSTALL:
+ return 0x6;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_COMPACTION_SYNC_FILE:
+ return 0x7;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_PICK_MEMTABLES_TO_FLUSH:
+ return 0x8;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_MEMTABLE_ROLLBACK:
+ return 0x9;
+ case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS:
+ return 0xA;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::ThreadStatus::OperationStage
+ // enum for the provided Java org.rocksdb.OperationStage
+ static ROCKSDB_NAMESPACE::ThreadStatus::OperationStage toCppOperationStage(
+ jbyte joperation_stage) {
+ switch (joperation_stage) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::STAGE_UNKNOWN;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::STAGE_FLUSH_RUN;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_FLUSH_WRITE_L0;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_COMPACTION_PREPARE;
+ case 0x4:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_COMPACTION_RUN;
+ case 0x5:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_COMPACTION_PROCESS_KV;
+ case 0x6:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_COMPACTION_INSTALL;
+ case 0x7:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_COMPACTION_SYNC_FILE;
+ case 0x8:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_PICK_MEMTABLES_TO_FLUSH;
+ case 0x9:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_MEMTABLE_ROLLBACK;
+ case 0xA:
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::
+ STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::STAGE_UNKNOWN;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.StateType
+class StateTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.StateType for the provided
+ // C++ ROCKSDB_NAMESPACE::ThreadStatus::StateType enum
+ static jbyte toJavaStateType(
+ const ROCKSDB_NAMESPACE::ThreadStatus::StateType& state_type) {
+ switch (state_type) {
+ case ROCKSDB_NAMESPACE::ThreadStatus::StateType::STATE_UNKNOWN:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::ThreadStatus::StateType::STATE_MUTEX_WAIT:
+ return 0x1;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::ThreadStatus::StateType enum
+ // for the provided Java org.rocksdb.StateType
+ static ROCKSDB_NAMESPACE::ThreadStatus::StateType toCppStateType(
+ jbyte jstate_type) {
+ switch (jstate_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::ThreadStatus::StateType::STATE_UNKNOWN;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::ThreadStatus::StateType::STATE_MUTEX_WAIT;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::ThreadStatus::StateType::STATE_UNKNOWN;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.ThreadStatus
+class ThreadStatusJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.ThreadStatus
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env,
+ "org/rocksdb/ThreadStatus");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.ThreadStatus object with the same
+ * properties as the provided C++ ROCKSDB_NAMESPACE::ThreadStatus object
+ *
+ * @param env A pointer to the Java environment
+ * @param thread_status A pointer to ROCKSDB_NAMESPACE::ThreadStatus object
+ *
+   * @return A reference to a Java org.rocksdb.ThreadStatus object, or
+   * nullptr if an exception occurs
+ */
+ static jobject construct(
+ JNIEnv* env, const ROCKSDB_NAMESPACE::ThreadStatus* thread_status) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JBLjava/lang/String;Ljava/lang/String;BJB[JB)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jstring jdb_name =
+ JniUtil::toJavaString(env, &(thread_status->db_name), true);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return nullptr;
+ }
+
+ jstring jcf_name =
+ JniUtil::toJavaString(env, &(thread_status->cf_name), true);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jdb_name);
+ return nullptr;
+ }
+
+ // long[]
+ const jsize len = static_cast<jsize>(
+ ROCKSDB_NAMESPACE::ThreadStatus::kNumOperationProperties);
+ jlongArray joperation_properties =
+ env->NewLongArray(len);
+ if (joperation_properties == nullptr) {
+ // an exception occurred
+ env->DeleteLocalRef(jdb_name);
+ env->DeleteLocalRef(jcf_name);
+ return nullptr;
+ }
+ jlong *body = env->GetLongArrayElements(joperation_properties, nullptr);
+ if (body == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jdb_name);
+ env->DeleteLocalRef(jcf_name);
+ env->DeleteLocalRef(joperation_properties);
+ return nullptr;
+ }
+    for (jsize i = 0; i < len; ++i) {
+ body[i] = static_cast<jlong>(thread_status->op_properties[i]);
+ }
+ env->ReleaseLongArrayElements(joperation_properties, body, 0);
+
+    jobject jthread_status = env->NewObject(jclazz, mid,
+ static_cast<jlong>(thread_status->thread_id),
+ ThreadTypeJni::toJavaThreadType(thread_status->thread_type),
+ jdb_name,
+ jcf_name,
+ OperationTypeJni::toJavaOperationType(thread_status->operation_type),
+ static_cast<jlong>(thread_status->op_elapsed_micros),
+ OperationStageJni::toJavaOperationStage(thread_status->operation_stage),
+ joperation_properties,
+ StateTypeJni::toJavaStateType(thread_status->state_type));
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(jdb_name);
+ env->DeleteLocalRef(jcf_name);
+ env->DeleteLocalRef(joperation_properties);
+ return nullptr;
+ }
+
+ // cleanup
+ env->DeleteLocalRef(jdb_name);
+ env->DeleteLocalRef(jcf_name);
+ env->DeleteLocalRef(joperation_properties);
+
+    return jthread_status;
+ }
+};
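+
+// Caller sketch (assumed, not part of this header): converting the result of
+// ROCKSDB_NAMESPACE::Env::GetThreadList() into Java objects:
+//
+//   std::vector<ROCKSDB_NAMESPACE::ThreadStatus> thread_list;
+//   env_ptr->GetThreadList(&thread_list);
+//   for (const auto& ts : thread_list) {
+//     jobject jts = ROCKSDB_NAMESPACE::ThreadStatusJni::construct(env, &ts);
+//     // ... store jts in a jobjectArray, then DeleteLocalRef(jts)
+//   }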
+
+// The portal class for org.rocksdb.CompactionStyle
+class CompactionStyleJni {
+ public:
+ // Returns the equivalent org.rocksdb.CompactionStyle for the provided
+ // C++ ROCKSDB_NAMESPACE::CompactionStyle enum
+ static jbyte toJavaCompactionStyle(
+ const ROCKSDB_NAMESPACE::CompactionStyle& compaction_style) {
+ switch (compaction_style) {
+ case ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleLevel:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleUniversal:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleFIFO:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleNone:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::CompactionStyle enum for the
+ // provided Java org.rocksdb.CompactionStyle
+ static ROCKSDB_NAMESPACE::CompactionStyle toCppCompactionStyle(
+ jbyte jcompaction_style) {
+ switch (jcompaction_style) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleLevel;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleUniversal;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleFIFO;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleNone;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleLevel;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.CompactionReason
+class CompactionReasonJni {
+ public:
+ // Returns the equivalent org.rocksdb.CompactionReason for the provided
+ // C++ ROCKSDB_NAMESPACE::CompactionReason enum
+ static jbyte toJavaCompactionReason(
+ const ROCKSDB_NAMESPACE::CompactionReason& compaction_reason) {
+ switch (compaction_reason) {
+ case ROCKSDB_NAMESPACE::CompactionReason::kUnknown:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::CompactionReason::kLevelL0FilesNum:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::CompactionReason::kLevelMaxLevelSize:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::CompactionReason::kUniversalSizeAmplification:
+ return 0x3;
+ case ROCKSDB_NAMESPACE::CompactionReason::kUniversalSizeRatio:
+ return 0x4;
+ case ROCKSDB_NAMESPACE::CompactionReason::kUniversalSortedRunNum:
+ return 0x5;
+ case ROCKSDB_NAMESPACE::CompactionReason::kFIFOMaxSize:
+ return 0x6;
+ case ROCKSDB_NAMESPACE::CompactionReason::kFIFOReduceNumFiles:
+ return 0x7;
+ case ROCKSDB_NAMESPACE::CompactionReason::kFIFOTtl:
+ return 0x8;
+ case ROCKSDB_NAMESPACE::CompactionReason::kManualCompaction:
+ return 0x9;
+ case ROCKSDB_NAMESPACE::CompactionReason::kFilesMarkedForCompaction:
+ return 0x10;
+ case ROCKSDB_NAMESPACE::CompactionReason::kBottommostFiles:
+ return 0x0A;
+ case ROCKSDB_NAMESPACE::CompactionReason::kTtl:
+ return 0x0B;
+ case ROCKSDB_NAMESPACE::CompactionReason::kFlush:
+ return 0x0C;
+ case ROCKSDB_NAMESPACE::CompactionReason::kExternalSstIngestion:
+ return 0x0D;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::CompactionReason enum for the
+ // provided Java org.rocksdb.CompactionReason
+ static ROCKSDB_NAMESPACE::CompactionReason toCppCompactionReason(
+ jbyte jcompaction_reason) {
+ switch (jcompaction_reason) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::CompactionReason::kUnknown;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::CompactionReason::kLevelL0FilesNum;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::CompactionReason::kLevelMaxLevelSize;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::CompactionReason::kUniversalSizeAmplification;
+ case 0x4:
+ return ROCKSDB_NAMESPACE::CompactionReason::kUniversalSizeRatio;
+ case 0x5:
+ return ROCKSDB_NAMESPACE::CompactionReason::kUniversalSortedRunNum;
+ case 0x6:
+ return ROCKSDB_NAMESPACE::CompactionReason::kFIFOMaxSize;
+ case 0x7:
+ return ROCKSDB_NAMESPACE::CompactionReason::kFIFOReduceNumFiles;
+ case 0x8:
+ return ROCKSDB_NAMESPACE::CompactionReason::kFIFOTtl;
+ case 0x9:
+ return ROCKSDB_NAMESPACE::CompactionReason::kManualCompaction;
+ case 0x10:
+ return ROCKSDB_NAMESPACE::CompactionReason::kFilesMarkedForCompaction;
+ case 0x0A:
+ return ROCKSDB_NAMESPACE::CompactionReason::kBottommostFiles;
+ case 0x0B:
+ return ROCKSDB_NAMESPACE::CompactionReason::kTtl;
+ case 0x0C:
+ return ROCKSDB_NAMESPACE::CompactionReason::kFlush;
+ case 0x0D:
+ return ROCKSDB_NAMESPACE::CompactionReason::kExternalSstIngestion;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::CompactionReason::kUnknown;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.WalFileType
+class WalFileTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.WalFileType for the provided
+ // C++ ROCKSDB_NAMESPACE::WalFileType enum
+ static jbyte toJavaWalFileType(
+ const ROCKSDB_NAMESPACE::WalFileType& wal_file_type) {
+ switch (wal_file_type) {
+ case ROCKSDB_NAMESPACE::WalFileType::kArchivedLogFile:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::WalFileType::kAliveLogFile:
+ return 0x1;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::WalFileType enum for the
+ // provided Java org.rocksdb.WalFileType
+ static ROCKSDB_NAMESPACE::WalFileType toCppWalFileType(jbyte jwal_file_type) {
+ switch (jwal_file_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::WalFileType::kArchivedLogFile;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::WalFileType::kAliveLogFile;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::WalFileType::kAliveLogFile;
+ }
+ }
+};
+
+class LogFileJni : public JavaClass {
+ public:
+ /**
+ * Create a new Java org.rocksdb.LogFile object.
+ *
+ * @param env A pointer to the Java environment
+   * @param log_file A C++ log file object
+   *
+   * @return A reference to a Java org.rocksdb.LogFile object, or
+   * nullptr if an exception occurs
+ */
+ static jobject fromCppLogFile(JNIEnv* env,
+ ROCKSDB_NAMESPACE::LogFile* log_file) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(Ljava/lang/String;JBJJ)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ std::string path_name = log_file->PathName();
+ jstring jpath_name =
+ ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &path_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ return nullptr;
+ }
+
+ jobject jlog_file = env->NewObject(
+ jclazz, mid, jpath_name, static_cast<jlong>(log_file->LogNumber()),
+ ROCKSDB_NAMESPACE::WalFileTypeJni::toJavaWalFileType(log_file->Type()),
+ static_cast<jlong>(log_file->StartSequence()),
+ static_cast<jlong>(log_file->SizeFileBytes()));
+
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jpath_name);
+ return nullptr;
+ }
+
+ // cleanup
+ env->DeleteLocalRef(jpath_name);
+
+ return jlog_file;
+ }
+
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/LogFile");
+ }
+};
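+
+// Caller sketch (illustrative): DB::GetSortedWalFiles() yields a vector of
+// std::unique_ptr<LogFile>, each element of which can be wrapped via:
+//
+//   jobject jlog_file =
+//       ROCKSDB_NAMESPACE::LogFileJni::fromCppLogFile(env, log_file.get());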
+
+class LiveFileMetaDataJni : public JavaClass {
+ public:
+ /**
+ * Create a new Java org.rocksdb.LiveFileMetaData object.
+ *
+ * @param env A pointer to the Java environment
+   * @param live_file_meta_data A C++ live file metadata object
+   *
+   * @return A reference to a Java org.rocksdb.LiveFileMetaData object, or
+   * nullptr if an exception occurs
+ */
+ static jobject fromCppLiveFileMetaData(
+ JNIEnv* env, ROCKSDB_NAMESPACE::LiveFileMetaData* live_file_meta_data) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "([BILjava/lang/String;Ljava/lang/String;JJJ[B[BJZJJ)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jbyteArray jcolumn_family_name = ROCKSDB_NAMESPACE::JniUtil::copyBytes(
+ env, live_file_meta_data->column_family_name);
+ if (jcolumn_family_name == nullptr) {
+ // exception occurred creating java byte array
+ return nullptr;
+ }
+
+ jstring jfile_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &live_file_meta_data->name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ return nullptr;
+ }
+
+ jstring jpath = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &live_file_meta_data->db_path, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfile_name);
+ return nullptr;
+ }
+
+ jbyteArray jsmallest_key = ROCKSDB_NAMESPACE::JniUtil::copyBytes(
+ env, live_file_meta_data->smallestkey);
+ if (jsmallest_key == nullptr) {
+ // exception occurred creating java byte array
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ return nullptr;
+ }
+
+ jbyteArray jlargest_key = ROCKSDB_NAMESPACE::JniUtil::copyBytes(
+ env, live_file_meta_data->largestkey);
+ if (jlargest_key == nullptr) {
+ // exception occurred creating java byte array
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ return nullptr;
+ }
+
+ jobject jlive_file_meta_data = env->NewObject(jclazz, mid,
+ jcolumn_family_name,
+ static_cast<jint>(live_file_meta_data->level),
+ jfile_name,
+ jpath,
+ static_cast<jlong>(live_file_meta_data->size),
+ static_cast<jlong>(live_file_meta_data->smallest_seqno),
+ static_cast<jlong>(live_file_meta_data->largest_seqno),
+ jsmallest_key,
+ jlargest_key,
+ static_cast<jlong>(live_file_meta_data->num_reads_sampled),
+ static_cast<jboolean>(live_file_meta_data->being_compacted),
+ static_cast<jlong>(live_file_meta_data->num_entries),
+ static_cast<jlong>(live_file_meta_data->num_deletions)
+ );
+
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ env->DeleteLocalRef(jlargest_key);
+ return nullptr;
+ }
+
+ // cleanup
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ env->DeleteLocalRef(jlargest_key);
+
+ return jlive_file_meta_data;
+ }
+
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/LiveFileMetaData");
+ }
+};
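+
+// Caller sketch (illustrative): DB::GetLiveFilesMetaData() fills a
+// std::vector<LiveFileMetaData>; each element can be wrapped via:
+//
+//   jobject jmeta = ROCKSDB_NAMESPACE::LiveFileMetaDataJni::
+//       fromCppLiveFileMetaData(env, &live_file_meta_data);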
+
+class SstFileMetaDataJni : public JavaClass {
+ public:
+ /**
+ * Create a new Java org.rocksdb.SstFileMetaData object.
+ *
+ * @param env A pointer to the Java environment
+   * @param sst_file_meta_data A C++ SST file metadata object
+   *
+   * @return A reference to a Java org.rocksdb.SstFileMetaData object, or
+   * nullptr if an exception occurs
+ */
+ static jobject fromCppSstFileMetaData(
+ JNIEnv* env,
+ const ROCKSDB_NAMESPACE::SstFileMetaData* sst_file_meta_data) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(Ljava/lang/String;Ljava/lang/String;JJJ[B[BJZJJ)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jstring jfile_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &sst_file_meta_data->name, true);
+ if (jfile_name == nullptr) {
+      // exception occurred creating java string
+ return nullptr;
+ }
+
+ jstring jpath = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &sst_file_meta_data->db_path, true);
+ if (jpath == nullptr) {
+      // exception occurred creating java string
+ env->DeleteLocalRef(jfile_name);
+ return nullptr;
+ }
+
+ jbyteArray jsmallest_key = ROCKSDB_NAMESPACE::JniUtil::copyBytes(
+ env, sst_file_meta_data->smallestkey);
+ if (jsmallest_key == nullptr) {
+ // exception occurred creating java byte array
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ return nullptr;
+ }
+
+ jbyteArray jlargest_key = ROCKSDB_NAMESPACE::JniUtil::copyBytes(
+ env, sst_file_meta_data->largestkey);
+ if (jlargest_key == nullptr) {
+ // exception occurred creating java byte array
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ return nullptr;
+ }
+
+ jobject jsst_file_meta_data = env->NewObject(jclazz, mid,
+ jfile_name,
+ jpath,
+ static_cast<jlong>(sst_file_meta_data->size),
+        static_cast<jlong>(sst_file_meta_data->smallest_seqno),
+ static_cast<jlong>(sst_file_meta_data->largest_seqno),
+ jsmallest_key,
+ jlargest_key,
+ static_cast<jlong>(sst_file_meta_data->num_reads_sampled),
+ static_cast<jboolean>(sst_file_meta_data->being_compacted),
+ static_cast<jlong>(sst_file_meta_data->num_entries),
+ static_cast<jlong>(sst_file_meta_data->num_deletions)
+ );
+
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ env->DeleteLocalRef(jlargest_key);
+ return nullptr;
+ }
+
+ // cleanup
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ env->DeleteLocalRef(jlargest_key);
+
+ return jsst_file_meta_data;
+ }
+
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/SstFileMetaData");
+ }
+};
+
+class LevelMetaDataJni : public JavaClass {
+ public:
+ /**
+ * Create a new Java org.rocksdb.LevelMetaData object.
+ *
+ * @param env A pointer to the Java environment
+   * @param level_meta_data A C++ level metadata object
+   *
+   * @return A reference to a Java org.rocksdb.LevelMetaData object, or
+   * nullptr if an exception occurs
+ */
+ static jobject fromCppLevelMetaData(
+ JNIEnv* env, const ROCKSDB_NAMESPACE::LevelMetaData* level_meta_data) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(IJ[Lorg/rocksdb/SstFileMetaData;)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ const jsize jlen =
+ static_cast<jsize>(level_meta_data->files.size());
+ jobjectArray jfiles = env->NewObjectArray(jlen, SstFileMetaDataJni::getJClass(env), nullptr);
+ if (jfiles == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ jsize i = 0;
+ for (auto it = level_meta_data->files.begin();
+ it != level_meta_data->files.end(); ++it) {
+ jobject jfile = SstFileMetaDataJni::fromCppSstFileMetaData(env, &(*it));
+ if (jfile == nullptr) {
+ // exception occurred
+ env->DeleteLocalRef(jfiles);
+ return nullptr;
+ }
+      env->SetObjectArrayElement(jfiles, i++, jfile);
+      env->DeleteLocalRef(jfile);
+ }
+
+ jobject jlevel_meta_data = env->NewObject(jclazz, mid,
+ static_cast<jint>(level_meta_data->level),
+ static_cast<jlong>(level_meta_data->size),
+ jfiles
+ );
+
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jfiles);
+ return nullptr;
+ }
+
+ // cleanup
+ env->DeleteLocalRef(jfiles);
+
+ return jlevel_meta_data;
+ }
+
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/LevelMetaData");
+ }
+};
+
+class ColumnFamilyMetaDataJni : public JavaClass {
+ public:
+ /**
+ * Create a new Java org.rocksdb.ColumnFamilyMetaData object.
+ *
+ * @param env A pointer to the Java environment
+   * @param column_family_meta_data A C++ column family metadata object
+   *
+   * @return A reference to a Java org.rocksdb.ColumnFamilyMetaData object, or
+   * nullptr if an exception occurs
+ */
+ static jobject fromCppColumnFamilyMetaData(
+ JNIEnv* env,
+      const ROCKSDB_NAMESPACE::ColumnFamilyMetaData* column_family_meta_data) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JJ[B[Lorg/rocksdb/LevelMetaData;)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jbyteArray jname = ROCKSDB_NAMESPACE::JniUtil::copyBytes(
+        env, column_family_meta_data->name);
+ if (jname == nullptr) {
+ // exception occurred creating java byte array
+ return nullptr;
+ }
+
+ const jsize jlen =
+        static_cast<jsize>(column_family_meta_data->levels.size());
+ jobjectArray jlevels = env->NewObjectArray(jlen, LevelMetaDataJni::getJClass(env), nullptr);
+ if(jlevels == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jname);
+ return nullptr;
+ }
+
+ jsize i = 0;
+    for (auto it = column_family_meta_data->levels.begin();
+         it != column_family_meta_data->levels.end(); ++it) {
+ jobject jlevel = LevelMetaDataJni::fromCppLevelMetaData(env, &(*it));
+ if (jlevel == nullptr) {
+ // exception occurred
+ env->DeleteLocalRef(jname);
+ env->DeleteLocalRef(jlevels);
+ return nullptr;
+ }
+      env->SetObjectArrayElement(jlevels, i++, jlevel);
+      env->DeleteLocalRef(jlevel);
+ }
+
+ jobject jcolumn_family_meta_data = env->NewObject(jclazz, mid,
+        static_cast<jlong>(column_family_meta_data->size),
+        static_cast<jlong>(column_family_meta_data->file_count),
+ jname,
+ jlevels
+ );
+
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jname);
+ env->DeleteLocalRef(jlevels);
+ return nullptr;
+ }
+
+ // cleanup
+ env->DeleteLocalRef(jname);
+ env->DeleteLocalRef(jlevels);
+
+ return jcolumn_family_meta_data;
+ }
+
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyMetaData");
+ }
+};
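+
+// Caller sketch (assumed): wrapping the result of
+// DB::GetColumnFamilyMetaData():
+//
+//   ROCKSDB_NAMESPACE::ColumnFamilyMetaData cf_meta;
+//   db->GetColumnFamilyMetaData(cf_handle, &cf_meta);
+//   jobject jcf_meta = ROCKSDB_NAMESPACE::ColumnFamilyMetaDataJni::
+//       fromCppColumnFamilyMetaData(env, &cf_meta);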
+
+// The portal class for org.rocksdb.AbstractTraceWriter
+class AbstractTraceWriterJni
+ : public RocksDBNativeClass<
+ const ROCKSDB_NAMESPACE::TraceWriterJniCallback*,
+ AbstractTraceWriterJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.AbstractTraceWriter
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/AbstractTraceWriter");
+ }
+
+ /**
+ * Get the Java Method: AbstractTraceWriter#write
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getWriteProxyMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "writeProxy", "(J)S");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: AbstractTraceWriter#closeWriter
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getCloseWriterProxyMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "closeWriterProxy", "()S");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: AbstractTraceWriter#getFileSize
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getGetFileSizeMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "getFileSize", "()J");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
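+
+// Usage sketch (assumed, not from this file): TraceWriterJniCallback::Write
+// would invoke the cached method id roughly as:
+//
+//   jmethodID mid = AbstractTraceWriterJni::getWriteProxyMethodId(env);
+//   jshort jstatus = env->CallShortMethod(
+//       m_jcallback_obj, mid, reinterpret_cast<jlong>(&data));
+//
+// where m_jcallback_obj is the global reference held by the JniCallback base
+// class and jstatus encodes a Status code for the C++ side.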
+
+// The portal class for org.rocksdb.AbstractWalFilter
+class AbstractWalFilterJni
+ : public RocksDBNativeClass<const ROCKSDB_NAMESPACE::WalFilterJniCallback*,
+ AbstractWalFilterJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.AbstractWalFilter
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/AbstractWalFilter");
+ }
+
+ /**
+ * Get the Java Method: AbstractWalFilter#columnFamilyLogNumberMap
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getColumnFamilyLogNumberMapMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "columnFamilyLogNumberMap",
+ "(Ljava/util/Map;Ljava/util/Map;)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+   * Get the Java Method: AbstractWalFilter#logRecordFoundProxy
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getLogRecordFoundProxyMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "logRecordFoundProxy", "(JLjava/lang/String;JJ)S");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+   * Get the Java Method: AbstractWalFilter#name
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getNameMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "name", "()Ljava/lang/String;");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
+
+// The portal class for org.rocksdb.WalProcessingOption
+class WalProcessingOptionJni {
+ public:
+ // Returns the equivalent org.rocksdb.WalProcessingOption for the provided
+ // C++ ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption enum
+ static jbyte toJavaWalProcessingOption(
+ const ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption&
+ wal_processing_option) {
+ switch (wal_processing_option) {
+ case ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::
+ kContinueProcessing:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::
+ kIgnoreCurrentRecord:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::kStopReplay:
+ return 0x2;
+ case ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::kCorruptedRecord:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++
+ // ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption enum for the provided
+ // Java org.rocksdb.WalProcessingOption
+ static ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption
+ toCppWalProcessingOption(jbyte jwal_processing_option) {
+ switch (jwal_processing_option) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::
+ kContinueProcessing;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::
+ kIgnoreCurrentRecord;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::kStopReplay;
+ case 0x3:
+ return ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::
+ kCorruptedRecord;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::
+ kCorruptedRecord;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.ReusedSynchronisationType
+class ReusedSynchronisationTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.ReusedSynchronisationType for the
+ // provided C++ ROCKSDB_NAMESPACE::ReusedSynchronisationType enum
+ static jbyte toJavaReusedSynchronisationType(
+ const ROCKSDB_NAMESPACE::ReusedSynchronisationType&
+ reused_synchronisation_type) {
+ switch(reused_synchronisation_type) {
+ case ROCKSDB_NAMESPACE::ReusedSynchronisationType::MUTEX:
+ return 0x0;
+ case ROCKSDB_NAMESPACE::ReusedSynchronisationType::ADAPTIVE_MUTEX:
+ return 0x1;
+ case ROCKSDB_NAMESPACE::ReusedSynchronisationType::THREAD_LOCAL:
+ return 0x2;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ ROCKSDB_NAMESPACE::ReusedSynchronisationType
+ // enum for the provided Java org.rocksdb.ReusedSynchronisationType
+ static ROCKSDB_NAMESPACE::ReusedSynchronisationType
+ toCppReusedSynchronisationType(jbyte reused_synchronisation_type) {
+ switch(reused_synchronisation_type) {
+ case 0x0:
+ return ROCKSDB_NAMESPACE::ReusedSynchronisationType::MUTEX;
+ case 0x1:
+ return ROCKSDB_NAMESPACE::ReusedSynchronisationType::ADAPTIVE_MUTEX;
+ case 0x2:
+ return ROCKSDB_NAMESPACE::ReusedSynchronisationType::THREAD_LOCAL;
+ default:
+ // undefined/default
+ return ROCKSDB_NAMESPACE::ReusedSynchronisationType::ADAPTIVE_MUTEX;
+ }
+ }
+};
+} // namespace ROCKSDB_NAMESPACE
+#endif // JAVA_ROCKSJNI_PORTAL_H_
diff --git a/src/rocksdb/java/rocksjni/ratelimiterjni.cc b/src/rocksdb/java/rocksjni/ratelimiterjni.cc
new file mode 100644
index 000000000..96b440dcc
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/ratelimiterjni.cc
@@ -0,0 +1,127 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for RateLimiter.
+
+#include "include/org_rocksdb_RateLimiter.h"
+#include "rocksdb/rate_limiter.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_RateLimiter
+ * Method: newRateLimiterHandle
+ * Signature: (JJIBZ)J
+ */
+jlong Java_org_rocksdb_RateLimiter_newRateLimiterHandle(
+ JNIEnv* /*env*/, jclass /*jclazz*/, jlong jrate_bytes_per_second,
+ jlong jrefill_period_micros, jint jfairness, jbyte jrate_limiter_mode,
+ jboolean jauto_tune) {
+ auto rate_limiter_mode =
+ ROCKSDB_NAMESPACE::RateLimiterModeJni::toCppRateLimiterMode(
+ jrate_limiter_mode);
+ auto* sptr_rate_limiter = new std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>(
+ ROCKSDB_NAMESPACE::NewGenericRateLimiter(
+ static_cast<int64_t>(jrate_bytes_per_second),
+ static_cast<int64_t>(jrefill_period_micros),
+ static_cast<int32_t>(jfairness), rate_limiter_mode, jauto_tune));
+
+ return reinterpret_cast<jlong>(sptr_rate_limiter);
+}
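+
+// Ownership note (illustrative, not part of this file): the returned handle
+// is the address of a heap-allocated std::shared_ptr, so the C++ side can
+// share ownership with the options that use the limiter, e.g.:
+//
+//   auto* sptr = reinterpret_cast<
+//       std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(jhandle);
+//   db_options.rate_limiter = *sptr;  // DBOptions keeps the limiter alive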
+
+/*
+ * Class: org_rocksdb_RateLimiter
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RateLimiter_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* handle =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(
+ jhandle);
+ delete handle; // delete std::shared_ptr
+}
+
+/*
+ * Class: org_rocksdb_RateLimiter
+ * Method: setBytesPerSecond
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_RateLimiter_setBytesPerSecond(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle,
+ jlong jbytes_per_second) {
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(handle)
+ ->get()
+ ->SetBytesPerSecond(jbytes_per_second);
+}
+
+/*
+ * Class: org_rocksdb_RateLimiter
+ * Method: getBytesPerSecond
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RateLimiter_getBytesPerSecond(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ return reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(
+ handle)
+ ->get()
+ ->GetBytesPerSecond();
+}
+
+/*
+ * Class: org_rocksdb_RateLimiter
+ * Method: request
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_RateLimiter_request(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle, jlong jbytes) {
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(handle)
+ ->get()
+ ->Request(jbytes, ROCKSDB_NAMESPACE::Env::IO_TOTAL);
+}
+
+/*
+ * Class: org_rocksdb_RateLimiter
+ * Method: getSingleBurstBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RateLimiter_getSingleBurstBytes(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ return reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(
+ handle)
+ ->get()
+ ->GetSingleBurstBytes();
+}
+
+/*
+ * Class: org_rocksdb_RateLimiter
+ * Method: getTotalBytesThrough
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RateLimiter_getTotalBytesThrough(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ return reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(
+ handle)
+ ->get()
+ ->GetTotalBytesThrough();
+}
+
+/*
+ * Class: org_rocksdb_RateLimiter
+ * Method: getTotalRequests
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RateLimiter_getTotalRequests(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ return reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(
+ handle)
+ ->get()
+ ->GetTotalRequests();
+}
diff --git a/src/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc b/src/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc
new file mode 100644
index 000000000..8f0037b39
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include <jni.h>
+
+#include "include/org_rocksdb_RemoveEmptyValueCompactionFilter.h"
+#include "utilities/compaction_filters/remove_emptyvalue_compactionfilter.h"
+
+/*
+ * Class: org_rocksdb_RemoveEmptyValueCompactionFilter
+ * Method: createNewRemoveEmptyValueCompactionFilter0
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_RemoveEmptyValueCompactionFilter_createNewRemoveEmptyValueCompactionFilter0(
+ JNIEnv* /*env*/, jclass /*jcls*/) {
+ auto* compaction_filter =
+ new ROCKSDB_NAMESPACE::RemoveEmptyValueCompactionFilter();
+
+ // set the native handle to our native compaction filter
+ return reinterpret_cast<jlong>(compaction_filter);
+}
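
Unlike the RateLimiter above, this handle is the raw filter pointer itself rather than a pointer to a shared_ptr; presumably the bindings later reclaim it by casting the jlong back and deleting it, although the exact ownership handoff is defined elsewhere. A hedged sketch of that assumption, with illustrative names:

#include <cstdint>

struct FilterBase { virtual ~FilterBase() = default; };
struct RemoveEmptyValueFilterLike : FilterBase {};

// Construction: the handle is the object pointer itself, no shared_ptr wrapper.
int64_t CreateFilterHandle() {
  return reinterpret_cast<int64_t>(new RemoveEmptyValueFilterLike());
}

// Disposal (wherever the bindings perform it): cast back and delete.
void DisposeFilterHandle(int64_t handle) {
  delete reinterpret_cast<FilterBase*>(handle);  // OK: virtual destructor
}
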
diff --git a/src/rocksdb/java/rocksjni/restorejni.cc b/src/rocksdb/java/rocksjni/restorejni.cc
new file mode 100644
index 000000000..9b605c704
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/restorejni.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::RestoreOptions methods
+// from Java side.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+
+#include "include/org_rocksdb_RestoreOptions.h"
+#include "rocksdb/utilities/backupable_db.h"
+#include "rocksjni/portal.h"
+/*
+ * Class: org_rocksdb_RestoreOptions
+ * Method: newRestoreOptions
+ * Signature: (Z)J
+ */
+jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions(
+ JNIEnv* /*env*/, jclass /*jcls*/, jboolean keep_log_files) {
+ auto* ropt = new ROCKSDB_NAMESPACE::RestoreOptions(keep_log_files);
+ return reinterpret_cast<jlong>(ropt);
+}
+
+/*
+ * Class: org_rocksdb_RestoreOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RestoreOptions_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* ropt = reinterpret_cast<ROCKSDB_NAMESPACE::RestoreOptions*>(jhandle);
+ assert(ropt);
+ delete ropt;
+}
diff --git a/src/rocksdb/java/rocksjni/rocks_callback_object.cc b/src/rocksdb/java/rocksjni/rocks_callback_object.cc
new file mode 100644
index 000000000..73aa86137
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/rocks_callback_object.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// JNI callbacks from C++ to sub-classes of org.rocksdb.RocksCallbackObject
+
+#include <jni.h>
+
+#include "include/org_rocksdb_RocksCallbackObject.h"
+#include "jnicallback.h"
+
+/*
+ * Class: org_rocksdb_RocksCallbackObject
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksCallbackObject_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ // TODO(AR) is deleting from the super class JniCallback OK, or must we
+ // delete the subclass? Example hierarchies:
+ //   1) Comparator -> BaseComparatorJniCallback + JniCallback ->
+ //      DirectComparatorJniCallback
+ //   2) Comparator -> BaseComparatorJniCallback + JniCallback ->
+ //      ComparatorJniCallback
+ // I think this is okay, as Comparator and JniCallback both have virtual
+ // destructors...
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::JniCallback*>(handle);
+ // @lint-ignore TXT4 T25377293 Grandfathered in
+} \ No newline at end of file
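
The TODO above turns on a standard C++ rule: deleting a derived object through a base-class pointer is well-defined exactly when the base class has a virtual destructor, in which case the most-derived destructor runs first. A minimal sketch of why the delete above should be sound under that assumption:

#include <cstdio>

struct JniCallbackLike {
  virtual ~JniCallbackLike() { std::puts("base dtor"); }
};

struct ComparatorCallbackLike : JniCallbackLike {
  ~ComparatorCallbackLike() override { std::puts("derived dtor"); }
};

int main() {
  JniCallbackLike* cb = new ComparatorCallbackLike();
  delete cb;  // prints "derived dtor" then "base dtor": whole object destroyed
}
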
diff --git a/src/rocksdb/java/rocksjni/rocksdb_exception_test.cc b/src/rocksdb/java/rocksjni/rocksdb_exception_test.cc
new file mode 100644
index 000000000..d0fd834ba
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/rocksdb_exception_test.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include <jni.h>
+
+#include "include/org_rocksdb_RocksDBExceptionTest.h"
+
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_RocksDBExceptionTest
+ * Method: raiseException
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseException(JNIEnv* env,
+ jobject /*jobj*/) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env,
+ std::string("test message"));
+}
+
+/*
+ * Class: org_rocksdb_RocksDBExceptionTest
+ * Method: raiseExceptionWithStatusCode
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionWithStatusCode(
+ JNIEnv* env, jobject /*jobj*/) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "test message", ROCKSDB_NAMESPACE::Status::NotSupported());
+}
+
+/*
+ * Class: org_rocksdb_RocksDBExceptionTest
+ * Method: raiseExceptionNoMsgWithStatusCode
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionNoMsgWithStatusCode(
+ JNIEnv* env, jobject /*jobj*/) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::NotSupported());
+}
+
+/*
+ * Class: org_rocksdb_RocksDBExceptionTest
+ * Method: raiseExceptionWithStatusCodeSubCode
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionWithStatusCodeSubCode(
+ JNIEnv* env, jobject /*jobj*/) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "test message",
+ ROCKSDB_NAMESPACE::Status::TimedOut(
+ ROCKSDB_NAMESPACE::Status::SubCode::kLockTimeout));
+}
+
+/*
+ * Class: org_rocksdb_RocksDBExceptionTest
+ * Method: raiseExceptionNoMsgWithStatusCodeSubCode
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionNoMsgWithStatusCodeSubCode(
+ JNIEnv* env, jobject /*jobj*/) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::TimedOut(
+ ROCKSDB_NAMESPACE::Status::SubCode::kLockTimeout));
+}
+
+/*
+ * Class: org_rocksdb_RocksDBExceptionTest
+ * Method: raiseExceptionWithStatusCodeState
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionWithStatusCodeState(
+ JNIEnv* env, jobject /*jobj*/) {
+ ROCKSDB_NAMESPACE::Slice state("test state");
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, "test message", ROCKSDB_NAMESPACE::Status::NotSupported(state));
+}
diff --git a/src/rocksdb/java/rocksjni/rocksjni.cc b/src/rocksdb/java/rocksjni/rocksjni.cc
new file mode 100644
index 000000000..84175913a
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/rocksjni.cc
@@ -0,0 +1,3406 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling c++ ROCKSDB_NAMESPACE::DB methods from Java side.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <algorithm>
+#include <functional>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "include/org_rocksdb_RocksDB.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/convenience.h"
+#include "rocksdb/db.h"
+#include "rocksdb/options.h"
+#include "rocksdb/types.h"
+#include "rocksjni/portal.h"
+
+#ifdef min
+#undef min
+#endif
+
+jlong rocksdb_open_helper(JNIEnv* env, jlong jopt_handle, jstring jdb_path,
+ std::function<ROCKSDB_NAMESPACE::Status(
+ const ROCKSDB_NAMESPACE::Options&,
+ const std::string&, ROCKSDB_NAMESPACE::DB**)>
+ open_fn) {
+ const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
+ if (db_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jopt_handle);
+ ROCKSDB_NAMESPACE::DB* db = nullptr;
+ ROCKSDB_NAMESPACE::Status s = open_fn(*opt, db_path, &db);
+
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+
+ if (s.ok()) {
+ return reinterpret_cast<jlong>(db);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+ }
+}
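
This helper factors the logic shared by every open variant (UTF-8 path decoding, release of the string, status-to-exception translation) away from the choice of opener, which callers supply as a std::function built either from a cast function pointer to the overloaded static DB::Open or from a lambda wrapping OpenForReadOnly, as the two functions below show. A minimal sketch of the dispatch shape, with illustrative types:

#include <functional>
#include <string>

struct Status { bool ok() const { return true; } };
struct DB {};

Status OpenRW(const std::string& /*path*/, DB** /*db*/) { return Status(); }
Status OpenRO(const std::string& /*path*/, DB** /*db*/) { return Status(); }

// Shared helper: everything common lives here; only the opener varies.
Status OpenWith(const std::string& path,
                std::function<Status(const std::string&, DB**)> open_fn) {
  DB* db = nullptr;
  return open_fn(path, &db);
}

int main() {
  OpenWith("/tmp/db", &OpenRW);                             // function pointer
  OpenWith("/tmp/db", [](const std::string& p, DB** db) {   // lambda wrapper
    return OpenRO(p, db);
  });
}
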
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: open
+ * Signature: (JLjava/lang/String;)J
+ */
+jlong Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2(
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path) {
+ return rocksdb_open_helper(env, jopt_handle, jdb_path,
+ (ROCKSDB_NAMESPACE::Status(*)(
+ const ROCKSDB_NAMESPACE::Options&,
+ const std::string&, ROCKSDB_NAMESPACE::DB**)) &
+ ROCKSDB_NAMESPACE::DB::Open);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: openROnly
+ * Signature: (JLjava/lang/String;)J
+ */
+jlong Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2(
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path) {
+ return rocksdb_open_helper(
+ env, jopt_handle, jdb_path,
+ [](const ROCKSDB_NAMESPACE::Options& options, const std::string& db_path,
+ ROCKSDB_NAMESPACE::DB** db) {
+ return ROCKSDB_NAMESPACE::DB::OpenForReadOnly(options, db_path, db);
+ });
+}
+
+jlongArray rocksdb_open_helper(
+ JNIEnv* env, jlong jopt_handle, jstring jdb_path,
+ jobjectArray jcolumn_names, jlongArray jcolumn_options,
+ std::function<ROCKSDB_NAMESPACE::Status(
+ const ROCKSDB_NAMESPACE::DBOptions&, const std::string&,
+ const std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor>&,
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>*,
+ ROCKSDB_NAMESPACE::DB**)>
+ open_fn) {
+ const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
+ if (db_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ const jsize len_cols = env->GetArrayLength(jcolumn_names);
+ jlong* jco = env->GetLongArrayElements(jcolumn_options, nullptr);
+ if (jco == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor> column_families;
+ jboolean has_exception = JNI_FALSE;
+ ROCKSDB_NAMESPACE::JniUtil::byteStrings<std::string>(
+ env, jcolumn_names,
+ [](const char* str_data, const size_t str_len) {
+ return std::string(str_data, str_len);
+ },
+ [&jco, &column_families](size_t idx, std::string cf_name) {
+ ROCKSDB_NAMESPACE::ColumnFamilyOptions* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jco[idx]);
+ column_families.push_back(
+ ROCKSDB_NAMESPACE::ColumnFamilyDescriptor(cf_name, *cf_options));
+ },
+ &has_exception);
+
+ env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT);
+
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jopt_handle);
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
+ ROCKSDB_NAMESPACE::DB* db = nullptr;
+ ROCKSDB_NAMESPACE::Status s =
+ open_fn(*opt, db_path, column_families, &cf_handles, &db);
+
+ // we have now finished with db_path
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+
+ // check if open operation was successful
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ const jsize resultsLen = 1 + len_cols; // db handle + column family handles
+ std::unique_ptr<jlong[]> results(new jlong[resultsLen]);
+ results[0] = reinterpret_cast<jlong>(db);
+ for (int i = 1; i <= len_cols; i++) {
+ results[i] = reinterpret_cast<jlong>(cf_handles[i - 1]);
+ }
+
+ jlongArray jresults = env->NewLongArray(resultsLen);
+ if (jresults == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresults);
+ return nullptr;
+ }
+
+ return jresults;
+}
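
The overload above packs its results into one jlong array: element 0 is the DB pointer and elements 1..len_cols are the column family handle pointers, in the order the names were supplied, so the Java side must unpack by that convention. A sketch of the layout in plain C++, with illustrative types:

#include <cstdint>
#include <vector>

// Layout convention: results[0] is the DB*, results[1..n] are the n
// ColumnFamilyHandle* values in the order the names were supplied.
std::vector<int64_t> PackHandles(void* db, const std::vector<void*>& cf_handles) {
  std::vector<int64_t> results;
  results.reserve(1 + cf_handles.size());
  results.push_back(reinterpret_cast<int64_t>(db));
  for (void* cf : cf_handles) {
    results.push_back(reinterpret_cast<int64_t>(cf));
  }
  return results;
}
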
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: openROnly
+ * Signature: (JLjava/lang/String;[[B[J)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3J(
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path,
+ jobjectArray jcolumn_names, jlongArray jcolumn_options) {
+ return rocksdb_open_helper(
+ env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options,
+ [](const ROCKSDB_NAMESPACE::DBOptions& options,
+ const std::string& db_path,
+ const std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor>&
+ column_families,
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>* handles,
+ ROCKSDB_NAMESPACE::DB** db) {
+ return ROCKSDB_NAMESPACE::DB::OpenForReadOnly(
+ options, db_path, column_families, handles, db);
+ });
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: open
+ * Signature: (JLjava/lang/String;[[B[J)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J(
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path,
+ jobjectArray jcolumn_names, jlongArray jcolumn_options) {
+ return rocksdb_open_helper(
+ env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options,
+ (ROCKSDB_NAMESPACE::Status(*)(
+ const ROCKSDB_NAMESPACE::DBOptions&, const std::string&,
+ const std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor>&,
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>*,
+ ROCKSDB_NAMESPACE::DB**)) &
+ ROCKSDB_NAMESPACE::DB::Open);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jhandle);
+ assert(db != nullptr);
+ delete db;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: closeDatabase
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_closeDatabase(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jhandle);
+ assert(db != nullptr);
+ ROCKSDB_NAMESPACE::Status s = db->Close();
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
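
Note that closeDatabase calls ThrowNew unconditionally, unlike most call sites which guard on !s.ok(); this only works if the portal's ThrowNew is a no-op for an OK status, an assumption consistent with how it is used here. A sketch of that assumed contract, with an illustrative Status type:

#include <string>

struct Status {
  int code = 0;  // 0 == OK
  std::string msg;
  bool ok() const { return code == 0; }
};

// Assumed contract of the portal's ThrowNew: do nothing for OK statuses,
// otherwise raise org.rocksdb.RocksDBException on the attached JNIEnv.
bool ThrowIfNotOk(const Status& s) {
  if (s.ok()) {
    return false;  // nothing thrown; caller continues normally
  }
  // ... build and throw the Java exception via JNI here ...
  return true;
}
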
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: listColumnFamilies
+ * Signature: (JLjava/lang/String;)[[B
+ */
+jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies(
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path) {
+ std::vector<std::string> column_family_names;
+ const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
+ if (db_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jopt_handle);
+ ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::DB::ListColumnFamilies(
+ *opt, db_path, &column_family_names);
+
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+
+ jobjectArray jcolumn_family_names =
+ ROCKSDB_NAMESPACE::JniUtil::stringsBytes(env, column_family_names);
+
+ return jcolumn_family_names;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: createColumnFamily
+ * Signature: (J[BIJ)J
+ */
+jlong Java_org_rocksdb_RocksDB_createColumnFamily(
+ JNIEnv* env, jobject, jlong jhandle, jbyteArray jcf_name,
+ jint jcf_name_len, jlong jcf_options_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jhandle);
+ jboolean has_exception = JNI_FALSE;
+ const std::string cf_name =
+ ROCKSDB_NAMESPACE::JniUtil::byteString<std::string>(
+ env, jcf_name, jcf_name_len,
+ [](const char* str, const size_t len) {
+ return std::string(str, len);
+ },
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return 0;
+ }
+ auto* cf_options = reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(
+ jcf_options_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ ROCKSDB_NAMESPACE::Status s =
+ db->CreateColumnFamily(*cf_options, cf_name, &cf_handle);
+ if (!s.ok()) {
+ // error occurred
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+ }
+ return reinterpret_cast<jlong>(cf_handle);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: createColumnFamilies
+ * Signature: (JJ[[B)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_createColumnFamilies__JJ_3_3B(
+ JNIEnv* env, jobject, jlong jhandle, jlong jcf_options_handle,
+ jobjectArray jcf_names) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jhandle);
+ auto* cf_options = reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(
+ jcf_options_handle);
+ jboolean has_exception = JNI_FALSE;
+ std::vector<std::string> cf_names;
+ ROCKSDB_NAMESPACE::JniUtil::byteStrings<std::string>(
+ env, jcf_names,
+ [](const char* str, const size_t len) { return std::string(str, len); },
+ [&cf_names](const size_t, std::string str) { cf_names.push_back(str); },
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return nullptr;
+ }
+
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
+ ROCKSDB_NAMESPACE::Status s =
+ db->CreateColumnFamilies(*cf_options, cf_names, &cf_handles);
+ if (!s.ok()) {
+ // error occurred
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ jlongArray jcf_handles = ROCKSDB_NAMESPACE::JniUtil::toJPointers<
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle>(env, cf_handles, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return nullptr;
+ }
+ return jcf_handles;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: createColumnFamilies
+ * Signature: (J[J[[B)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_createColumnFamilies__J_3J_3_3B(
+ JNIEnv* env, jobject, jlong jhandle, jlongArray jcf_options_handles,
+ jobjectArray jcf_names) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jhandle);
+ const jsize jlen = env->GetArrayLength(jcf_options_handles);
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor> cf_descriptors;
+ cf_descriptors.reserve(jlen);
+
+ jboolean jcf_options_handles_is_copy = JNI_FALSE;
+ jlong* jcf_options_handles_elems = env->GetLongArrayElements(
+ jcf_options_handles, &jcf_options_handles_is_copy);
+ if (jcf_options_handles_elems == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ // extract the column family descriptors
+ jboolean has_exception = JNI_FALSE;
+ for (jsize i = 0; i < jlen; i++) {
+ auto* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(
+ jcf_options_handles_elems[i]);
+ jbyteArray jcf_name = static_cast<jbyteArray>(
+ env->GetObjectArrayElement(jcf_names, i));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->ReleaseLongArrayElements(jcf_options_handles, jcf_options_handles_elems, JNI_ABORT);
+ return nullptr;
+ }
+ const std::string cf_name =
+ ROCKSDB_NAMESPACE::JniUtil::byteString<std::string>(
+ env, jcf_name,
+ [](const char* str, const size_t len) {
+ return std::string(str, len);
+ },
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->DeleteLocalRef(jcf_name);
+ env->ReleaseLongArrayElements(jcf_options_handles, jcf_options_handles_elems, JNI_ABORT);
+ return nullptr;
+ }
+
+ cf_descriptors.push_back(
+ ROCKSDB_NAMESPACE::ColumnFamilyDescriptor(cf_name, *cf_options));
+
+ env->DeleteLocalRef(jcf_name);
+ }
+
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
+ ROCKSDB_NAMESPACE::Status s =
+ db->CreateColumnFamilies(cf_descriptors, &cf_handles);
+
+ env->ReleaseLongArrayElements(jcf_options_handles, jcf_options_handles_elems, JNI_ABORT);
+
+ if (!s.ok()) {
+ // error occurred
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ jlongArray jcf_handles = ROCKSDB_NAMESPACE::JniUtil::toJPointers<
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle>(env, cf_handles, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return nullptr;
+ }
+ return jcf_handles;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: dropColumnFamily
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_RocksDB_dropColumnFamily(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlong jcf_handle) {
+ auto* db_handle = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ ROCKSDB_NAMESPACE::Status s = db_handle->DropColumnFamily(cf_handle);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: dropColumnFamilies
+ * Signature: (J[J)V
+ */
+void Java_org_rocksdb_RocksDB_dropColumnFamilies(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlongArray jcolumn_family_handles) {
+ auto* db_handle = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
+ if (jcolumn_family_handles != nullptr) {
+ const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
+
+ jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
+ if (jcfh == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ for (jsize i = 0; i < len_cols; i++) {
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcfh[i]);
+ cf_handles.push_back(cf_handle);
+ }
+ env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
+ }
+
+ ROCKSDB_NAMESPACE::Status s = db_handle->DropColumnFamilies(cf_handles);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::DB::Put
+
+/**
+ * @return true if the put succeeded, false if a Java Exception was thrown
+ */
+bool rocksdb_put_helper(JNIEnv* env, ROCKSDB_NAMESPACE::DB* db,
+ const ROCKSDB_NAMESPACE::WriteOptions& write_options,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ jbyte* key = new jbyte[jkey_len];
+ env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] key;
+ return false;
+ }
+
+ jbyte* value = new jbyte[jval_len];
+ env->GetByteArrayRegion(jval, jval_off, jval_len, value);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] value;
+ delete[] key;
+ return false;
+ }
+
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+ ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast<char*>(value),
+ jval_len);
+
+ ROCKSDB_NAMESPACE::Status s;
+ if (cf_handle != nullptr) {
+ s = db->Put(write_options, cf_handle, key_slice, value_slice);
+ } else {
+ // backwards compatibility
+ s = db->Put(write_options, key_slice, value_slice);
+ }
+
+ // cleanup
+ delete[] value;
+ delete[] key;
+
+ if (s.ok()) {
+ return true;
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return false;
+ }
+}
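
The helper copies the key and value out of the Java heap with GetByteArrayRegion into freshly allocated native buffers, checks for a pending ArrayIndexOutOfBoundsException after each copy, and frees both buffers on every path. A condensed sketch of that copy-check-release discipline, shown as a hypothetical helper that is not part of this file:

#include <jni.h>

// Copy arr[off..off+len) into a fresh buffer; returns nullptr (with an
// ArrayIndexOutOfBoundsException pending) if the region is out of bounds.
jbyte* CopyRegionOrNull(JNIEnv* env, jbyteArray arr, jint off, jint len) {
  jbyte* buf = new jbyte[len];
  env->GetByteArrayRegion(arr, off, len, buf);
  if (env->ExceptionCheck()) {
    delete[] buf;  // never leak the native copy on the error path
    return nullptr;
  }
  return buf;
}
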
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: put
+ * Signature: (J[BII[BII)V
+ */
+void Java_org_rocksdb_RocksDB_put__J_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ static const ROCKSDB_NAMESPACE::WriteOptions default_write_options =
+ ROCKSDB_NAMESPACE::WriteOptions();
+ rocksdb_put_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
+ jkey_len, jval, jval_off, jval_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: put
+ * Signature: (J[BII[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_put__J_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ static const ROCKSDB_NAMESPACE::WriteOptions default_write_options =
+ ROCKSDB_NAMESPACE::WriteOptions();
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_put_helper(env, db, default_write_options, cf_handle, jkey,
+ jkey_off, jkey_len, jval, jval_off, jval_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: put
+ * Signature: (JJ[BII[BII)V
+ */
+void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlong jwrite_options_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ rocksdb_put_helper(env, db, *write_options, nullptr, jkey, jkey_off, jkey_len,
+ jval, jval_off, jval_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: put
+ * Signature: (JJ[BII[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_put_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
+ jkey_len, jval, jval_off, jval_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: putDirect
+ * Signature: (JJLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;IIJ)V
+ */
+void Java_org_rocksdb_RocksDB_putDirect(
+ JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options_handle,
+ jobject jkey, jint jkey_off, jint jkey_len, jobject jval, jint jval_off,
+ jint jval_len, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto put = [&env, &db, &cf_handle, &write_options](
+ ROCKSDB_NAMESPACE::Slice& key,
+ ROCKSDB_NAMESPACE::Slice& value) {
+ ROCKSDB_NAMESPACE::Status s;
+ if (cf_handle == nullptr) {
+ s = db->Put(*write_options, key, value);
+ } else {
+ s = db->Put(*write_options, cf_handle, key, value);
+ }
+ if (s.ok()) {
+ return;
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ };
+ ROCKSDB_NAMESPACE::JniUtil::kv_op_direct(put, env, jkey, jkey_off, jkey_len,
+ jval, jval_off, jval_len);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::DB::Delete()
+
+/**
+ * @return true if the delete succeeded, false if a Java Exception was thrown
+ */
+bool rocksdb_delete_helper(JNIEnv* env, ROCKSDB_NAMESPACE::DB* db,
+ const ROCKSDB_NAMESPACE::WriteOptions& write_options,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
+ jbyte* key = new jbyte[jkey_len];
+ env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] key;
+ return false;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+
+ ROCKSDB_NAMESPACE::Status s;
+ if (cf_handle != nullptr) {
+ s = db->Delete(write_options, cf_handle, key_slice);
+ } else {
+ // backwards compatibility
+ s = db->Delete(write_options, key_slice);
+ }
+
+ // cleanup
+ delete[] key;
+
+ if (s.ok()) {
+ return true;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return false;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: delete
+ * Signature: (J[BII)V
+ */
+void Java_org_rocksdb_RocksDB_delete__J_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ static const ROCKSDB_NAMESPACE::WriteOptions default_write_options =
+ ROCKSDB_NAMESPACE::WriteOptions();
+ rocksdb_delete_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
+ jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: delete
+ * Signature: (J[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_delete__J_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ static const ROCKSDB_NAMESPACE::WriteOptions default_write_options =
+ ROCKSDB_NAMESPACE::WriteOptions();
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_delete_helper(env, db, default_write_options, cf_handle, jkey,
+ jkey_off, jkey_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: delete
+ * Signature: (JJ[BII)V
+ */
+void Java_org_rocksdb_RocksDB_delete__JJ_3BII(
+ JNIEnv* env, jobject,
+ jlong jdb_handle,
+ jlong jwrite_options,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options);
+ rocksdb_delete_helper(env, db, *write_options, nullptr, jkey, jkey_off,
+ jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: delete
+ * Signature: (JJ[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_delete__JJ_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+ jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_delete_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
+ jkey_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::DB::SingleDelete()
+/**
+ * @return true if the single delete succeeded, false if a Java Exception
+ * was thrown
+ */
+bool rocksdb_single_delete_helper(
+ JNIEnv* env, ROCKSDB_NAMESPACE::DB* db,
+ const ROCKSDB_NAMESPACE::WriteOptions& write_options,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle, jbyteArray jkey,
+ jint jkey_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return false;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+
+ ROCKSDB_NAMESPACE::Status s;
+ if (cf_handle != nullptr) {
+ s = db->SingleDelete(write_options, cf_handle, key_slice);
+ } else {
+ // backwards compatibility
+ s = db->SingleDelete(write_options, key_slice);
+ }
+
+ // trigger java unref on the key.
+ // by passing JNI_ABORT, it will simply release the reference without
+ // copying the result back to the java byte array.
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+
+ if (s.ok()) {
+ return true;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return false;
+}
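
In contrast to the region-copy helpers above, this one pins the key with GetByteArrayElements and releases it with JNI_ABORT, which frees the (possibly copied) buffer without writing anything back to the Java array; since the key is only read, that is the cheapest correct mode. A short sketch of the three release modes, using only standard JNI calls:

#include <jni.h>

void ReadOnlyUse(JNIEnv* env, jbyteArray jkey) {
  jbyte* key = env->GetByteArrayElements(jkey, nullptr);
  if (key == nullptr) {
    return;  // OutOfMemoryError pending
  }
  // ... read the key bytes only; never write through `key` ...
  // Release modes: 0 copies back and frees; JNI_COMMIT copies back and
  // keeps the buffer; JNI_ABORT frees without copying back, which is the
  // cheapest correct choice for read-only access.
  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
}
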
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: singleDelete
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_RocksDB_singleDelete__J_3BI(
+ JNIEnv* env, jobject,
+ jlong jdb_handle,
+ jbyteArray jkey,
+ jint jkey_len) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ static const ROCKSDB_NAMESPACE::WriteOptions default_write_options =
+ ROCKSDB_NAMESPACE::WriteOptions();
+ rocksdb_single_delete_helper(env, db, default_write_options, nullptr,
+ jkey, jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: singleDelete
+ * Signature: (J[BIJ)V
+ */
+void Java_org_rocksdb_RocksDB_singleDelete__J_3BIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ static const ROCKSDB_NAMESPACE::WriteOptions default_write_options =
+ ROCKSDB_NAMESPACE::WriteOptions();
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_single_delete_helper(env, db, default_write_options, cf_handle,
+ jkey, jkey_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: singleDelete
+ * Signature: (JJ[BI)V
+ */
+void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BI(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlong jwrite_options,
+ jbyteArray jkey,
+ jint jkey_len) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options);
+ rocksdb_single_delete_helper(env, db, *write_options, nullptr, jkey,
+ jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: singleDelete
+ * Signature: (JJ[BIJ)V
+ */
+void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+ jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_single_delete_helper(env, db, *write_options, cf_handle, jkey,
+ jkey_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::DB::DeleteRange()
+/**
+ * @return true if the delete range succeeded, false if a Java Exception
+ * was thrown
+ */
+bool rocksdb_delete_range_helper(
+ JNIEnv* env, ROCKSDB_NAMESPACE::DB* db,
+ const ROCKSDB_NAMESPACE::WriteOptions& write_options,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle, jbyteArray jbegin_key,
+ jint jbegin_key_off, jint jbegin_key_len, jbyteArray jend_key,
+ jint jend_key_off, jint jend_key_len) {
+ jbyte* begin_key = new jbyte[jbegin_key_len];
+ env->GetByteArrayRegion(jbegin_key, jbegin_key_off, jbegin_key_len,
+ begin_key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] begin_key;
+ return false;
+ }
+ ROCKSDB_NAMESPACE::Slice begin_key_slice(reinterpret_cast<char*>(begin_key),
+ jbegin_key_len);
+
+ jbyte* end_key = new jbyte[jend_key_len];
+ env->GetByteArrayRegion(jend_key, jend_key_off, jend_key_len, end_key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] begin_key;
+ delete[] end_key;
+ return false;
+ }
+ ROCKSDB_NAMESPACE::Slice end_key_slice(reinterpret_cast<char*>(end_key),
+ jend_key_len);
+
+ ROCKSDB_NAMESPACE::Status s =
+ db->DeleteRange(write_options, cf_handle, begin_key_slice, end_key_slice);
+
+ // cleanup
+ delete[] begin_key;
+ delete[] end_key;
+
+ if (s.ok()) {
+ return true;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return false;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteRange
+ * Signature: (J[BII[BII)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+ jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ static const ROCKSDB_NAMESPACE::WriteOptions default_write_options =
+ ROCKSDB_NAMESPACE::WriteOptions();
+ rocksdb_delete_range_helper(env, db, default_write_options, nullptr,
+ jbegin_key, jbegin_key_off, jbegin_key_len,
+ jend_key, jend_key_off, jend_key_len);
+}
+
+jint rocksdb_get_helper_direct(
+ JNIEnv* env, ROCKSDB_NAMESPACE::DB* db,
+ const ROCKSDB_NAMESPACE::ReadOptions& read_options,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* column_family_handle, jobject jkey,
+ jint jkey_off, jint jkey_len, jobject jval, jint jval_off, jint jval_len,
+ bool* has_exception) {
+ static const int kNotFound = -1;
+ static const int kStatusError = -2;
+ static const int kArgumentError = -3;
+
+ char* key = reinterpret_cast<char*>(env->GetDirectBufferAddress(jkey));
+ if (key == nullptr) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env,
+ "Invalid key argument (argument is not a valid direct ByteBuffer)");
+ *has_exception = true;
+ return kArgumentError;
+ }
+ if (env->GetDirectBufferCapacity(jkey) < (jkey_off + jkey_len)) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env,
+ "Invalid key argument. Capacity is less than requested region (offset "
+ "+ length).");
+ *has_exception = true;
+ return kArgumentError;
+ }
+
+ char* value = reinterpret_cast<char*>(env->GetDirectBufferAddress(jval));
+ if (value == nullptr) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env,
+ "Invalid value argument (argument is not a valid direct ByteBuffer)");
+ *has_exception = true;
+ return kArgumentError;
+ }
+
+ if (env->GetDirectBufferCapacity(jval) < (jval_off + jval_len)) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env,
+ "Invalid value argument. Capacity is less than requested region "
+ "(offset + length).");
+ *has_exception = true;
+ return kArgumentError;
+ }
+
+ key += jkey_off;
+ value += jval_off;
+
+ ROCKSDB_NAMESPACE::Slice key_slice(key, jkey_len);
+
+ // TODO(yhchiang): we might save one memory allocation here by adding
+ // a DB::Get() function which takes preallocated jbyte* as input.
+ std::string cvalue;
+ ROCKSDB_NAMESPACE::Status s;
+ if (column_family_handle != nullptr) {
+ s = db->Get(read_options, column_family_handle, key_slice, &cvalue);
+ } else {
+ // backwards compatibility
+ s = db->Get(read_options, key_slice, &cvalue);
+ }
+
+ if (s.IsNotFound()) {
+ *has_exception = false;
+ return kNotFound;
+ } else if (!s.ok()) {
+ *has_exception = true;
+ // We are throwing a Java exception from the C++ side; that does not
+ // alter C++ control flow, so execution would continue past the throw.
+ // We therefore return a sentinel value immediately afterwards.
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+
+ // Return a dummy value to satisfy the C++ return type; the Java caller
+ // observes the pending exception rather than this value.
+ return kStatusError;
+ }
+
+ const jint cvalue_len = static_cast<jint>(cvalue.size());
+ const jint length = std::min(jval_len, cvalue_len);
+
+ memcpy(value, cvalue.c_str(), length);
+
+ *has_exception = false;
+ return cvalue_len;
+}
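
The jint returned by this helper multiplexes four outcomes: the sentinels kNotFound (-1), kStatusError (-2) and kArgumentError (-3), or otherwise the full value length, which can exceed jval_len because only std::min(jval_len, cvalue_len) bytes were copied; a caller must compare the return value with its buffer size to detect truncation. A hypothetical caller-side interpretation:

// Interpreting the helper's jint on the calling side (sketch).
void HandleGetResult(int ret, int buf_len) {
  if (ret == -1) {
    // kNotFound: the key does not exist
  } else if (ret < 0) {
    // kStatusError (-2) or kArgumentError (-3): a Java exception is
    // already pending, so the caller should simply unwind
  } else if (ret > buf_len) {
    // truncated: only the first buf_len bytes were copied; retry with a
    // buffer of at least `ret` bytes to obtain the full value
  } else {
    // complete: the value occupies the first `ret` bytes of the buffer
  }
}
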
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteRange
+ * Signature: (J[BII[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+ jbyteArray jend_key, jint jend_key_off, jint jend_key_len,
+ jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ static const ROCKSDB_NAMESPACE::WriteOptions default_write_options =
+ ROCKSDB_NAMESPACE::WriteOptions();
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_delete_range_helper(env, db, default_write_options, cf_handle,
+ jbegin_key, jbegin_key_off, jbegin_key_len,
+ jend_key, jend_key_off, jend_key_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteRange
+ * Signature: (JJ[BII[BII)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+ jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+ jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options);
+ rocksdb_delete_range_helper(env, db, *write_options, nullptr, jbegin_key,
+ jbegin_key_off, jbegin_key_len, jend_key,
+ jend_key_off, jend_key_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteRange
+ * Signature: (JJ[BII[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+ jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+ jbyteArray jend_key, jint jend_key_off, jint jend_key_len,
+ jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_delete_range_helper(env, db, *write_options, cf_handle,
+ jbegin_key, jbegin_key_off, jbegin_key_len,
+ jend_key, jend_key_off, jend_key_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getDirect
+ * Signature: (JJLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;IIJ)I
+ */
+jint Java_org_rocksdb_RocksDB_getDirect(JNIEnv* env, jobject /*jdb*/,
+ jlong jdb_handle, jlong jropt_handle,
+ jobject jkey, jint jkey_off,
+ jint jkey_len, jobject jval,
+ jint jval_off, jint jval_len,
+ jlong jcf_handle) {
+ auto* db_handle = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* ro_opt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jropt_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ bool has_exception = false;
+ return rocksdb_get_helper_direct(
+ env, db_handle,
+ ro_opt == nullptr ? ROCKSDB_NAMESPACE::ReadOptions() : *ro_opt, cf_handle,
+ jkey, jkey_off, jkey_len, jval, jval_off, jval_len, &has_exception);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::DB::Merge
+
+/**
+ * @return true if the merge succeeded, false if a Java Exception was thrown
+ */
+bool rocksdb_merge_helper(JNIEnv* env, ROCKSDB_NAMESPACE::DB* db,
+ const ROCKSDB_NAMESPACE::WriteOptions& write_options,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ jbyte* key = new jbyte[jkey_len];
+ env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] key;
+ return false;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+
+ jbyte* value = new jbyte[jval_len];
+ env->GetByteArrayRegion(jval, jval_off, jval_len, value);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] value;
+ delete[] key;
+ return false;
+ }
+ ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast<char*>(value),
+ jval_len);
+
+ ROCKSDB_NAMESPACE::Status s;
+ if (cf_handle != nullptr) {
+ s = db->Merge(write_options, cf_handle, key_slice, value_slice);
+ } else {
+ s = db->Merge(write_options, key_slice, value_slice);
+ }
+
+ // cleanup
+ delete[] value;
+ delete[] key;
+
+ if (s.ok()) {
+ return true;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return false;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: merge
+ * Signature: (J[BII[BII)V
+ */
+void Java_org_rocksdb_RocksDB_merge__J_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ static const ROCKSDB_NAMESPACE::WriteOptions default_write_options =
+ ROCKSDB_NAMESPACE::WriteOptions();
+ rocksdb_merge_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
+ jkey_len, jval, jval_off, jval_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: merge
+ * Signature: (J[BII[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_merge__J_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ static const ROCKSDB_NAMESPACE::WriteOptions default_write_options =
+ ROCKSDB_NAMESPACE::WriteOptions();
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_merge_helper(env, db, default_write_options, cf_handle, jkey,
+ jkey_off, jkey_len, jval, jval_off, jval_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: merge
+ * Signature: (JJ[BII[BII)V
+ */
+void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ rocksdb_merge_helper(env, db, *write_options, nullptr, jkey, jkey_off,
+ jkey_len, jval, jval_off, jval_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: merge
+ * Signature: (JJ[BII[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_merge_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
+ jkey_len, jval, jval_off, jval_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ }
+}
+
+jlong rocksdb_iterator_helper(
+ ROCKSDB_NAMESPACE::DB* db, ROCKSDB_NAMESPACE::ReadOptions read_options,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle) {
+ ROCKSDB_NAMESPACE::Iterator* iterator = nullptr;
+ if (cf_handle != nullptr) {
+ iterator = db->NewIterator(read_options, cf_handle);
+ } else {
+ iterator = db->NewIterator(read_options);
+ }
+ return reinterpret_cast<jlong>(iterator);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteDirect
+ * Signature: (JJLjava/nio/ByteBuffer;IIJ)V
+ */
+void Java_org_rocksdb_RocksDB_deleteDirect(JNIEnv* env, jobject /*jdb*/,
+ jlong jdb_handle,
+ jlong jwrite_options, jobject jkey,
+ jint jkey_offset, jint jkey_len,
+ jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto remove = [&env, &db, &write_options,
+ &cf_handle](ROCKSDB_NAMESPACE::Slice& key) {
+ ROCKSDB_NAMESPACE::Status s;
+ if (cf_handle == nullptr) {
+ s = db->Delete(*write_options, key);
+ } else {
+ s = db->Delete(*write_options, cf_handle, key);
+ }
+ if (s.ok()) {
+ return;
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ };
+ ROCKSDB_NAMESPACE::JniUtil::k_op_direct(remove, env, jkey, jkey_offset,
+ jkey_len);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::DB::Write
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: write0
+ * Signature: (JJJ)V
+ */
+void Java_org_rocksdb_RocksDB_write0(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlong jwrite_options_handle, jlong jwb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+
+ ROCKSDB_NAMESPACE::Status s = db->Write(*write_options, wb);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: write1
+ * Signature: (JJJ)V
+ */
+void Java_org_rocksdb_RocksDB_write1(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlong jwrite_options_handle, jlong jwbwi_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ auto* wb = wbwi->GetWriteBatch();
+
+ ROCKSDB_NAMESPACE::Status s = db->Write(*write_options, wb);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::DB::Get
+
+jbyteArray rocksdb_get_helper(
+ JNIEnv* env, ROCKSDB_NAMESPACE::DB* db,
+ const ROCKSDB_NAMESPACE::ReadOptions& read_opt,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* column_family_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
+ jbyte* key = new jbyte[jkey_len];
+ env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] key;
+ return nullptr;
+ }
+
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+
+ std::string value;
+ ROCKSDB_NAMESPACE::Status s;
+ if (column_family_handle != nullptr) {
+ s = db->Get(read_opt, column_family_handle, key_slice, &value);
+ } else {
+ // backwards compatibility
+ s = db->Get(read_opt, key_slice, &value);
+ }
+
+ // cleanup
+ delete[] key;
+
+ if (s.IsNotFound()) {
+ return nullptr;
+ }
+
+ if (s.ok()) {
+ jbyteArray jret_value = ROCKSDB_NAMESPACE::JniUtil::copyBytes(env, value);
+ if (jret_value == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+ return jret_value;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: get
+ * Signature: (J[BII)[B
+ */
+jbyteArray Java_org_rocksdb_RocksDB_get__J_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
+ return rocksdb_get_helper(
+ env, reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+ ROCKSDB_NAMESPACE::ReadOptions(), nullptr, jkey, jkey_off, jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: get
+ * Signature: (J[BIIJ)[B
+ */
+jbyteArray Java_org_rocksdb_RocksDB_get__J_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
+ auto db_handle = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ return rocksdb_get_helper(env, db_handle, ROCKSDB_NAMESPACE::ReadOptions(),
+ cf_handle, jkey, jkey_off, jkey_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ return nullptr;
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: get
+ * Signature: (JJ[BII)[B
+ */
+jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BII(
+ JNIEnv* env, jobject,
+ jlong jdb_handle, jlong jropt_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
+ return rocksdb_get_helper(
+ env, reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+ *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jropt_handle), nullptr,
+ jkey, jkey_off, jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: get
+ * Signature: (JJ[BIIJ)[B
+ */
+jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
+ auto* db_handle = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto& ro_opt =
+ *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jropt_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ return rocksdb_get_helper(
+ env, db_handle, ro_opt, cf_handle, jkey, jkey_off, jkey_len);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ return nullptr;
+ }
+}
+
+jint rocksdb_get_helper(
+ JNIEnv* env, ROCKSDB_NAMESPACE::DB* db,
+ const ROCKSDB_NAMESPACE::ReadOptions& read_options,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* column_family_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
+ jint jval_off, jint jval_len, bool* has_exception) {
+ static const int kNotFound = -1;
+ static const int kStatusError = -2;
+
+ jbyte* key = new jbyte[jkey_len];
+ env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] key;
+ *has_exception = true;
+ return kStatusError;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+
+ // TODO(yhchiang): we might save one memory allocation here by adding
+ // a DB::Get() function which takes preallocated jbyte* as input.
+ std::string cvalue;
+ ROCKSDB_NAMESPACE::Status s;
+ if (column_family_handle != nullptr) {
+ s = db->Get(read_options, column_family_handle, key_slice, &cvalue);
+ } else {
+ // backwards compatibility
+ s = db->Get(read_options, key_slice, &cvalue);
+ }
+
+ // cleanup
+ delete[] key;
+
+ if (s.IsNotFound()) {
+ *has_exception = false;
+ return kNotFound;
+ } else if (!s.ok()) {
+ *has_exception = true;
+ // We are throwing a Java exception from the C++ side; that does not
+ // alter C++ control flow, so execution would continue past the throw.
+ // We therefore return a sentinel value immediately afterwards.
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+
+ // Return a dummy const value to avoid compilation error, although
+ // java side might not have a chance to get the return value :)
+ return kStatusError;
+ }
+
+ const jint cvalue_len = static_cast<jint>(cvalue.size());
+ const jint length = std::min(jval_len, cvalue_len);
+
+ env->SetByteArrayRegion(
+ jval, jval_off, length,
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(cvalue.c_str())));
+ if (env->ExceptionCheck()) {
+ // exception thrown: OutOfMemoryError
+ *has_exception = true;
+ return kStatusError;
+ }
+
+ *has_exception = false;
+ return cvalue_len;
+}
+
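+// A Java-side sketch of the contract implemented by the buffered helper
+// above (variable names invented for illustration; RocksDB.NOT_FOUND is the
+// real Java constant for the -1 sentinel):
+//
+//   byte[] value = new byte[64];
+//   int len = db.get(key, value);
+//   if (len == RocksDB.NOT_FOUND) {
+//     // key absent
+//   } else if (len > value.length) {
+//     // value truncated to value.length bytes; len is the full length,
+//     // so the caller can retry with a larger buffer
+//   }
+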
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: get
+ * Signature: (J[BII[BII)I
+ */
+jint Java_org_rocksdb_RocksDB_get__J_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ bool has_exception = false;
+ return rocksdb_get_helper(
+ env, reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+ ROCKSDB_NAMESPACE::ReadOptions(), nullptr, jkey, jkey_off, jkey_len, jval,
+ jval_off, jval_len, &has_exception);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: get
+ * Signature: (J[BII[BIIJ)I
+ */
+jint Java_org_rocksdb_RocksDB_get__J_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ jlong jcf_handle) {
+ auto* db_handle = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ bool has_exception = false;
+ return rocksdb_get_helper(env, db_handle, ROCKSDB_NAMESPACE::ReadOptions(),
+ cf_handle, jkey, jkey_off, jkey_len, jval,
+ jval_off, jval_len, &has_exception);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ // will never be evaluated
+ return 0;
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: get
+ * Signature: (JJ[BII[BII)I
+ */
+jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ bool has_exception = false;
+ return rocksdb_get_helper(
+ env, reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+ *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jropt_handle), nullptr,
+ jkey, jkey_off, jkey_len, jval, jval_off, jval_len, &has_exception);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: get
+ * Signature: (JJ[BII[BIIJ)I
+ */
+jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ jlong jcf_handle) {
+ auto* db_handle = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto& ro_opt =
+ *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jropt_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ bool has_exception = false;
+ return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle,
+ jkey, jkey_off, jkey_len,
+ jval, jval_off, jval_len,
+ &has_exception);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Invalid ColumnFamilyHandle."));
+ // will never be evaluated
+ return 0;
+ }
+}
+
+inline void multi_get_helper_release_keys(
+ JNIEnv* env, std::vector<std::pair<jbyte*, jobject>>& keys_to_free) {
+ auto end = keys_to_free.end();
+ for (auto it = keys_to_free.begin(); it != end; ++it) {
+ delete[] it->first;
+ env->DeleteLocalRef(it->second);
+ }
+ keys_to_free.clear();
+}
+
+/**
+ * Helper for MultiGet, optionally across column families.
+ *
+ * @return byte[][] of values, or nullptr if an exception occurs
+ */
+jobjectArray multi_get_helper(JNIEnv* env, jobject, ROCKSDB_NAMESPACE::DB* db,
+ const ROCKSDB_NAMESPACE::ReadOptions& rOpt,
+ jobjectArray jkeys, jintArray jkey_offs,
+ jintArray jkey_lens,
+ jlongArray jcolumn_family_handles) {
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
+ if (jcolumn_family_handles != nullptr) {
+ const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
+
+ jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
+ if (jcfh == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ for (jsize i = 0; i < len_cols; i++) {
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcfh[i]);
+ cf_handles.push_back(cf_handle);
+ }
+ env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
+ }
+
+ const jsize len_keys = env->GetArrayLength(jkeys);
+ if (env->EnsureLocalCapacity(len_keys) != 0) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ jint* jkey_off = env->GetIntArrayElements(jkey_offs, nullptr);
+ if (jkey_off == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ jint* jkey_len = env->GetIntArrayElements(jkey_lens, nullptr);
+ if (jkey_len == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+ return nullptr;
+ }
+
+ std::vector<ROCKSDB_NAMESPACE::Slice> keys;
+ std::vector<std::pair<jbyte*, jobject>> keys_to_free;
+ for (jsize i = 0; i < len_keys; i++) {
+ jobject jkey = env->GetObjectArrayElement(jkeys, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+ env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+ multi_get_helper_release_keys(env, keys_to_free);
+ return nullptr;
+ }
+
+ jbyteArray jkey_ba = reinterpret_cast<jbyteArray>(jkey);
+
+ const jint len_key = jkey_len[i];
+ jbyte* key = new jbyte[len_key];
+ env->GetByteArrayRegion(jkey_ba, jkey_off[i], len_key, key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] key;
+ env->DeleteLocalRef(jkey);
+ env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+ env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+ multi_get_helper_release_keys(env, keys_to_free);
+ return nullptr;
+ }
+
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key), len_key);
+ keys.push_back(key_slice);
+
+ keys_to_free.push_back(std::pair<jbyte*, jobject>(key, jkey));
+ }
+
+  // cleanup jkey_off and jkey_len
+ env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+ env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+
+ std::vector<std::string> values;
+ std::vector<ROCKSDB_NAMESPACE::Status> s;
+ if (cf_handles.size() == 0) {
+ s = db->MultiGet(rOpt, keys, &values);
+ } else {
+ s = db->MultiGet(rOpt, cf_handles, keys, &values);
+ }
+
+ // free up allocated byte arrays
+ multi_get_helper_release_keys(env, keys_to_free);
+
+ // prepare the results
+ jobjectArray jresults = ROCKSDB_NAMESPACE::ByteJni::new2dByteArray(
+ env, static_cast<jsize>(s.size()));
+ if (jresults == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+  // TODO(AR) it is not clear why EnsureLocalCapacity is needed for the
+  // loop, as we clean up references with env->DeleteLocalRef(jentry_value);
+ if (env->EnsureLocalCapacity(static_cast<jint>(s.size())) != 0) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ // add to the jresults
+ for (std::vector<ROCKSDB_NAMESPACE::Status>::size_type i = 0; i != s.size();
+ i++) {
+ if (s[i].ok()) {
+ std::string* value = &values[i];
+ const jsize jvalue_len = static_cast<jsize>(value->size());
+ jbyteArray jentry_value = env->NewByteArray(jvalue_len);
+ if (jentry_value == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(
+ jentry_value, 0, static_cast<jsize>(jvalue_len),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value->c_str())));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jentry_value);
+ return nullptr;
+ }
+
+ env->SetObjectArrayElement(jresults, static_cast<jsize>(i), jentry_value);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jentry_value);
+ return nullptr;
+ }
+
+ env->DeleteLocalRef(jentry_value);
+ }
+ }
+
+ return jresults;
+}
+
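+// An illustrative Java-side use of the helper above (keys invented; entries
+// for keys that were not found come back as null, since the loop above only
+// populates slots whose status is ok()):
+//
+//   List<byte[]> values = db.multiGetAsList(Arrays.asList(
+//       "k1".getBytes(UTF_8), "k2".getBytes(UTF_8)));
+//   // values.get(i) == null  =>  key i not found
+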
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: multiGet
+ * Signature: (J[[B[I[I)[[B
+ */
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I(
+ JNIEnv* env, jobject jdb, jlong jdb_handle,
+ jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens) {
+ return multi_get_helper(
+ env, jdb, reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+ ROCKSDB_NAMESPACE::ReadOptions(), jkeys, jkey_offs, jkey_lens, nullptr);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: multiGet
+ * Signature: (J[[B[I[I[J)[[B
+ */
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I_3J(
+ JNIEnv* env, jobject jdb, jlong jdb_handle,
+ jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
+ jlongArray jcolumn_family_handles) {
+ return multi_get_helper(env, jdb,
+ reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+ ROCKSDB_NAMESPACE::ReadOptions(), jkeys, jkey_offs,
+ jkey_lens, jcolumn_family_handles);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: multiGet
+ * Signature: (JJ[[B[I[I)[[B
+ */
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I(
+ JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
+ jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens) {
+ return multi_get_helper(
+ env, jdb, reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+ *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jropt_handle), jkeys,
+ jkey_offs, jkey_lens, nullptr);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: multiGet
+ * Signature: (JJ[[B[I[I[J)[[B
+ */
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I_3J(
+ JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
+ jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
+ jlongArray jcolumn_family_handles) {
+ return multi_get_helper(
+ env, jdb, reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+ *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jropt_handle), jkeys,
+ jkey_offs, jkey_lens, jcolumn_family_handles);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// ROCKSDB_NAMESPACE::DB::KeyMayExist
+bool key_may_exist_helper(JNIEnv* env, jlong jdb_handle, jlong jcf_handle,
+ jlong jread_opts_handle,
+ jbyteArray jkey, jint jkey_offset, jint jkey_len,
+ bool* has_exception, std::string* value, bool* value_found) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+ ROCKSDB_NAMESPACE::ReadOptions read_opts =
+ jread_opts_handle == 0
+ ? ROCKSDB_NAMESPACE::ReadOptions()
+ : *(reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(
+ jread_opts_handle));
+
+ jbyte* key = new jbyte[jkey_len];
+ env->GetByteArrayRegion(jkey, jkey_offset, jkey_len, key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] key;
+ *has_exception = true;
+ return false;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+
+ const bool exists = db->KeyMayExist(
+ read_opts, cf_handle, key_slice, value, value_found);
+
+ // cleanup
+ delete[] key;
+
+ return exists;
+}
+
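+// KeyMayExist is probabilistic: `true` means the key *may* exist (e.g. the
+// bloom filter did not rule it out), while `false` means it definitely does
+// not. A minimal Java-side sketch (the exact method shape varies across
+// RocksJava versions; treat this as illustrative):
+//
+//   if (!db.keyMayExist(key)) {
+//     return null;  // definitely absent, skip the full get()
+//   }
+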
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: keyMayExist
+ * Signature: (JJJ[BII)Z
+ */
+jboolean Java_org_rocksdb_RocksDB_keyMayExist(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jlong jread_opts_handle,
+ jbyteArray jkey, jint jkey_offset, jint jkey_len) {
+
+ bool has_exception = false;
+ std::string value;
+ bool value_found = false;
+
+ const bool exists = key_may_exist_helper(
+ env, jdb_handle, jcf_handle, jread_opts_handle,
+ jkey, jkey_offset, jkey_len,
+ &has_exception, &value, &value_found);
+
+ if (has_exception) {
+ // java exception already raised
+ return false;
+ }
+
+ return static_cast<jboolean>(exists);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: keyMayExistFoundValue
+ * Signature: (JJJ[BII)[[B
+ */
+jobjectArray Java_org_rocksdb_RocksDB_keyMayExistFoundValue(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jlong jread_opts_handle,
+ jbyteArray jkey, jint jkey_offset, jint jkey_len) {
+
+ bool has_exception = false;
+ std::string value;
+ bool value_found = false;
+
+ const bool exists = key_may_exist_helper(
+ env, jdb_handle, jcf_handle, jread_opts_handle,
+ jkey, jkey_offset, jkey_len,
+ &has_exception, &value, &value_found);
+
+ if (has_exception) {
+ // java exception already raised
+ return nullptr;
+ }
+
+ jbyte result_flags[1];
+ if (!exists) {
+ result_flags[0] = 0;
+ } else if (!value_found) {
+ result_flags[0] = 1;
+ } else {
+ // found
+ result_flags[0] = 2;
+ }
+
+ jobjectArray jresults = ROCKSDB_NAMESPACE::ByteJni::new2dByteArray(env, 2);
+ if (jresults == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ // prepare the result flag
+ jbyteArray jresult_flags = env->NewByteArray(1);
+ if (jresult_flags == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetByteArrayRegion(jresult_flags, 0, 1, result_flags);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresult_flags);
+ return nullptr;
+ }
+
+ env->SetObjectArrayElement(jresults, 0, jresult_flags);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresult_flags);
+ return nullptr;
+ }
+
+ env->DeleteLocalRef(jresult_flags);
+
+ if (result_flags[0] == 2) {
+ // set the value
+ const jsize jvalue_len = static_cast<jsize>(value.size());
+ jbyteArray jresult_value = env->NewByteArray(jvalue_len);
+ if (jresult_value == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetByteArrayRegion(jresult_value, 0, jvalue_len,
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value.data())));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresult_value);
+ return nullptr;
+ }
+ env->SetObjectArrayElement(jresults, 1, jresult_value);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresult_value);
+ return nullptr;
+ }
+
+ env->DeleteLocalRef(jresult_value);
+ }
+
+ return jresults;
+}
+
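+// The returned byte[][] has two slots: element 0 is a one-byte status flag
+// and element 1 carries the value only when the flag is 2:
+//
+//   flag 0 -> key definitely does not exist
+//   flag 1 -> key may exist, but no value could be fetched cheaply
+//   flag 2 -> key may exist and a value was retrieved
+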
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: iterator
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RocksDB_iterator__J(
+ JNIEnv*, jobject, jlong db_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(db_handle);
+ return rocksdb_iterator_helper(db, ROCKSDB_NAMESPACE::ReadOptions(), nullptr);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: iterator
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_RocksDB_iterator__JJ(
+ JNIEnv*, jobject, jlong db_handle, jlong jread_options_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(db_handle);
+ auto& read_options =
+ *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jread_options_handle);
+ return rocksdb_iterator_helper(db, read_options, nullptr);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: iteratorCF
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_RocksDB_iteratorCF__JJ(
+ JNIEnv*, jobject, jlong db_handle, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(db_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ return rocksdb_iterator_helper(db, ROCKSDB_NAMESPACE::ReadOptions(),
+ cf_handle);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: iteratorCF
+ * Signature: (JJJ)J
+ */
+jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ(
+ JNIEnv*, jobject,
+ jlong db_handle, jlong jcf_handle, jlong jread_options_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(db_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto& read_options =
+ *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jread_options_handle);
+ return rocksdb_iterator_helper(db, read_options, cf_handle);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: iterators
+ * Signature: (J[JJ)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_iterators(
+ JNIEnv* env, jobject, jlong db_handle,
+ jlongArray jcolumn_family_handles,
+ jlong jread_options_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(db_handle);
+ auto& read_options =
+ *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jread_options_handle);
+
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
+ if (jcolumn_family_handles != nullptr) {
+ const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
+ jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
+ if (jcfh == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ for (jsize i = 0; i < len_cols; i++) {
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcfh[i]);
+ cf_handles.push_back(cf_handle);
+ }
+
+ env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
+ }
+
+ std::vector<ROCKSDB_NAMESPACE::Iterator*> iterators;
+ ROCKSDB_NAMESPACE::Status s =
+ db->NewIterators(read_options, cf_handles, &iterators);
+ if (s.ok()) {
+ jlongArray jLongArray =
+ env->NewLongArray(static_cast<jsize>(iterators.size()));
+ if (jLongArray == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ for (std::vector<ROCKSDB_NAMESPACE::Iterator*>::size_type i = 0;
+ i < iterators.size(); i++) {
+ env->SetLongArrayRegion(
+ jLongArray, static_cast<jsize>(i), 1,
+ const_cast<jlong*>(reinterpret_cast<const jlong*>(&iterators[i])));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jLongArray);
+ return nullptr;
+ }
+ }
+
+ return jLongArray;
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+}
+
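+// Each element of the returned long[] is a raw Iterator* whose ownership
+// passes to Java; the caller is expected to wrap every handle (e.g. in a
+// RocksIterator) and close it, otherwise the native iterator leaks.
+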
+/*
+ * Method: getSnapshot
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RocksDB_getSnapshot(
+ JNIEnv*, jobject, jlong db_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(db_handle);
+ const ROCKSDB_NAMESPACE::Snapshot* snapshot = db->GetSnapshot();
+ return reinterpret_cast<jlong>(snapshot);
+}
+
+/*
+ * Method: releaseSnapshot
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_RocksDB_releaseSnapshot(
+ JNIEnv*, jobject, jlong db_handle,
+ jlong snapshot_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(db_handle);
+ auto* snapshot =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Snapshot*>(snapshot_handle);
+ db->ReleaseSnapshot(snapshot);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getProperty
+ * Signature: (JJLjava/lang/String;I)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_RocksDB_getProperty(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jstring jproperty, jint jproperty_len) {
+ const char* property = env->GetStringUTFChars(jproperty, nullptr);
+ if (property == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ ROCKSDB_NAMESPACE::Slice property_name(property, jproperty_len);
+
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+
+ std::string property_value;
+ bool retCode = db->GetProperty(cf_handle, property_name, &property_value);
+ env->ReleaseStringUTFChars(jproperty, property);
+
+ if (retCode) {
+ return env->NewStringUTF(property_value.c_str());
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::NotFound());
+ return nullptr;
+}
+
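+// Illustrative Java-side usage (the two property names are standard RocksDB
+// properties; the surrounding code is a sketch):
+//
+//   String stats   = db.getProperty("rocksdb.stats");
+//   String numKeys = db.getProperty("rocksdb.estimate-num-keys");
+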
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getMapProperty
+ * Signature: (JJLjava/lang/String;I)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_RocksDB_getMapProperty(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jstring jproperty, jint jproperty_len) {
+ const char* property = env->GetStringUTFChars(jproperty, nullptr);
+ if (property == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ ROCKSDB_NAMESPACE::Slice property_name(property, jproperty_len);
+
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+
+ std::map<std::string, std::string> property_value;
+ bool retCode = db->GetMapProperty(cf_handle, property_name, &property_value);
+ env->ReleaseStringUTFChars(jproperty, property);
+
+ if (retCode) {
+ return ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, &property_value);
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::NotFound());
+ return nullptr;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getLongProperty
+ * Signature: (JJLjava/lang/String;I)J
+ */
+jlong Java_org_rocksdb_RocksDB_getLongProperty(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jstring jproperty, jint jproperty_len) {
+ const char* property = env->GetStringUTFChars(jproperty, nullptr);
+ if (property == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+ ROCKSDB_NAMESPACE::Slice property_name(property, jproperty_len);
+
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+
+ uint64_t property_value;
+ bool retCode = db->GetIntProperty(cf_handle, property_name, &property_value);
+ env->ReleaseStringUTFChars(jproperty, property);
+
+ if (retCode) {
+ return property_value;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::NotFound());
+ return 0;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: resetStats
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_resetStats(
+ JNIEnv *, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ db->ResetStats();
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getAggregatedLongProperty
+ * Signature: (JLjava/lang/String;I)J
+ */
+jlong Java_org_rocksdb_RocksDB_getAggregatedLongProperty(
+ JNIEnv* env, jobject, jlong db_handle,
+ jstring jproperty, jint jproperty_len) {
+ const char* property = env->GetStringUTFChars(jproperty, nullptr);
+ if (property == nullptr) {
+ return 0;
+ }
+ ROCKSDB_NAMESPACE::Slice property_name(property, jproperty_len);
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(db_handle);
+ uint64_t property_value = 0;
+ bool retCode = db->GetAggregatedIntProperty(property_name, &property_value);
+ env->ReleaseStringUTFChars(jproperty, property);
+
+ if (retCode) {
+ return property_value;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::NotFound());
+ return 0;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getApproximateSizes
+ * Signature: (JJ[JB)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_getApproximateSizes(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jlongArray jrange_slice_handles, jbyte jinclude_flags) {
+ const jsize jlen = env->GetArrayLength(jrange_slice_handles);
+ const size_t range_count = jlen / 2;
+
+ jboolean jranges_is_copy = JNI_FALSE;
+ jlong* jranges = env->GetLongArrayElements(jrange_slice_handles,
+ &jranges_is_copy);
+ if (jranges == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ auto ranges = std::unique_ptr<ROCKSDB_NAMESPACE::Range[]>(
+ new ROCKSDB_NAMESPACE::Range[range_count]);
+  for (jsize i = 0, j = 0; i < jlen; ++i) {
+    auto* start = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(jranges[i]);
+    auto* limit = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(jranges[++i]);
+    ranges.get()[j++] = ROCKSDB_NAMESPACE::Range(*start, *limit);
+  }
+
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+
+ auto sizes = std::unique_ptr<uint64_t[]>(new uint64_t[range_count]);
+ db->GetApproximateSizes(cf_handle, ranges.get(),
+ static_cast<int>(range_count), sizes.get(),
+ static_cast<uint8_t>(jinclude_flags));
+
+ // release LongArrayElements
+ env->ReleaseLongArrayElements(jrange_slice_handles, jranges, JNI_ABORT);
+
+ // prepare results
+ auto results = std::unique_ptr<jlong[]>(new jlong[range_count]);
+ for (size_t i = 0; i < range_count; ++i) {
+ results.get()[i] = static_cast<jlong>(sizes.get()[i]);
+ }
+
+  const jsize jrange_count = static_cast<jsize>(range_count);
+ jlongArray jresults = env->NewLongArray(jrange_count);
+ if (jresults == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetLongArrayRegion(jresults, 0, jrange_count, results.get());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresults);
+ return nullptr;
+ }
+
+ return jresults;
+}
+
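+// jrange_slice_handles is interpreted as interleaved pairs of native Slice
+// pointers, [start0, limit0, start1, limit1, ...], so a 2 * N element array
+// describes N ranges and yields N approximate sizes. An illustrative
+// Java-side call (range bounds invented):
+//
+//   long[] sizes = db.getApproximateSizes(
+//       Arrays.asList(new Range(new Slice("a"), new Slice("m"))),
+//       SizeApproximationFlag.INCLUDE_FILES);
+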
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getApproximateMemTableStats
+ * Signature: (JJJJ)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_getApproximateMemTableStats(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jlong jstartHandle, jlong jlimitHandle) {
+ auto* start = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(jstartHandle);
+ auto* limit = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(jlimitHandle);
+ const ROCKSDB_NAMESPACE::Range range(*start, *limit);
+
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+
+ uint64_t count = 0;
+ uint64_t sizes = 0;
+ db->GetApproximateMemTableStats(cf_handle, range, &count, &sizes);
+
+ // prepare results
+ jlong results[2] = {
+ static_cast<jlong>(count),
+ static_cast<jlong>(sizes)};
+
+  jlongArray jsizes = env->NewLongArray(2);
+  if (jsizes == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+
+  env->SetLongArrayRegion(jsizes, 0, 2, results);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jsizes);
+ return nullptr;
+ }
+
+ return jsizes;
+}
+
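+// The two returned jlongs are {count, size}. A Java-side sketch using the
+// public wrapper (illustrative):
+//
+//   RocksDB.CountAndSize stats = db.getApproximateMemTableStats(range);
+//   // stats.count ~ estimated number of memtable entries in the range
+//   // stats.size  ~ estimated total byte size of those entries
+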
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: compactRange
+ * Signature: (J[BI[BIJJ)V
+ */
+void Java_org_rocksdb_RocksDB_compactRange(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jbegin, jint jbegin_len,
+ jbyteArray jend, jint jend_len,
+ jlong jcompact_range_opts_handle,
+ jlong jcf_handle) {
+ jboolean has_exception = JNI_FALSE;
+
+ std::string str_begin;
+ if (jbegin_len > 0) {
+ str_begin = ROCKSDB_NAMESPACE::JniUtil::byteString<std::string>(
+ env, jbegin, jbegin_len,
+ [](const char* str, const size_t len) { return std::string(str, len); },
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
+ }
+
+ std::string str_end;
+ if (jend_len > 0) {
+ str_end = ROCKSDB_NAMESPACE::JniUtil::byteString<std::string>(
+ env, jend, jend_len,
+ [](const char* str, const size_t len) { return std::string(str, len); },
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
+ }
+
+ ROCKSDB_NAMESPACE::CompactRangeOptions* compact_range_opts = nullptr;
+ if (jcompact_range_opts_handle == 0) {
+ // NOTE: we DO own the pointer!
+ compact_range_opts = new ROCKSDB_NAMESPACE::CompactRangeOptions();
+ } else {
+ // NOTE: we do NOT own the pointer!
+ compact_range_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactRangeOptions*>(
+ jcompact_range_opts_handle);
+ }
+
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+
+ ROCKSDB_NAMESPACE::Status s;
+ if (jbegin_len > 0 || jend_len > 0) {
+ const ROCKSDB_NAMESPACE::Slice begin(str_begin);
+ const ROCKSDB_NAMESPACE::Slice end(str_end);
+ s = db->CompactRange(*compact_range_opts, cf_handle, &begin, &end);
+ } else {
+ s = db->CompactRange(*compact_range_opts, cf_handle, nullptr, nullptr);
+ }
+
+ if (jcompact_range_opts_handle == 0) {
+ delete compact_range_opts;
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
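+// Illustrative Java-side calls (key bounds invented):
+//
+//   db.compactRange();                                  // whole key space
+//   db.compactRange("a".getBytes(UTF_8), "m".getBytes(UTF_8));
+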
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: setOptions
+ * Signature: (JJ[Ljava/lang/String;[Ljava/lang/String;)V
+ */
+void Java_org_rocksdb_RocksDB_setOptions(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jobjectArray jkeys, jobjectArray jvalues) {
+ const jsize len = env->GetArrayLength(jkeys);
+ assert(len == env->GetArrayLength(jvalues));
+
+ std::unordered_map<std::string, std::string> options_map;
+ for (jsize i = 0; i < len; i++) {
+ jobject jobj_key = env->GetObjectArrayElement(jkeys, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ return;
+ }
+
+ jobject jobj_value = env->GetObjectArrayElement(jvalues, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ jboolean has_exception = JNI_FALSE;
+ std::string s_key = ROCKSDB_NAMESPACE::JniUtil::copyStdString(
+ env, reinterpret_cast<jstring>(jobj_key), &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->DeleteLocalRef(jobj_value);
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ std::string s_value = ROCKSDB_NAMESPACE::JniUtil::copyStdString(
+ env, reinterpret_cast<jstring>(jobj_value), &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->DeleteLocalRef(jobj_value);
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ options_map[s_key] = s_value;
+
+ env->DeleteLocalRef(jobj_key);
+ env->DeleteLocalRef(jobj_value);
+ }
+
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto s = db->SetOptions(cf_handle, options_map);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
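+// A Java-side sketch of changing a mutable column-family option at runtime
+// (option name and value are examples; the public API marshals the builder
+// into the key/value string arrays consumed above):
+//
+//   db.setOptions(columnFamilyHandle,
+//       MutableColumnFamilyOptions.builder()
+//           .setWriteBufferSize(8 * 1024 * 1024)
+//           .build());
+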
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: setDBOptions
+ * Signature: (J[Ljava/lang/String;[Ljava/lang/String;)V
+ */
+void Java_org_rocksdb_RocksDB_setDBOptions(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jobjectArray jkeys, jobjectArray jvalues) {
+ const jsize len = env->GetArrayLength(jkeys);
+ assert(len == env->GetArrayLength(jvalues));
+
+ std::unordered_map<std::string, std::string> options_map;
+ for (jsize i = 0; i < len; i++) {
+ jobject jobj_key = env->GetObjectArrayElement(jkeys, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ return;
+ }
+
+ jobject jobj_value = env->GetObjectArrayElement(jvalues, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ jboolean has_exception = JNI_FALSE;
+ std::string s_key = ROCKSDB_NAMESPACE::JniUtil::copyStdString(
+ env, reinterpret_cast<jstring>(jobj_key), &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->DeleteLocalRef(jobj_value);
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ std::string s_value = ROCKSDB_NAMESPACE::JniUtil::copyStdString(
+ env, reinterpret_cast<jstring>(jobj_value), &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->DeleteLocalRef(jobj_value);
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ options_map[s_key] = s_value;
+
+ env->DeleteLocalRef(jobj_key);
+ env->DeleteLocalRef(jobj_value);
+ }
+
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto s = db->SetDBOptions(options_map);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: compactFiles
+ * Signature: (JJJ[Ljava/lang/String;IIJ)[Ljava/lang/String;
+ */
+jobjectArray Java_org_rocksdb_RocksDB_compactFiles(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcompaction_opts_handle,
+ jlong jcf_handle, jobjectArray jinput_file_names, jint joutput_level,
+ jint joutput_path_id, jlong jcompaction_job_info_handle) {
+ jboolean has_exception = JNI_FALSE;
+ const std::vector<std::string> input_file_names =
+ ROCKSDB_NAMESPACE::JniUtil::copyStrings(env, jinput_file_names,
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return nullptr;
+ }
+
+ auto* compaction_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionOptions*>(
+ jcompaction_opts_handle);
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+
+ ROCKSDB_NAMESPACE::CompactionJobInfo* compaction_job_info = nullptr;
+ if (jcompaction_job_info_handle != 0) {
+ compaction_job_info =
+ reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(
+ jcompaction_job_info_handle);
+ }
+
+ std::vector<std::string> output_file_names;
+ auto s = db->CompactFiles(*compaction_opts, cf_handle, input_file_names,
+ static_cast<int>(joutput_level), static_cast<int>(joutput_path_id),
+ &output_file_names, compaction_job_info);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaStrings(env, &output_file_names);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: pauseBackgroundWork
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_pauseBackgroundWork(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto s = db->PauseBackgroundWork();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: continueBackgroundWork
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_continueBackgroundWork(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto s = db->ContinueBackgroundWork();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: enableAutoCompaction
+ * Signature: (J[J)V
+ */
+void Java_org_rocksdb_RocksDB_enableAutoCompaction(
+ JNIEnv* env, jobject, jlong jdb_handle, jlongArray jcf_handles) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ jboolean has_exception = JNI_FALSE;
+ const std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles =
+ ROCKSDB_NAMESPACE::JniUtil::fromJPointers<
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle>(env, jcf_handles,
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
+ db->EnableAutoCompaction(cf_handles);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: numberLevels
+ * Signature: (JJ)I
+ */
+jint Java_org_rocksdb_RocksDB_numberLevels(
+ JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+ return static_cast<jint>(db->NumberLevels(cf_handle));
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: maxMemCompactionLevel
+ * Signature: (JJ)I
+ */
+jint Java_org_rocksdb_RocksDB_maxMemCompactionLevel(
+ JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+ return static_cast<jint>(db->MaxMemCompactionLevel(cf_handle));
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: level0StopWriteTrigger
+ * Signature: (JJ)I
+ */
+jint Java_org_rocksdb_RocksDB_level0StopWriteTrigger(
+ JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+ return static_cast<jint>(db->Level0StopWriteTrigger(cf_handle));
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getName
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_RocksDB_getName(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ std::string name = db->GetName();
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, false);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getEnv
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RocksDB_getEnv(
+ JNIEnv*, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ return reinterpret_cast<jlong>(db->GetEnv());
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: flush
+ * Signature: (JJ[J)V
+ */
+void Java_org_rocksdb_RocksDB_flush(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jflush_opts_handle,
+ jlongArray jcf_handles) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* flush_opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::FlushOptions*>(jflush_opts_handle);
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
+ if (jcf_handles == nullptr) {
+ cf_handles.push_back(db->DefaultColumnFamily());
+ } else {
+ jboolean has_exception = JNI_FALSE;
+ cf_handles = ROCKSDB_NAMESPACE::JniUtil::fromJPointers<
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle>(env, jcf_handles,
+ &has_exception);
+ if (has_exception) {
+ // exception occurred
+ return;
+ }
+ }
+ auto s = db->Flush(*flush_opts, cf_handles);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: flushWal
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_RocksDB_flushWal(
+ JNIEnv* env, jobject, jlong jdb_handle, jboolean jsync) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto s = db->FlushWAL(jsync == JNI_TRUE);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: syncWal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_syncWal(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto s = db->SyncWAL();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getLatestSequenceNumber
+ * Signature: (J)V
+ */
+jlong Java_org_rocksdb_RocksDB_getLatestSequenceNumber(
+ JNIEnv*, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ return db->GetLatestSequenceNumber();
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: setPreserveDeletesSequenceNumber
+ * Signature: (JJ)Z
+ */
+jboolean JNICALL Java_org_rocksdb_RocksDB_setPreserveDeletesSequenceNumber(
+ JNIEnv*, jobject, jlong jdb_handle, jlong jseq_number) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ if (db->SetPreserveDeletesSequenceNumber(
+ static_cast<uint64_t>(jseq_number))) {
+ return JNI_TRUE;
+ } else {
+ return JNI_FALSE;
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: disableFileDeletions
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_disableFileDeletions(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::Status s = db->DisableFileDeletions();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: enableFileDeletions
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_RocksDB_enableFileDeletions(
+ JNIEnv* env, jobject, jlong jdb_handle, jboolean jforce) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::Status s = db->EnableFileDeletions(jforce);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getLiveFiles
+ * Signature: (JZ)[Ljava/lang/String;
+ */
+jobjectArray Java_org_rocksdb_RocksDB_getLiveFiles(
+ JNIEnv* env, jobject, jlong jdb_handle, jboolean jflush_memtable) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ std::vector<std::string> live_files;
+ uint64_t manifest_file_size = 0;
+ auto s = db->GetLiveFiles(
+ live_files, &manifest_file_size, jflush_memtable == JNI_TRUE);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ // append the manifest_file_size to the vector
+ // for passing back to java
+ live_files.push_back(std::to_string(manifest_file_size));
+
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaStrings(env, &live_files);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getSortedWalFiles
+ * Signature: (J)[Lorg/rocksdb/LogFile;
+ */
+jobjectArray Java_org_rocksdb_RocksDB_getSortedWalFiles(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ std::vector<std::unique_ptr<ROCKSDB_NAMESPACE::LogFile>> sorted_wal_files;
+ auto s = db->GetSortedWalFiles(sorted_wal_files);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ // convert to Java type
+ const jsize jlen = static_cast<jsize>(sorted_wal_files.size());
+ jobjectArray jsorted_wal_files = env->NewObjectArray(
+ jlen, ROCKSDB_NAMESPACE::LogFileJni::getJClass(env), nullptr);
+  if (jsorted_wal_files == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ jsize i = 0;
+ for (auto it = sorted_wal_files.begin(); it != sorted_wal_files.end(); ++it) {
+ jobject jlog_file =
+ ROCKSDB_NAMESPACE::LogFileJni::fromCppLogFile(env, it->get());
+ if (jlog_file == nullptr) {
+ // exception occurred
+ env->DeleteLocalRef(jsorted_wal_files);
+ return nullptr;
+ }
+
+ env->SetObjectArrayElement(jsorted_wal_files, i++, jlog_file);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(jlog_file);
+ env->DeleteLocalRef(jsorted_wal_files);
+ return nullptr;
+ }
+
+ env->DeleteLocalRef(jlog_file);
+ }
+
+ return jsorted_wal_files;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getUpdatesSince
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_RocksDB_getUpdatesSince(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jsequence_number) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::SequenceNumber sequence_number =
+ static_cast<ROCKSDB_NAMESPACE::SequenceNumber>(jsequence_number);
+ std::unique_ptr<ROCKSDB_NAMESPACE::TransactionLogIterator> iter;
+ ROCKSDB_NAMESPACE::Status s = db->GetUpdatesSince(sequence_number, &iter);
+ if (s.ok()) {
+ return reinterpret_cast<jlong>(iter.release());
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteFile
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_RocksDB_deleteFile(
+ JNIEnv* env, jobject, jlong jdb_handle, jstring jname) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ jboolean has_exception = JNI_FALSE;
+ std::string name =
+ ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jname, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
+ db->DeleteFile(name);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getLiveFilesMetaData
+ * Signature: (J)[Lorg/rocksdb/LiveFileMetaData;
+ */
+jobjectArray Java_org_rocksdb_RocksDB_getLiveFilesMetaData(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ std::vector<ROCKSDB_NAMESPACE::LiveFileMetaData> live_files_meta_data;
+ db->GetLiveFilesMetaData(&live_files_meta_data);
+
+ // convert to Java type
+ const jsize jlen = static_cast<jsize>(live_files_meta_data.size());
+ jobjectArray jlive_files_meta_data = env->NewObjectArray(
+ jlen, ROCKSDB_NAMESPACE::LiveFileMetaDataJni::getJClass(env), nullptr);
+  if (jlive_files_meta_data == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ jsize i = 0;
+  for (auto it = live_files_meta_data.begin();
+       it != live_files_meta_data.end(); ++it) {
+ jobject jlive_file_meta_data =
+ ROCKSDB_NAMESPACE::LiveFileMetaDataJni::fromCppLiveFileMetaData(env,
+ &(*it));
+ if (jlive_file_meta_data == nullptr) {
+ // exception occurred
+ env->DeleteLocalRef(jlive_files_meta_data);
+ return nullptr;
+ }
+
+    env->SetObjectArrayElement(jlive_files_meta_data, i++,
+                               jlive_file_meta_data);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(jlive_file_meta_data);
+ env->DeleteLocalRef(jlive_files_meta_data);
+ return nullptr;
+ }
+
+ env->DeleteLocalRef(jlive_file_meta_data);
+ }
+
+ return jlive_files_meta_data;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getColumnFamilyMetaData
+ * Signature: (JJ)Lorg/rocksdb/ColumnFamilyMetaData;
+ */
+jobject Java_org_rocksdb_RocksDB_getColumnFamilyMetaData(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+ ROCKSDB_NAMESPACE::ColumnFamilyMetaData cf_metadata;
+ db->GetColumnFamilyMetaData(cf_handle, &cf_metadata);
+ return ROCKSDB_NAMESPACE::ColumnFamilyMetaDataJni::
+ fromCppColumnFamilyMetaData(env, &cf_metadata);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: ingestExternalFile
+ * Signature: (JJ[Ljava/lang/String;IJ)V
+ */
+void Java_org_rocksdb_RocksDB_ingestExternalFile(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jobjectArray jfile_path_list, jint jfile_path_list_len,
+ jlong jingest_external_file_options_handle) {
+ jboolean has_exception = JNI_FALSE;
+ std::vector<std::string> file_path_list =
+ ROCKSDB_NAMESPACE::JniUtil::copyStrings(
+ env, jfile_path_list, jfile_path_list_len, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
+
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* column_family =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto* ifo = reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(
+ jingest_external_file_options_handle);
+ ROCKSDB_NAMESPACE::Status s =
+ db->IngestExternalFile(column_family, file_path_list, *ifo);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: verifyChecksum
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_verifyChecksum(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto s = db->VerifyChecksum();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getDefaultColumnFamily
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RocksDB_getDefaultColumnFamily(
+ JNIEnv*, jobject, jlong jdb_handle) {
+ auto* db_handle = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* cf_handle = db_handle->DefaultColumnFamily();
+ return reinterpret_cast<jlong>(cf_handle);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getPropertiesOfAllTables
+ * Signature: (JJ)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_RocksDB_getPropertiesOfAllTables(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+ ROCKSDB_NAMESPACE::TablePropertiesCollection table_properties_collection;
+ auto s = db->GetPropertiesOfAllTables(cf_handle,
+ &table_properties_collection);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+
+ // convert to Java type
+ jobject jhash_map = ROCKSDB_NAMESPACE::HashMapJni::construct(
+ env, static_cast<uint32_t>(table_properties_collection.size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+ const std::string,
+ const std::shared_ptr<const ROCKSDB_NAMESPACE::TableProperties>, jobject,
+ jobject>
+ fn_map_kv =
+ [env](const std::pair<const std::string,
+ const std::shared_ptr<
+ const ROCKSDB_NAMESPACE::TableProperties>>&
+ kv) {
+ jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+ env, &(kv.first), false);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jobject jtable_properties =
+ ROCKSDB_NAMESPACE::TablePropertiesJni::fromCppTableProperties(
+ env, *(kv.second.get()));
+ if (jtable_properties == nullptr) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(
+ new std::pair<jobject, jobject>(
+ static_cast<jobject>(jkey),
+ static_cast<jobject>(jtable_properties)));
+ };
+
+ if (!ROCKSDB_NAMESPACE::HashMapJni::putAll(
+ env, jhash_map, table_properties_collection.begin(),
+ table_properties_collection.end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getPropertiesOfTablesInRange
+ * Signature: (JJ[J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_RocksDB_getPropertiesOfTablesInRange(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jlongArray jrange_slice_handles) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+ const jsize jlen = env->GetArrayLength(jrange_slice_handles);
+ jboolean jrange_slice_handles_is_copy = JNI_FALSE;
+ jlong *jrange_slice_handle = env->GetLongArrayElements(
+ jrange_slice_handles, &jrange_slice_handles_is_copy);
+ if (jrange_slice_handle == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const size_t ranges_len = static_cast<size_t>(jlen / 2);
+ auto ranges = std::unique_ptr<ROCKSDB_NAMESPACE::Range[]>(
+ new ROCKSDB_NAMESPACE::Range[ranges_len]);
+ for (jsize i = 0, j = 0; i < jlen; ++i) {
+ auto* start =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(jrange_slice_handle[i]);
+ auto* limit =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(jrange_slice_handle[++i]);
+ ranges[j++] = ROCKSDB_NAMESPACE::Range(*start, *limit);
+ }
+
+ ROCKSDB_NAMESPACE::TablePropertiesCollection table_properties_collection;
+ auto s = db->GetPropertiesOfTablesInRange(
+ cf_handle, ranges.get(), ranges_len, &table_properties_collection);
+ if (!s.ok()) {
+ // error occurred
+    env->ReleaseLongArrayElements(jrange_slice_handles, jrange_slice_handle,
+                                  JNI_ABORT);
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+  // cleanup
+  env->ReleaseLongArrayElements(jrange_slice_handles, jrange_slice_handle,
+                                JNI_ABORT);
+
+  // convert to Java type
+  jobject jhash_map = ROCKSDB_NAMESPACE::HashMapJni::construct(
+      env, static_cast<uint32_t>(table_properties_collection.size()));
+  if (jhash_map == nullptr) {
+    // exception occurred
+    return nullptr;
+  }
+
+  const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+      const std::string,
+      const std::shared_ptr<const ROCKSDB_NAMESPACE::TableProperties>, jobject,
+      jobject>
+      fn_map_kv =
+          [env](const std::pair<const std::string,
+                                const std::shared_ptr<
+                                    const ROCKSDB_NAMESPACE::TableProperties>>&
+                    kv) {
+            jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+                env, &(kv.first), false);
+            if (env->ExceptionCheck()) {
+              // an error occurred
+              return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+            }
+
+            jobject jtable_properties =
+                ROCKSDB_NAMESPACE::TablePropertiesJni::fromCppTableProperties(
+                    env, *(kv.second.get()));
+            if (jtable_properties == nullptr) {
+              // an error occurred
+              env->DeleteLocalRef(jkey);
+              return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+            }
+
+            return std::unique_ptr<std::pair<jobject, jobject>>(
+                new std::pair<jobject, jobject>(
+                    static_cast<jobject>(jkey),
+                    static_cast<jobject>(jtable_properties)));
+          };
+
+  if (!ROCKSDB_NAMESPACE::HashMapJni::putAll(
+          env, jhash_map, table_properties_collection.begin(),
+          table_properties_collection.end(), fn_map_kv)) {
+    // exception occurred
+    return nullptr;
+  }
+
+  return jhash_map;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: suggestCompactRange
+ * Signature: (JJ)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_suggestCompactRange(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+ auto* begin = new ROCKSDB_NAMESPACE::Slice();
+ auto* end = new ROCKSDB_NAMESPACE::Slice();
+ auto s = db->SuggestCompactRange(cf_handle, begin, end);
+ if (!s.ok()) {
+ // error occurred
+ delete begin;
+ delete end;
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ jlongArray jslice_handles = env->NewLongArray(2);
+ if (jslice_handles == nullptr) {
+ // exception thrown: OutOfMemoryError
+ delete begin;
+ delete end;
+ return nullptr;
+ }
+
+ jlong slice_handles[2];
+ slice_handles[0] = reinterpret_cast<jlong>(begin);
+ slice_handles[1] = reinterpret_cast<jlong>(end);
+ env->SetLongArrayRegion(jslice_handles, 0, 2, slice_handles);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete begin;
+ delete end;
+ env->DeleteLocalRef(jslice_handles);
+ return nullptr;
+ }
+
+ return jslice_handles;
+}
+
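+// The two returned handles are heap-allocated Slices whose ownership passes
+// to Java; the caller is expected to wrap them (e.g. in org.rocksdb.Slice
+// objects) and eventually dispose them, otherwise they leak.
+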
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: promoteL0
+ * Signature: (JJI)V
+ */
+void Java_org_rocksdb_RocksDB_promoteL0(
+ JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle, jint jtarget_level) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ }
+ db->PromoteL0(cf_handle, static_cast<int>(jtarget_level));
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: startTrace
+ * Signature: (JJJ)V
+ */
+void Java_org_rocksdb_RocksDB_startTrace(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jmax_trace_file_size,
+ jlong jtrace_writer_jnicallback_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ ROCKSDB_NAMESPACE::TraceOptions trace_options;
+ trace_options.max_trace_file_size =
+ static_cast<uint64_t>(jmax_trace_file_size);
+ // transfer ownership of trace writer from Java to C++
+ auto trace_writer =
+ std::unique_ptr<ROCKSDB_NAMESPACE::TraceWriterJniCallback>(
+ reinterpret_cast<ROCKSDB_NAMESPACE::TraceWriterJniCallback*>(
+ jtrace_writer_jnicallback_handle));
+ auto s = db->StartTrace(trace_options, std::move(trace_writer));
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: endTrace
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_org_rocksdb_RocksDB_endTrace(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto s = db->EndTrace();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: destroyDB
+ * Signature: (Ljava/lang/String;J)V
+ */
+void Java_org_rocksdb_RocksDB_destroyDB(
+ JNIEnv* env, jclass, jstring jdb_path, jlong joptions_handle) {
+ const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
+ if (db_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(joptions_handle);
+  if (options == nullptr) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+        env, ROCKSDB_NAMESPACE::Status::InvalidArgument("Invalid Options."));
+    env->ReleaseStringUTFChars(jdb_path, db_path);
+    return;
+  }
+
+ ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::DestroyDB(db_path, *options);
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+bool get_slice_helper(JNIEnv* env, jobjectArray ranges, jsize index,
+ std::unique_ptr<ROCKSDB_NAMESPACE::Slice>& slice,
+ std::vector<std::unique_ptr<jbyte[]>>& ranges_to_free) {
+ jobject jArray = env->GetObjectArrayElement(ranges, index);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ return false;
+ }
+
+ if (jArray == nullptr) {
+ return true;
+ }
+
+ jbyteArray jba = reinterpret_cast<jbyteArray>(jArray);
+ jsize len_ba = env->GetArrayLength(jba);
+ ranges_to_free.push_back(std::unique_ptr<jbyte[]>(new jbyte[len_ba]));
+ env->GetByteArrayRegion(jba, 0, len_ba, ranges_to_free.back().get());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jArray);
+ return false;
+ }
+ env->DeleteLocalRef(jArray);
+ slice.reset(new ROCKSDB_NAMESPACE::Slice(
+ reinterpret_cast<char*>(ranges_to_free.back().get()), len_ba));
+ return true;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteFilesInRanges
+ * Signature: (JJLjava/util/List;Z)V
+ */
+JNIEXPORT void JNICALL Java_org_rocksdb_RocksDB_deleteFilesInRanges(
+ JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jcf_handle,
+ jobjectArray ranges, jboolean include_end) {
+ jsize length = env->GetArrayLength(ranges);
+
+ std::vector<ROCKSDB_NAMESPACE::RangePtr> rangesVector;
+ std::vector<std::unique_ptr<ROCKSDB_NAMESPACE::Slice>> slices;
+ std::vector<std::unique_ptr<jbyte[]>> ranges_to_free;
+ for (jsize i = 0; (i + 1) < length; i += 2) {
+ slices.push_back(std::unique_ptr<ROCKSDB_NAMESPACE::Slice>());
+ if (!get_slice_helper(env, ranges, i, slices.back(), ranges_to_free)) {
+ // exception thrown
+ return;
+ }
+
+ slices.push_back(std::unique_ptr<ROCKSDB_NAMESPACE::Slice>());
+ if (!get_slice_helper(env, ranges, i + 1, slices.back(), ranges_to_free)) {
+ // exception thrown
+ return;
+ }
+
+ rangesVector.push_back(ROCKSDB_NAMESPACE::RangePtr(
+ slices[slices.size() - 2].get(), slices[slices.size() - 1].get()));
+ }
+
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* column_family =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+
+ ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::DeleteFilesInRanges(
+ db, column_family == nullptr ? db->DefaultColumnFamily() : column_family,
+ rangesVector.data(), rangesVector.size(), include_end);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
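Note the layout the native expects: `ranges` is a flat array of begin/end pairs, and a null element leaves that bound open. A Java-side sketch (keys and `db` are illustrative), assuming the `deleteFilesInRanges` wrapper:

```java
import java.util.Arrays;
import org.rocksdb.*;

// Two [begin, end) pairs, flattened: (null, "k2") targets files wholly
// before "k2"; ("k5", "k7") targets files wholly inside that range.
db.deleteFilesInRanges(
    db.getDefaultColumnFamily(),
    Arrays.asList((byte[]) null, "k2".getBytes(),
        "k5".getBytes(), "k7".getBytes()),
    false /* includeEnd */);
```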
diff --git a/src/rocksdb/java/rocksjni/slice.cc b/src/rocksdb/java/rocksjni/slice.cc
new file mode 100644
index 000000000..d9e58992b
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/slice.cc
@@ -0,0 +1,360 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Slice.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+
+#include "include/org_rocksdb_AbstractSlice.h"
+#include "include/org_rocksdb_DirectSlice.h"
+#include "include/org_rocksdb_Slice.h"
+#include "rocksdb/slice.h"
+#include "rocksjni/portal.h"
+
+// <editor-fold desc="org.rocksdb.AbstractSlice>
+
+/*
+ * Class: org_rocksdb_AbstractSlice
+ * Method: createNewSliceFromString
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_AbstractSlice_createNewSliceFromString(JNIEnv* env,
+ jclass /*jcls*/,
+ jstring jstr) {
+ const auto* str = env->GetStringUTFChars(jstr, nullptr);
+ if (str == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+
+ const size_t len = strlen(str);
+
+ // NOTE: buf will be deleted in the
+ // Java_org_rocksdb_Slice_disposeInternalBuf or
+  // Java_org_rocksdb_DirectSlice_disposeInternalBuf methods
+ char* buf = new char[len + 1];
+ memcpy(buf, str, len);
+ buf[len] = 0;
+ env->ReleaseStringUTFChars(jstr, str);
+
+ const auto* slice = new ROCKSDB_NAMESPACE::Slice(buf);
+ return reinterpret_cast<jlong>(slice);
+}
+
+/*
+ * Class: org_rocksdb_AbstractSlice
+ * Method: size0
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_AbstractSlice_size0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle) {
+ const auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ return static_cast<jint>(slice->size());
+}
+
+/*
+ * Class: org_rocksdb_AbstractSlice
+ * Method: empty0
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_AbstractSlice_empty0(JNIEnv* /*env*/,
+ jobject /*jobj*/, jlong handle) {
+ const auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ return slice->empty();
+}
+
+/*
+ * Class: org_rocksdb_AbstractSlice
+ * Method: toString0
+ * Signature: (JZ)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_AbstractSlice_toString0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle, jboolean hex) {
+ const auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ const std::string s = slice->ToString(hex);
+ return env->NewStringUTF(s.c_str());
+}
+
+/*
+ * Class: org_rocksdb_AbstractSlice
+ * Method: compare0
+ * Signature: (JJ)I;
+ */
+jint Java_org_rocksdb_AbstractSlice_compare0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle, jlong otherHandle) {
+ const auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ const auto* otherSlice =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(otherHandle);
+ return slice->compare(*otherSlice);
+}
+
+/*
+ * Class: org_rocksdb_AbstractSlice
+ * Method: startsWith0
+ * Signature: (JJ)Z;
+ */
+jboolean Java_org_rocksdb_AbstractSlice_startsWith0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle,
+ jlong otherHandle) {
+ const auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ const auto* otherSlice =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(otherHandle);
+ return slice->starts_with(*otherSlice);
+}
+
+/*
+ * Class: org_rocksdb_AbstractSlice
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_AbstractSlice_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+}
+
+// </editor-fold>
+
+// <editor-fold desc="org.rocksdb.Slice>
+
+/*
+ * Class: org_rocksdb_Slice
+ * Method: createNewSlice0
+ * Signature: ([BI)J
+ */
+jlong Java_org_rocksdb_Slice_createNewSlice0(JNIEnv* env, jclass /*jcls*/,
+ jbyteArray data, jint offset) {
+ const jsize dataSize = env->GetArrayLength(data);
+ const int len = dataSize - offset;
+
+ // NOTE: buf will be deleted in the Java_org_rocksdb_Slice_disposeInternalBuf
+ // method
+ jbyte* buf = new jbyte[len];
+ env->GetByteArrayRegion(data, offset, len, buf);
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    delete[] buf;  // avoid leaking the native copy
+    return 0;
+  }
+
+ const auto* slice = new ROCKSDB_NAMESPACE::Slice((const char*)buf, len);
+ return reinterpret_cast<jlong>(slice);
+}
+
+/*
+ * Class: org_rocksdb_Slice
+ * Method: createNewSlice1
+ * Signature: ([B)J
+ */
+jlong Java_org_rocksdb_Slice_createNewSlice1(JNIEnv* env, jclass /*jcls*/,
+ jbyteArray data) {
+ jbyte* ptrData = env->GetByteArrayElements(data, nullptr);
+ if (ptrData == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+ const int len = env->GetArrayLength(data) + 1;
+
+ // NOTE: buf will be deleted in the Java_org_rocksdb_Slice_disposeInternalBuf
+ // method
+ char* buf = new char[len];
+ memcpy(buf, ptrData, len - 1);
+ buf[len - 1] = '\0';
+
+ const auto* slice = new ROCKSDB_NAMESPACE::Slice(buf, len - 1);
+
+ env->ReleaseByteArrayElements(data, ptrData, JNI_ABORT);
+
+ return reinterpret_cast<jlong>(slice);
+}
+
+/*
+ * Class: org_rocksdb_Slice
+ * Method: data0
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_Slice_data0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle) {
+ const auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ const jsize len = static_cast<jsize>(slice->size());
+ const jbyteArray data = env->NewByteArray(len);
+ if (data == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(
+ data, 0, len,
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(slice->data())));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(data);
+ return nullptr;
+ }
+
+ return data;
+}
+
+/*
+ * Class: org_rocksdb_Slice
+ * Method: clear0
+ * Signature: (JZJ)V
+ */
+void Java_org_rocksdb_Slice_clear0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle, jboolean shouldRelease,
+ jlong internalBufferOffset) {
+ auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ if (shouldRelease == JNI_TRUE) {
+ const char* buf = slice->data_ - internalBufferOffset;
+ delete[] buf;
+ }
+ slice->clear();
+}
+
+/*
+ * Class: org_rocksdb_Slice
+ * Method: removePrefix0
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Slice_removePrefix0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle, jint length) {
+ auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ slice->remove_prefix(length);
+}
+
+/*
+ * Class: org_rocksdb_Slice
+ * Method: disposeInternalBuf
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Slice_disposeInternalBuf(JNIEnv* /*env*/,
+ jobject /*jobj*/, jlong handle,
+ jlong internalBufferOffset) {
+ const auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ const char* buf = slice->data_ - internalBufferOffset;
+ delete[] buf;
+}
+
+// </editor-fold>
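Seen from Java, both `Slice` constructors above copy their input into a native buffer that is only reclaimed through `disposeInternalBuf`, hence the try-with-resources in this sketch (values illustrative; method names follow the natives above):

```java
import org.rocksdb.*;

try (final Slice a = new Slice("userKey");                  // createNewSliceFromString
     final Slice b = new Slice("xyzuserKey".getBytes(), 3)) // createNewSlice0, offset 3
{
  assert a.compare(b) == 0; // compare0: both view "userKey"
  assert !a.empty();        // empty0
  System.out.println(a.size() + " " + a.toString(false)); // size0, toString0
}
```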
+
+// <editor-fold desc="org.rocksdb.DirectSlice>
+
+/*
+ * Class: org_rocksdb_DirectSlice
+ * Method: createNewDirectSlice0
+ * Signature: (Ljava/nio/ByteBuffer;I)J
+ */
+jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice0(JNIEnv* env,
+ jclass /*jcls*/,
+ jobject data,
+ jint length) {
+ assert(data != nullptr);
+ void* data_addr = env->GetDirectBufferAddress(data);
+ if (data_addr == nullptr) {
+ // error: memory region is undefined, given object is not a direct
+ // java.nio.Buffer, or JNI access to direct buffers is not supported by JVM
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Could not access DirectBuffer"));
+ return 0;
+ }
+
+ const auto* ptrData = reinterpret_cast<char*>(data_addr);
+ const auto* slice = new ROCKSDB_NAMESPACE::Slice(ptrData, length);
+ return reinterpret_cast<jlong>(slice);
+}
+
+/*
+ * Class: org_rocksdb_DirectSlice
+ * Method: createNewDirectSlice1
+ * Signature: (Ljava/nio/ByteBuffer;)J
+ */
+jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice1(JNIEnv* env,
+ jclass /*jcls*/,
+ jobject data) {
+ void* data_addr = env->GetDirectBufferAddress(data);
+ if (data_addr == nullptr) {
+ // error: memory region is undefined, given object is not a direct
+ // java.nio.Buffer, or JNI access to direct buffers is not supported by JVM
+ ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(
+ env, ROCKSDB_NAMESPACE::Status::InvalidArgument(
+ "Could not access DirectBuffer"));
+ return 0;
+ }
+
+ const auto* ptrData = reinterpret_cast<char*>(data_addr);
+ const auto* slice = new ROCKSDB_NAMESPACE::Slice(ptrData);
+ return reinterpret_cast<jlong>(slice);
+}
+
+/*
+ * Class: org_rocksdb_DirectSlice
+ * Method: data0
+ * Signature: (J)Ljava/lang/Object;
+ */
+jobject Java_org_rocksdb_DirectSlice_data0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle) {
+ const auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ return env->NewDirectByteBuffer(const_cast<char*>(slice->data()),
+ slice->size());
+}
+
+/*
+ * Class: org_rocksdb_DirectSlice
+ * Method: get0
+ * Signature: (JI)B
+ */
+jbyte Java_org_rocksdb_DirectSlice_get0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle, jint offset) {
+ const auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ return (*slice)[offset];
+}
+
+/*
+ * Class: org_rocksdb_DirectSlice
+ * Method: clear0
+ * Signature: (JZJ)V
+ */
+void Java_org_rocksdb_DirectSlice_clear0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle, jboolean shouldRelease,
+ jlong internalBufferOffset) {
+ auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ if (shouldRelease == JNI_TRUE) {
+ const char* buf = slice->data_ - internalBufferOffset;
+ delete[] buf;
+ }
+ slice->clear();
+}
+
+/*
+ * Class: org_rocksdb_DirectSlice
+ * Method: removePrefix0
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DirectSlice_removePrefix0(JNIEnv* /*env*/,
+ jobject /*jobj*/, jlong handle,
+ jint length) {
+ auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ slice->remove_prefix(length);
+}
+
+/*
+ * Class: org_rocksdb_DirectSlice
+ * Method: disposeInternalBuf
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DirectSlice_disposeInternalBuf(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong handle,
+ jlong internalBufferOffset) {
+ const auto* slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(handle);
+ const char* buf = slice->data_ - internalBufferOffset;
+ delete[] buf;
+}
+
+// </editor-fold>
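Unlike `Slice`, `DirectSlice` wraps the direct buffer's memory without copying, so the `ByteBuffer` must outlive the slice. A sketch (sizes illustrative):

```java
import java.nio.ByteBuffer;
import org.rocksdb.*;

final ByteBuffer buf = ByteBuffer.allocateDirect(16);
buf.put("hello".getBytes());
try (final DirectSlice slice = new DirectSlice(buf, 5)) { // createNewDirectSlice0
  System.out.println((char) slice.get(0)); // get0 -> 'h'
}
```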
diff --git a/src/rocksdb/java/rocksjni/snapshot.cc b/src/rocksdb/java/rocksjni/snapshot.cc
new file mode 100644
index 000000000..2a1265a58
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/snapshot.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "include/org_rocksdb_Snapshot.h"
+#include "rocksdb/db.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_Snapshot
+ * Method: getSequenceNumber
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Snapshot_getSequenceNumber(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jsnapshot_handle) {
+ auto* snapshot =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Snapshot*>(jsnapshot_handle);
+ return snapshot->GetSequenceNumber();
+}
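A snapshot pins the sequence number returned by the native above; reads issued through it see the database as of that point. A sketch (`db` illustrative; `get` throws `RocksDBException`):

```java
import org.rocksdb.*;

final Snapshot snapshot = db.getSnapshot();
try (final ReadOptions ro = new ReadOptions().setSnapshot(snapshot)) {
  System.out.println("pinned seq: " + snapshot.getSequenceNumber());
  byte[] v = db.get(ro, "key".getBytes()); // reads as of the snapshot
} finally {
  db.releaseSnapshot(snapshot);
}
```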
diff --git a/src/rocksdb/java/rocksjni/sst_file_manager.cc b/src/rocksdb/java/rocksjni/sst_file_manager.cc
new file mode 100644
index 000000000..e7445d80f
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/sst_file_manager.cc
@@ -0,0 +1,247 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::SstFileManager methods
+// from Java side.
+
+#include <jni.h>
+#include <memory>
+
+#include "include/org_rocksdb_SstFileManager.h"
+#include "rocksdb/sst_file_manager.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: newSstFileManager
+ * Signature: (JJJDJ)J
+ */
+jlong Java_org_rocksdb_SstFileManager_newSstFileManager(
+ JNIEnv* jnienv, jclass /*jcls*/, jlong jenv_handle, jlong jlogger_handle,
+ jlong jrate_bytes, jdouble jmax_trash_db_ratio,
+ jlong jmax_delete_chunk_bytes) {
+ auto* env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jenv_handle);
+ ROCKSDB_NAMESPACE::Status s;
+ ROCKSDB_NAMESPACE::SstFileManager* sst_file_manager = nullptr;
+
+ if (jlogger_handle != 0) {
+ auto* sptr_logger =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Logger>*>(
+ jlogger_handle);
+ sst_file_manager = ROCKSDB_NAMESPACE::NewSstFileManager(
+ env, *sptr_logger, "", jrate_bytes, true, &s, jmax_trash_db_ratio,
+ jmax_delete_chunk_bytes);
+ } else {
+ sst_file_manager = ROCKSDB_NAMESPACE::NewSstFileManager(
+ env, nullptr, "", jrate_bytes, true, &s, jmax_trash_db_ratio,
+ jmax_delete_chunk_bytes);
+ }
+
+  if (!s.ok()) {
+    if (sst_file_manager != nullptr) {
+      delete sst_file_manager;
+    }
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(jnienv, s);
+    // do not wrap the (now deleted) pointer in a shared_ptr below
+    return 0;
+  }
+ auto* sptr_sst_file_manager =
+ new std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>(sst_file_manager);
+
+ return reinterpret_cast<jlong>(sptr_sst_file_manager);
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: setMaxAllowedSpaceUsage
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_SstFileManager_setMaxAllowedSpaceUsage(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jmax_allowed_space) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ sptr_sst_file_manager->get()->SetMaxAllowedSpaceUsage(jmax_allowed_space);
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: setCompactionBufferSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_SstFileManager_setCompactionBufferSize(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jcompaction_buffer_size) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ sptr_sst_file_manager->get()->SetCompactionBufferSize(
+ jcompaction_buffer_size);
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: isMaxAllowedSpaceReached
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_SstFileManager_isMaxAllowedSpaceReached(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ return sptr_sst_file_manager->get()->IsMaxAllowedSpaceReached();
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: isMaxAllowedSpaceReachedIncludingCompactions
+ * Signature: (J)Z
+ */
+jboolean
+Java_org_rocksdb_SstFileManager_isMaxAllowedSpaceReachedIncludingCompactions(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ return sptr_sst_file_manager->get()
+ ->IsMaxAllowedSpaceReachedIncludingCompactions();
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: getTotalSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_SstFileManager_getTotalSize(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ return sptr_sst_file_manager->get()->GetTotalSize();
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: getTrackedFiles
+ * Signature: (J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_SstFileManager_getTrackedFiles(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ auto tracked_files = sptr_sst_file_manager->get()->GetTrackedFiles();
+
+ // TODO(AR) could refactor to share code with
+ // ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, tracked_files);
+
+ const jobject jtracked_files = ROCKSDB_NAMESPACE::HashMapJni::construct(
+ env, static_cast<uint32_t>(tracked_files.size()));
+ if (jtracked_files == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<const std::string,
+ const uint64_t, jobject, jobject>
+ fn_map_kv =
+ [env](const std::pair<const std::string, const uint64_t>& pair) {
+ const jstring jtracked_file_path =
+ env->NewStringUTF(pair.first.c_str());
+ if (jtracked_file_path == nullptr) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+ const jobject jtracked_file_size =
+ ROCKSDB_NAMESPACE::LongJni::valueOf(env, pair.second);
+ if (jtracked_file_size == nullptr) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+ return std::unique_ptr<std::pair<jobject, jobject>>(
+ new std::pair<jobject, jobject>(jtracked_file_path,
+ jtracked_file_size));
+ };
+
+ if (!ROCKSDB_NAMESPACE::HashMapJni::putAll(env, jtracked_files,
+ tracked_files.begin(),
+ tracked_files.end(), fn_map_kv)) {
+    // exception occurred
+ return nullptr;
+ }
+
+ return jtracked_files;
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: getDeleteRateBytesPerSecond
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_SstFileManager_getDeleteRateBytesPerSecond(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ return sptr_sst_file_manager->get()->GetDeleteRateBytesPerSecond();
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: setDeleteRateBytesPerSecond
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_SstFileManager_setDeleteRateBytesPerSecond(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong jdelete_rate) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ sptr_sst_file_manager->get()->SetDeleteRateBytesPerSecond(jdelete_rate);
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: getMaxTrashDBRatio
+ * Signature: (J)D
+ */
+jdouble Java_org_rocksdb_SstFileManager_getMaxTrashDBRatio(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ return sptr_sst_file_manager->get()->GetMaxTrashDBRatio();
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: setMaxTrashDBRatio
+ * Signature: (JD)V
+ */
+void Java_org_rocksdb_SstFileManager_setMaxTrashDBRatio(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jdouble jratio) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ sptr_sst_file_manager->get()->SetMaxTrashDBRatio(jratio);
+}
+
+/*
+ * Class: org_rocksdb_SstFileManager
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileManager_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* sptr_sst_file_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+ jhandle);
+ delete sptr_sst_file_manager;
+}
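The shared_ptr wrapper above is what `Options.setSstFileManager` shares with the DB. A usage sketch (paths and rates illustrative):

```java
import org.rocksdb.*;

try (final SstFileManager sfm = new SstFileManager(Env.getDefault());
     final Options options =
         new Options().setCreateIfMissing(true).setSstFileManager(sfm);
     final RocksDB db = RocksDB.open(options, "/tmp/managed-db")) {
  sfm.setDeleteRateBytesPerSecond(16L << 20); // throttle SST deletion
  System.out.println(sfm.getTotalSize());     // bytes tracked across SSTs
  System.out.println(sfm.getTrackedFiles());  // Map<String, Long>, per the native above
}
```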
diff --git a/src/rocksdb/java/rocksjni/sst_file_reader_iterator.cc b/src/rocksdb/java/rocksjni/sst_file_reader_iterator.cc
new file mode 100644
index 000000000..29cf2c5da
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/sst_file_reader_iterator.cc
@@ -0,0 +1,253 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling c++ ROCKSDB_NAMESPACE::Iterator methods from Java side.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "include/org_rocksdb_SstFileReaderIterator.h"
+#include "rocksdb/iterator.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ assert(it != nullptr);
+ delete it;
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: isValid0
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_SstFileReaderIterator_isValid0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle)->Valid();
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: seekToFirst0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_seekToFirst0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle)->SeekToFirst();
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: seekToLast0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_seekToLast0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle)->SeekToLast();
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: next0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_next0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle)->Next();
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: prev0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_prev0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle)->Prev();
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: seek0
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_seek0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle,
+ jbyteArray jtarget,
+ jint jtarget_len) {
+ jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
+ if (target == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::Slice target_slice(reinterpret_cast<char*>(target),
+ jtarget_len);
+
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ it->Seek(target_slice);
+
+ env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: seekForPrev0
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_seekForPrev0(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong handle,
+ jbyteArray jtarget,
+ jint jtarget_len) {
+ jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
+ if (target == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::Slice target_slice(reinterpret_cast<char*>(target),
+ jtarget_len);
+
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ it->SeekForPrev(target_slice);
+
+ env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: status0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_status0(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ ROCKSDB_NAMESPACE::Status s = it->status();
+
+ if (s.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: key0
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_SstFileReaderIterator_key0(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ ROCKSDB_NAMESPACE::Slice key_slice = it->key();
+
+ jbyteArray jkey = env->NewByteArray(static_cast<jsize>(key_slice.size()));
+ if (jkey == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetByteArrayRegion(
+ jkey, 0, static_cast<jsize>(key_slice.size()),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(key_slice.data())));
+ return jkey;
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: value0
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_SstFileReaderIterator_value0(JNIEnv* env,
+                                                         jobject /*jobj*/,
+                                                         jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ ROCKSDB_NAMESPACE::Slice value_slice = it->value();
+
+ jbyteArray jkeyValue =
+ env->NewByteArray(static_cast<jsize>(value_slice.size()));
+ if (jkeyValue == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+  env->SetByteArrayRegion(
+      jkeyValue, 0, static_cast<jsize>(value_slice.size()),
+      const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value_slice.data())));
+ return jkeyValue;
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: keyDirect0
+ * Signature: (JLjava/nio/ByteBuffer;II)I
+ */
+jint Java_org_rocksdb_SstFileReaderIterator_keyDirect0(
+ JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget,
+ jint jtarget_off, jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ ROCKSDB_NAMESPACE::Slice key_slice = it->key();
+ return ROCKSDB_NAMESPACE::JniUtil::copyToDirect(env, key_slice, jtarget,
+ jtarget_off, jtarget_len);
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: valueDirect0
+ * Signature: (JLjava/nio/ByteBuffer;II)I
+ */
+jint Java_org_rocksdb_SstFileReaderIterator_valueDirect0(
+ JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget,
+ jint jtarget_off, jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ ROCKSDB_NAMESPACE::Slice value_slice = it->value();
+ return ROCKSDB_NAMESPACE::JniUtil::copyToDirect(env, value_slice, jtarget,
+ jtarget_off, jtarget_len);
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: seekDirect0
+ * Signature: (JLjava/nio/ByteBuffer;II)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_seekDirect0(
+ JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget,
+ jint jtarget_off, jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ auto seek = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) {
+ it->Seek(target_slice);
+ };
+ ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seek, env, jtarget, jtarget_off,
+ jtarget_len);
+}
+
+/*
+ * Class: org_rocksdb_SstFileReaderIterator
+ * Method: seekForPrevDirect0
+ * Signature: (JLjava/nio/ByteBuffer;II)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_seekForPrevDirect0(
+ JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget,
+ jint jtarget_off, jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+ auto seekPrev = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) {
+ it->SeekForPrev(target_slice);
+ };
+ ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seekPrev, env, jtarget, jtarget_off,
+ jtarget_len);
+}
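The `keyDirect0`/`valueDirect0` natives copy at most the requested length into the caller's direct buffer but return the full key/value size, which lets the Java side detect truncation. A sketch, assuming the corresponding `key(ByteBuffer)` wrapper and an iterator `it` positioned on a valid entry:

```java
import java.nio.ByteBuffer;

final ByteBuffer keyBuf = ByteBuffer.allocateDirect(32);
final int keySize = it.key(keyBuf); // copies min(32, size), returns full size
if (keySize > 32) {
  // truncated: re-read with a buffer of at least keySize bytes
}
```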
diff --git a/src/rocksdb/java/rocksjni/sst_file_readerjni.cc b/src/rocksdb/java/rocksjni/sst_file_readerjni.cc
new file mode 100644
index 000000000..d79a2c09a
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/sst_file_readerjni.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::SstFileReader methods
+// from Java side.
+
+#include <jni.h>
+#include <string>
+
+#include "include/org_rocksdb_SstFileReader.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/env.h"
+#include "rocksdb/options.h"
+#include "rocksdb/sst_file_reader.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_SstFileReader
+ * Method: newSstFileReader
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_SstFileReader_newSstFileReader(JNIEnv * /*env*/,
+ jclass /*jcls*/,
+ jlong joptions) {
+ auto *options =
+ reinterpret_cast<const ROCKSDB_NAMESPACE::Options *>(joptions);
+ ROCKSDB_NAMESPACE::SstFileReader *sst_file_reader =
+ new ROCKSDB_NAMESPACE::SstFileReader(*options);
+ return reinterpret_cast<jlong>(sst_file_reader);
+}
+
+/*
+ * Class: org_rocksdb_SstFileReader
+ * Method: open
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_SstFileReader_open(JNIEnv *env, jobject /*jobj*/,
+ jlong jhandle, jstring jfile_path) {
+ const char *file_path = env->GetStringUTFChars(jfile_path, nullptr);
+ if (file_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ ROCKSDB_NAMESPACE::Status s =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileReader *>(jhandle)->Open(
+ file_path);
+ env->ReleaseStringUTFChars(jfile_path, file_path);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_SstFileReader
+ * Method: newIterator
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_SstFileReader_newIterator(JNIEnv * /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jlong jread_options_handle) {
+ auto *sst_file_reader =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileReader *>(jhandle);
+ auto *read_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions *>(jread_options_handle);
+ return reinterpret_cast<jlong>(sst_file_reader->NewIterator(*read_options));
+}
+
+/*
+ * Class: org_rocksdb_SstFileReader
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileReader_disposeInternal(JNIEnv * /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::SstFileReader *>(jhandle);
+}
+
+/*
+ * Class: org_rocksdb_SstFileReader
+ * Method: verifyChecksum
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileReader_verifyChecksum(JNIEnv *env,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto *sst_file_reader =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileReader *>(jhandle);
+ auto s = sst_file_reader->VerifyChecksum();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_SstFileReader
+ * Method: getTableProperties
+ * Signature: (J)Lorg/rocksdb/TableProperties;
+ */
+jobject Java_org_rocksdb_SstFileReader_getTableProperties(JNIEnv *env,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto *sst_file_reader =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileReader *>(jhandle);
+ std::shared_ptr<const ROCKSDB_NAMESPACE::TableProperties> tp =
+ sst_file_reader->GetTableProperties();
+ jobject jtable_properties =
+ ROCKSDB_NAMESPACE::TablePropertiesJni::fromCppTableProperties(
+ env, *(tp.get()));
+ return jtable_properties;
+}
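End to end, the reader natives above support standalone inspection of an SST file. A sketch (path illustrative; the calls throw `RocksDBException` on failure):

```java
import org.rocksdb.*;

try (final Options options = new Options();
     final SstFileReader reader = new SstFileReader(options)) {
  reader.open("/tmp/data.sst");
  reader.verifyChecksum();
  try (final ReadOptions ro = new ReadOptions();
       final SstFileReaderIterator it = reader.newIterator(ro)) {
    for (it.seekToFirst(); it.isValid(); it.next()) {
      System.out.println(new String(it.key()) + " -> " + new String(it.value()));
    }
  }
}
```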
diff --git a/src/rocksdb/java/rocksjni/sst_file_writerjni.cc b/src/rocksdb/java/rocksjni/sst_file_writerjni.cc
new file mode 100644
index 000000000..5ca8c5309
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/sst_file_writerjni.cc
@@ -0,0 +1,308 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::SstFileWriter methods
+// from Java side.
+
+#include <jni.h>
+#include <string>
+
+#include "include/org_rocksdb_SstFileWriter.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/env.h"
+#include "rocksdb/options.h"
+#include "rocksdb/sst_file_writer.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: newSstFileWriter
+ * Signature: (JJJB)J
+ */
+jlong Java_org_rocksdb_SstFileWriter_newSstFileWriter__JJJB(
+ JNIEnv * /*env*/, jclass /*jcls*/, jlong jenvoptions, jlong joptions,
+ jlong jcomparator_handle, jbyte jcomparator_type) {
+ ROCKSDB_NAMESPACE::Comparator *comparator = nullptr;
+ switch (jcomparator_type) {
+ // JAVA_COMPARATOR
+ case 0x0:
+ comparator = reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallback *>(
+ jcomparator_handle);
+ break;
+
+ // JAVA_NATIVE_COMPARATOR_WRAPPER
+ case 0x1:
+ comparator =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Comparator *>(jcomparator_handle);
+ break;
+ }
+ auto *env_options =
+ reinterpret_cast<const ROCKSDB_NAMESPACE::EnvOptions *>(jenvoptions);
+ auto *options =
+ reinterpret_cast<const ROCKSDB_NAMESPACE::Options *>(joptions);
+ ROCKSDB_NAMESPACE::SstFileWriter *sst_file_writer =
+ new ROCKSDB_NAMESPACE::SstFileWriter(*env_options, *options, comparator);
+ return reinterpret_cast<jlong>(sst_file_writer);
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: newSstFileWriter
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_SstFileWriter_newSstFileWriter__JJ(JNIEnv * /*env*/,
+ jclass /*jcls*/,
+ jlong jenvoptions,
+ jlong joptions) {
+ auto *env_options =
+ reinterpret_cast<const ROCKSDB_NAMESPACE::EnvOptions *>(jenvoptions);
+ auto *options =
+ reinterpret_cast<const ROCKSDB_NAMESPACE::Options *>(joptions);
+ ROCKSDB_NAMESPACE::SstFileWriter *sst_file_writer =
+ new ROCKSDB_NAMESPACE::SstFileWriter(*env_options, *options);
+ return reinterpret_cast<jlong>(sst_file_writer);
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: open
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_SstFileWriter_open(JNIEnv *env, jobject /*jobj*/,
+ jlong jhandle, jstring jfile_path) {
+ const char *file_path = env->GetStringUTFChars(jfile_path, nullptr);
+ if (file_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ ROCKSDB_NAMESPACE::Status s =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Open(
+ file_path);
+ env->ReleaseStringUTFChars(jfile_path, file_path);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: put
+ * Signature: (JJJ)V
+ */
+void Java_org_rocksdb_SstFileWriter_put__JJJ(JNIEnv *env, jobject /*jobj*/,
+ jlong jhandle, jlong jkey_handle,
+ jlong jvalue_handle) {
+ auto *key_slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(jkey_handle);
+ auto *value_slice =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(jvalue_handle);
+ ROCKSDB_NAMESPACE::Status s =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Put(
+ *key_slice, *value_slice);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: put
+ * Signature: (J[B[B)V
+ */
+void Java_org_rocksdb_SstFileWriter_put__J_3B_3B(JNIEnv *env, jobject /*jobj*/,
+ jlong jhandle, jbyteArray jkey,
+ jbyteArray jval) {
+ jbyte *key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char *>(key),
+ env->GetArrayLength(jkey));
+
+ jbyte *value = env->GetByteArrayElements(jval, nullptr);
+ if (value == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ return;
+ }
+ ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast<char *>(value),
+ env->GetArrayLength(jval));
+
+ ROCKSDB_NAMESPACE::Status s =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Put(
+ key_slice, value_slice);
+
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ env->ReleaseByteArrayElements(jval, value, JNI_ABORT);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: putDirect
+ * Signature: (JLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;II)V
+ */
+void Java_org_rocksdb_SstFileWriter_putDirect(JNIEnv *env, jobject /*jdb*/,
+ jlong jdb_handle, jobject jkey,
+ jint jkey_off, jint jkey_len,
+ jobject jval, jint jval_off,
+ jint jval_len) {
+ auto *writer =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jdb_handle);
+ auto put = [&env, &writer](ROCKSDB_NAMESPACE::Slice &key,
+ ROCKSDB_NAMESPACE::Slice &value) {
+ ROCKSDB_NAMESPACE::Status s = writer->Put(key, value);
+ if (s.ok()) {
+ return;
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ };
+ ROCKSDB_NAMESPACE::JniUtil::kv_op_direct(put, env, jkey, jkey_off, jkey_len,
+ jval, jval_off, jval_len);
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: fileSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_SstFileWriter_fileSize(JNIEnv * /*env*/, jobject /*jdb*/,
+ jlong jdb_handle) {
+ auto *writer =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jdb_handle);
+ return static_cast<jlong>(writer->FileSize());
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: merge
+ * Signature: (JJJ)V
+ */
+void Java_org_rocksdb_SstFileWriter_merge__JJJ(JNIEnv *env, jobject /*jobj*/,
+ jlong jhandle, jlong jkey_handle,
+ jlong jvalue_handle) {
+ auto *key_slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(jkey_handle);
+ auto *value_slice =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(jvalue_handle);
+ ROCKSDB_NAMESPACE::Status s =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Merge(
+ *key_slice, *value_slice);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: merge
+ * Signature: (J[B[B)V
+ */
+void Java_org_rocksdb_SstFileWriter_merge__J_3B_3B(JNIEnv *env,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jbyteArray jkey,
+ jbyteArray jval) {
+ jbyte *key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char *>(key),
+ env->GetArrayLength(jkey));
+
+ jbyte *value = env->GetByteArrayElements(jval, nullptr);
+ if (value == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ return;
+ }
+ ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast<char *>(value),
+ env->GetArrayLength(jval));
+
+ ROCKSDB_NAMESPACE::Status s =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Merge(
+ key_slice, value_slice);
+
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ env->ReleaseByteArrayElements(jval, value, JNI_ABORT);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: delete
+ * Signature: (J[B)V
+ */
+void Java_org_rocksdb_SstFileWriter_delete__J_3B(JNIEnv *env, jobject /*jobj*/,
+ jlong jhandle,
+ jbyteArray jkey) {
+ jbyte *key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char *>(key),
+ env->GetArrayLength(jkey));
+
+ ROCKSDB_NAMESPACE::Status s =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Delete(
+ key_slice);
+
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: delete
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_SstFileWriter_delete__JJ(JNIEnv *env, jobject /*jobj*/,
+ jlong jhandle,
+ jlong jkey_handle) {
+ auto *key_slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(jkey_handle);
+ ROCKSDB_NAMESPACE::Status s =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Delete(
+ *key_slice);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: finish
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileWriter_finish(JNIEnv *env, jobject /*jobj*/,
+ jlong jhandle) {
+ ROCKSDB_NAMESPACE::Status s =
+ reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Finish();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileWriter_disposeInternal(JNIEnv * /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle);
+}
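Typical flow for these natives from Java: write keys in comparator order, `finish()`, then ingest the sealed file into a live DB. A sketch (paths and `db` illustrative):

```java
import java.util.Arrays;
import org.rocksdb.*;

try (final EnvOptions envOptions = new EnvOptions();
     final Options options = new Options();
     final SstFileWriter writer = new SstFileWriter(envOptions, options)) {
  writer.open("/tmp/bulk.sst");
  writer.put("k1".getBytes(), "v1".getBytes()); // keys must be in order
  writer.put("k2".getBytes(), "v2".getBytes());
  writer.finish(); // seals the file; a failed Finish() surfaces as RocksDBException
}
try (final IngestExternalFileOptions ingest = new IngestExternalFileOptions()) {
  db.ingestExternalFile(Arrays.asList("/tmp/bulk.sst"), ingest);
}
```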
diff --git a/src/rocksdb/java/rocksjni/statistics.cc b/src/rocksdb/java/rocksjni/statistics.cc
new file mode 100644
index 000000000..f59e79e6c
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/statistics.cc
@@ -0,0 +1,264 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling c++ ROCKSDB_NAMESPACE::Statistics methods from Java side.
+
+#include <jni.h>
+#include <memory>
+#include <set>
+
+#include "include/org_rocksdb_Statistics.h"
+#include "rocksdb/statistics.h"
+#include "rocksjni/portal.h"
+#include "rocksjni/statisticsjni.h"
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: newStatistics
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_Statistics_newStatistics__(
+ JNIEnv* env, jclass jcls) {
+ return Java_org_rocksdb_Statistics_newStatistics___3BJ(
+ env, jcls, nullptr, 0);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: newStatistics
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Statistics_newStatistics__J(
+ JNIEnv* env, jclass jcls, jlong jother_statistics_handle) {
+ return Java_org_rocksdb_Statistics_newStatistics___3BJ(
+ env, jcls, nullptr, jother_statistics_handle);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: newStatistics
+ * Signature: ([B)J
+ */
+jlong Java_org_rocksdb_Statistics_newStatistics___3B(
+ JNIEnv* env, jclass jcls, jbyteArray jhistograms) {
+ return Java_org_rocksdb_Statistics_newStatistics___3BJ(
+ env, jcls, jhistograms, 0);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: newStatistics
+ * Signature: ([BJ)J
+ */
+jlong Java_org_rocksdb_Statistics_newStatistics___3BJ(
+    JNIEnv* env, jclass, jbyteArray jhistograms,
+    jlong jother_statistics_handle) {
+ std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>* pSptr_other_statistics =
+ nullptr;
+ if (jother_statistics_handle > 0) {
+ pSptr_other_statistics =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+ jother_statistics_handle);
+ }
+
+ std::set<uint32_t> histograms;
+ if (jhistograms != nullptr) {
+ const jsize len = env->GetArrayLength(jhistograms);
+ if (len > 0) {
+ jbyte* jhistogram = env->GetByteArrayElements(jhistograms, nullptr);
+ if (jhistogram == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+
+ for (jsize i = 0; i < len; i++) {
+ const ROCKSDB_NAMESPACE::Histograms histogram =
+ ROCKSDB_NAMESPACE::HistogramTypeJni::toCppHistograms(jhistogram[i]);
+ histograms.emplace(histogram);
+ }
+
+ env->ReleaseByteArrayElements(jhistograms, jhistogram, JNI_ABORT);
+ }
+ }
+
+ std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> sptr_other_statistics =
+ nullptr;
+ if (pSptr_other_statistics != nullptr) {
+ sptr_other_statistics = *pSptr_other_statistics;
+ }
+
+ auto* pSptr_statistics =
+ new std::shared_ptr<ROCKSDB_NAMESPACE::StatisticsJni>(
+ new ROCKSDB_NAMESPACE::StatisticsJni(sptr_other_statistics,
+ histograms));
+
+ return reinterpret_cast<jlong>(pSptr_statistics);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Statistics_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ if (jhandle > 0) {
+ auto* pSptr_statistics =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+ jhandle);
+ delete pSptr_statistics;
+ }
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: statsLevel
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Statistics_statsLevel(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* pSptr_statistics =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+ jhandle);
+ assert(pSptr_statistics != nullptr);
+ return ROCKSDB_NAMESPACE::StatsLevelJni::toJavaStatsLevel(
+ pSptr_statistics->get()->get_stats_level());
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: setStatsLevel
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Statistics_setStatsLevel(
+ JNIEnv*, jobject, jlong jhandle, jbyte jstats_level) {
+ auto* pSptr_statistics =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+ jhandle);
+ assert(pSptr_statistics != nullptr);
+ auto stats_level =
+ ROCKSDB_NAMESPACE::StatsLevelJni::toCppStatsLevel(jstats_level);
+ pSptr_statistics->get()->set_stats_level(stats_level);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: getTickerCount
+ * Signature: (JB)J
+ */
+jlong Java_org_rocksdb_Statistics_getTickerCount(
+ JNIEnv*, jobject, jlong jhandle, jbyte jticker_type) {
+ auto* pSptr_statistics =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+ jhandle);
+ assert(pSptr_statistics != nullptr);
+ auto ticker = ROCKSDB_NAMESPACE::TickerTypeJni::toCppTickers(jticker_type);
+ uint64_t count = pSptr_statistics->get()->getTickerCount(ticker);
+ return static_cast<jlong>(count);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: getAndResetTickerCount
+ * Signature: (JB)J
+ */
+jlong Java_org_rocksdb_Statistics_getAndResetTickerCount(
+ JNIEnv*, jobject, jlong jhandle, jbyte jticker_type) {
+ auto* pSptr_statistics =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+ jhandle);
+ assert(pSptr_statistics != nullptr);
+ auto ticker = ROCKSDB_NAMESPACE::TickerTypeJni::toCppTickers(jticker_type);
+ return pSptr_statistics->get()->getAndResetTickerCount(ticker);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: getHistogramData
+ * Signature: (JB)Lorg/rocksdb/HistogramData;
+ */
+jobject Java_org_rocksdb_Statistics_getHistogramData(
+ JNIEnv* env, jobject, jlong jhandle, jbyte jhistogram_type) {
+ auto* pSptr_statistics =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+ jhandle);
+ assert(pSptr_statistics != nullptr);
+
+ // TODO(AR) perhaps better to construct a Java Object Wrapper that
+ // uses ptr to C++ `new HistogramData`
+ ROCKSDB_NAMESPACE::HistogramData data;
+
+ auto histogram =
+ ROCKSDB_NAMESPACE::HistogramTypeJni::toCppHistograms(jhistogram_type);
+ pSptr_statistics->get()->histogramData(
+ static_cast<ROCKSDB_NAMESPACE::Histograms>(histogram), &data);
+
+ jclass jclazz = ROCKSDB_NAMESPACE::HistogramDataJni::getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid =
+ ROCKSDB_NAMESPACE::HistogramDataJni::getConstructorMethodId(env);
+ if (mid == nullptr) {
+ // exception occurred accessing method
+ return nullptr;
+ }
+
+ return env->NewObject(jclazz, mid, data.median, data.percentile95,
+ data.percentile99, data.average,
+ data.standard_deviation, data.max, data.count,
+ data.sum, data.min);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: getHistogramString
+ * Signature: (JB)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_Statistics_getHistogramString(
+ JNIEnv* env, jobject, jlong jhandle, jbyte jhistogram_type) {
+ auto* pSptr_statistics =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+ jhandle);
+ assert(pSptr_statistics != nullptr);
+ auto histogram =
+ ROCKSDB_NAMESPACE::HistogramTypeJni::toCppHistograms(jhistogram_type);
+ auto str = pSptr_statistics->get()->getHistogramString(histogram);
+ return env->NewStringUTF(str.c_str());
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: reset
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Statistics_reset(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* pSptr_statistics =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+ jhandle);
+ assert(pSptr_statistics != nullptr);
+ ROCKSDB_NAMESPACE::Status s = pSptr_statistics->get()->Reset();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: toString
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_Statistics_toString(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* pSptr_statistics =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+ jhandle);
+ assert(pSptr_statistics != nullptr);
+ auto str = pSptr_statistics->get()->ToString();
+ return env->NewStringUTF(str.c_str());
+}
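From Java, a `Statistics` object is attached before opening the DB and then polled; the histogram constructor argument maps onto the `jhistograms` ignore-set above. A sketch (path illustrative):

```java
import org.rocksdb.*;

try (final Statistics stats = new Statistics();
     final Options options =
         new Options().setCreateIfMissing(true).setStatistics(stats);
     final RocksDB db = RocksDB.open(options, "/tmp/stats-db")) {
  db.put("k".getBytes(), "v".getBytes());
  final long written = stats.getTickerCount(TickerType.BYTES_WRITTEN);
  final HistogramData h = stats.getHistogramData(HistogramType.DB_WRITE);
  System.out.println(written + " bytes written, p95 write us: "
      + h.getPercentile95());
}
```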
diff --git a/src/rocksdb/java/rocksjni/statisticsjni.cc b/src/rocksdb/java/rocksjni/statisticsjni.cc
new file mode 100644
index 000000000..afb9d6cff
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/statisticsjni.cc
@@ -0,0 +1,32 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Statistics
+
+#include "rocksjni/statisticsjni.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats)
+ : StatisticsImpl(stats), m_ignore_histograms() {}
+
+StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats,
+ const std::set<uint32_t> ignore_histograms)
+ : StatisticsImpl(stats), m_ignore_histograms(ignore_histograms) {}
+
+bool StatisticsJni::HistEnabledForType(uint32_t type) const {
+ if (type >= HISTOGRAM_ENUM_MAX) {
+ return false;
+ }
+
+ if (m_ignore_histograms.count(type) > 0) {
+ return false;
+ }
+
+ return true;
+}
+// @lint-ignore TXT4 T25377293 Grandfathered in
+};  // namespace ROCKSDB_NAMESPACE
\ No newline at end of file
diff --git a/src/rocksdb/java/rocksjni/statisticsjni.h b/src/rocksdb/java/rocksjni/statisticsjni.h
new file mode 100644
index 000000000..51e279143
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/statisticsjni.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Statistics
+
+#ifndef JAVA_ROCKSJNI_STATISTICSJNI_H_
+#define JAVA_ROCKSJNI_STATISTICSJNI_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include "rocksdb/statistics.h"
+#include "monitoring/statistics.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class StatisticsJni : public StatisticsImpl {
+ public:
+ StatisticsJni(std::shared_ptr<Statistics> stats);
+  StatisticsJni(std::shared_ptr<Statistics> stats,
+                const std::set<uint32_t> ignore_histograms);
+ virtual bool HistEnabledForType(uint32_t type) const override;
+
+ private:
+ const std::set<uint32_t> m_ignore_histograms;
+};
+
+}  // namespace ROCKSDB_NAMESPACE
+
+// @lint-ignore TXT4 T25377293 Grandfathered in
+#endif  // JAVA_ROCKSJNI_STATISTICSJNI_H_
\ No newline at end of file
diff --git a/src/rocksdb/java/rocksjni/table.cc b/src/rocksdb/java/rocksjni/table.cc
new file mode 100644
index 000000000..9e3f4b663
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/table.cc
@@ -0,0 +1,150 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Options.
+
+#include "rocksdb/table.h"
+#include <jni.h>
+#include "include/org_rocksdb_BlockBasedTableConfig.h"
+#include "include/org_rocksdb_PlainTableConfig.h"
+#include "portal.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/filter_policy.h"
+
+/*
+ * Class: org_rocksdb_PlainTableConfig
+ * Method: newTableFactoryHandle
+ * Signature: (IIDIIBZZ)J
+ */
+jlong Java_org_rocksdb_PlainTableConfig_newTableFactoryHandle(
+ JNIEnv * /*env*/, jobject /*jobj*/, jint jkey_size,
+ jint jbloom_bits_per_key, jdouble jhash_table_ratio, jint jindex_sparseness,
+ jint jhuge_page_tlb_size, jbyte jencoding_type, jboolean jfull_scan_mode,
+ jboolean jstore_index_in_file) {
+ ROCKSDB_NAMESPACE::PlainTableOptions options =
+ ROCKSDB_NAMESPACE::PlainTableOptions();
+ options.user_key_len = jkey_size;
+ options.bloom_bits_per_key = jbloom_bits_per_key;
+ options.hash_table_ratio = jhash_table_ratio;
+ options.index_sparseness = jindex_sparseness;
+ options.huge_page_tlb_size = jhuge_page_tlb_size;
+ options.encoding_type =
+ static_cast<ROCKSDB_NAMESPACE::EncodingType>(jencoding_type);
+ options.full_scan_mode = jfull_scan_mode;
+ options.store_index_in_file = jstore_index_in_file;
+ return reinterpret_cast<jlong>(
+ ROCKSDB_NAMESPACE::NewPlainTableFactory(options));
+}
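Each setter of `PlainTableConfig` maps onto one parameter of the native above. A sketch (values illustrative):

```java
import org.rocksdb.*;

try (final Options options = new Options()
         .setCreateIfMissing(true)
         .setAllowMmapReads(true) // plain table requires mmap-able files
         .setTableFormatConfig(new PlainTableConfig()
             .setKeySize(16)           // jkey_size
             .setBloomBitsPerKey(10)   // jbloom_bits_per_key
             .setHashTableRatio(0.75)  // jhash_table_ratio
         )) {
  // open a DB with these options to use the plain-table format
}
```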
+
+/*
+ * Class: org_rocksdb_BlockBasedTableConfig
+ * Method: newTableFactoryHandle
+ * Signature: (ZZZZBBDBZJJJJIIIJZZJZZIIZZJIJI)J
+ */
+jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
+ JNIEnv*, jobject, jboolean jcache_index_and_filter_blocks,
+ jboolean jcache_index_and_filter_blocks_with_high_priority,
+ jboolean jpin_l0_filter_and_index_blocks_in_cache,
+ jboolean jpin_top_level_index_and_filter, jbyte jindex_type_value,
+ jbyte jdata_block_index_type_value,
+ jdouble jdata_block_hash_table_util_ratio, jbyte jchecksum_type_value,
+ jboolean jno_block_cache, jlong jblock_cache_handle,
+ jlong jpersistent_cache_handle,
+ jlong jblock_cache_compressed_handle, jlong jblock_size,
+ jint jblock_size_deviation, jint jblock_restart_interval,
+ jint jindex_block_restart_interval, jlong jmetadata_block_size,
+ jboolean jpartition_filters, jboolean juse_delta_encoding,
+ jlong jfilter_policy_handle, jboolean jwhole_key_filtering,
+ jboolean jverify_compression, jint jread_amp_bytes_per_bit,
+ jint jformat_version, jboolean jenable_index_compression,
+ jboolean jblock_align, jlong jblock_cache_size,
+ jint jblock_cache_num_shard_bits, jlong jblock_cache_compressed_size,
+ jint jblock_cache_compressed_num_shard_bits) {
+ ROCKSDB_NAMESPACE::BlockBasedTableOptions options;
+ options.cache_index_and_filter_blocks =
+ static_cast<bool>(jcache_index_and_filter_blocks);
+ options.cache_index_and_filter_blocks_with_high_priority =
+ static_cast<bool>(jcache_index_and_filter_blocks_with_high_priority);
+ options.pin_l0_filter_and_index_blocks_in_cache =
+ static_cast<bool>(jpin_l0_filter_and_index_blocks_in_cache);
+ options.pin_top_level_index_and_filter =
+ static_cast<bool>(jpin_top_level_index_and_filter);
+ options.index_type =
+ ROCKSDB_NAMESPACE::IndexTypeJni::toCppIndexType(jindex_type_value);
+ options.data_block_index_type =
+ ROCKSDB_NAMESPACE::DataBlockIndexTypeJni::toCppDataBlockIndexType(
+ jdata_block_index_type_value);
+ options.data_block_hash_table_util_ratio =
+ static_cast<double>(jdata_block_hash_table_util_ratio);
+ options.checksum = ROCKSDB_NAMESPACE::ChecksumTypeJni::toCppChecksumType(
+ jchecksum_type_value);
+ options.no_block_cache = static_cast<bool>(jno_block_cache);
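+  // Cache selection precedence: an explicit Java-side cache handle wins;
+  // otherwise an LRU cache of jblock_cache_size bytes is created (sharded
+  // if shard bits were requested); a negative size disables the block
+  // cache entirely.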
+ if (options.no_block_cache) {
+ options.block_cache = nullptr;
+ } else {
+ if (jblock_cache_handle > 0) {
+ std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *pCache =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *>(
+ jblock_cache_handle);
+ options.block_cache = *pCache;
+ } else if (jblock_cache_size >= 0) {
+ if (jblock_cache_num_shard_bits > 0) {
+ options.block_cache = ROCKSDB_NAMESPACE::NewLRUCache(
+ static_cast<size_t>(jblock_cache_size),
+ static_cast<int>(jblock_cache_num_shard_bits));
+ } else {
+ options.block_cache = ROCKSDB_NAMESPACE::NewLRUCache(
+ static_cast<size_t>(jblock_cache_size));
+ }
+ } else {
+ options.no_block_cache = true;
+ options.block_cache = nullptr;
+ }
+ }
+ if (jpersistent_cache_handle > 0) {
+ std::shared_ptr<ROCKSDB_NAMESPACE::PersistentCache> *pCache =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::PersistentCache> *>(
+ jpersistent_cache_handle);
+ options.persistent_cache = *pCache;
+ }
+ if (jblock_cache_compressed_handle > 0) {
+ std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *pCache =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *>(
+ jblock_cache_compressed_handle);
+ options.block_cache_compressed = *pCache;
+ } else if (jblock_cache_compressed_size > 0) {
+ if (jblock_cache_compressed_num_shard_bits > 0) {
+ options.block_cache_compressed = ROCKSDB_NAMESPACE::NewLRUCache(
+ static_cast<size_t>(jblock_cache_compressed_size),
+ static_cast<int>(jblock_cache_compressed_num_shard_bits));
+ } else {
+ options.block_cache_compressed = ROCKSDB_NAMESPACE::NewLRUCache(
+ static_cast<size_t>(jblock_cache_compressed_size));
+ }
+ }
+ options.block_size = static_cast<size_t>(jblock_size);
+ options.block_size_deviation = static_cast<int>(jblock_size_deviation);
+ options.block_restart_interval = static_cast<int>(jblock_restart_interval);
+ options.index_block_restart_interval = static_cast<int>(jindex_block_restart_interval);
+ options.metadata_block_size = static_cast<uint64_t>(jmetadata_block_size);
+ options.partition_filters = static_cast<bool>(jpartition_filters);
+ options.use_delta_encoding = static_cast<bool>(juse_delta_encoding);
+ if (jfilter_policy_handle > 0) {
+ std::shared_ptr<ROCKSDB_NAMESPACE::FilterPolicy> *pFilterPolicy =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::FilterPolicy> *>(
+ jfilter_policy_handle);
+ options.filter_policy = *pFilterPolicy;
+ }
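+  // As with the cache handles above, the filter-policy handle points to a
+  // std::shared_ptr, so the underlying FilterPolicy is reference-counted
+  // and can safely be shared between several table configs.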
+ options.whole_key_filtering = static_cast<bool>(jwhole_key_filtering);
+ options.verify_compression = static_cast<bool>(jverify_compression);
+ options.read_amp_bytes_per_bit = static_cast<uint32_t>(jread_amp_bytes_per_bit);
+ options.format_version = static_cast<uint32_t>(jformat_version);
+ options.enable_index_compression = static_cast<bool>(jenable_index_compression);
+ options.block_align = static_cast<bool>(jblock_align);
+
+ return reinterpret_cast<jlong>(
+ ROCKSDB_NAMESPACE::NewBlockBasedTableFactory(options));
+}
diff --git a/src/rocksdb/java/rocksjni/table_filter.cc b/src/rocksdb/java/rocksjni/table_filter.cc
new file mode 100644
index 000000000..a9024ce88
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/table_filter.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// org.rocksdb.AbstractTableFilter.
+
+#include <jni.h>
+#include <memory>
+
+#include "include/org_rocksdb_AbstractTableFilter.h"
+#include "rocksjni/table_filter_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractTableFilter
+ * Method: createNewTableFilter
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractTableFilter_createNewTableFilter(
+ JNIEnv* env, jobject jtable_filter) {
+ auto* table_filter_jnicallback =
+ new ROCKSDB_NAMESPACE::TableFilterJniCallback(env, jtable_filter);
+ return reinterpret_cast<jlong>(table_filter_jnicallback);
+}
diff --git a/src/rocksdb/java/rocksjni/table_filter_jnicallback.cc b/src/rocksdb/java/rocksjni/table_filter_jnicallback.cc
new file mode 100644
index 000000000..d1699548d
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/table_filter_jnicallback.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TableFilter.
+
+#include "rocksjni/table_filter_jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+TableFilterJniCallback::TableFilterJniCallback(
+ JNIEnv* env, jobject jtable_filter)
+ : JniCallback(env, jtable_filter) {
+ m_jfilter_methodid =
+ AbstractTableFilterJni::getFilterMethod(env);
+ if(m_jfilter_methodid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ // create the function reference
+  /*
+     Note: the JNI env must be obtained and released
+     on each call to the function itself, as it may
+     be called from multiple threads
+  */
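+  // If the Java callback throws, the lambda below returns false, so a
+  // failing filter conservatively behaves as though the table were
+  // filtered out.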
+ m_table_filter_function =
+ [this](const ROCKSDB_NAMESPACE::TableProperties& table_properties) {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* thread_env = getJniEnv(&attached_thread);
+ assert(thread_env != nullptr);
+
+ // create a Java TableProperties object
+ jobject jtable_properties = TablePropertiesJni::fromCppTableProperties(
+ thread_env, table_properties);
+ if (jtable_properties == nullptr) {
+ // exception thrown from fromCppTableProperties
+ thread_env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return false;
+ }
+
+ jboolean result = thread_env->CallBooleanMethod(
+ m_jcallback_obj, m_jfilter_methodid, jtable_properties);
+ if (thread_env->ExceptionCheck()) {
+ // exception thrown from CallBooleanMethod
+ thread_env->DeleteLocalRef(jtable_properties);
+ thread_env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return false;
+ }
+
+ // ok... cleanup and then return
+ releaseJniEnv(attached_thread);
+ return static_cast<bool>(result);
+ };
+}
+
+std::function<bool(const ROCKSDB_NAMESPACE::TableProperties&)>
+TableFilterJniCallback::GetTableFilterFunction() {
+ return m_table_filter_function;
+}
+
+} // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/table_filter_jnicallback.h b/src/rocksdb/java/rocksjni/table_filter_jnicallback.h
new file mode 100644
index 000000000..b5804737a
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/table_filter_jnicallback.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TableFilter.
+
+#ifndef JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
+
+#include <jni.h>
+#include <functional>
+#include <memory>
+
+#include "rocksdb/table_properties.h"
+#include "rocksjni/jnicallback.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class TableFilterJniCallback : public JniCallback {
+ public:
+ TableFilterJniCallback(
+ JNIEnv* env, jobject jtable_filter);
+ std::function<bool(const ROCKSDB_NAMESPACE::TableProperties&)>
+ GetTableFilterFunction();
+
+ private:
+ jmethodID m_jfilter_methodid;
+ std::function<bool(const ROCKSDB_NAMESPACE::TableProperties&)>
+ m_table_filter_function;
+};
+
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/thread_status.cc b/src/rocksdb/java/rocksjni/thread_status.cc
new file mode 100644
index 000000000..a5811ec17
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/thread_status.cc
@@ -0,0 +1,125 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::ThreadStatus methods from the Java side.
+
+#include <jni.h>
+
+#include "portal.h"
+#include "include/org_rocksdb_ThreadStatus.h"
+#include "rocksdb/thread_status.h"
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getThreadTypeName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getThreadTypeName(
+ JNIEnv* env, jclass, jbyte jthread_type_value) {
+ auto name = ROCKSDB_NAMESPACE::ThreadStatus::GetThreadTypeName(
+ ROCKSDB_NAMESPACE::ThreadTypeJni::toCppThreadType(jthread_type_value));
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getOperationName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getOperationName(
+ JNIEnv* env, jclass, jbyte joperation_type_value) {
+ auto name = ROCKSDB_NAMESPACE::ThreadStatus::GetOperationName(
+ ROCKSDB_NAMESPACE::OperationTypeJni::toCppOperationType(
+ joperation_type_value));
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: microsToStringNative
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_microsToStringNative(
+ JNIEnv* env, jclass, jlong jmicros) {
+ auto str = ROCKSDB_NAMESPACE::ThreadStatus::MicrosToString(
+ static_cast<uint64_t>(jmicros));
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &str, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getOperationStageName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getOperationStageName(
+ JNIEnv* env, jclass, jbyte joperation_stage_value) {
+ auto name = ROCKSDB_NAMESPACE::ThreadStatus::GetOperationStageName(
+ ROCKSDB_NAMESPACE::OperationStageJni::toCppOperationStage(
+ joperation_stage_value));
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getOperationPropertyName
+ * Signature: (BI)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getOperationPropertyName(
+ JNIEnv* env, jclass, jbyte joperation_type_value, jint jindex) {
+ auto name = ROCKSDB_NAMESPACE::ThreadStatus::GetOperationPropertyName(
+ ROCKSDB_NAMESPACE::OperationTypeJni::toCppOperationType(
+ joperation_type_value),
+ static_cast<int>(jindex));
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: interpretOperationProperties
+ * Signature: (B[J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_ThreadStatus_interpretOperationProperties(
+ JNIEnv* env, jclass, jbyte joperation_type_value,
+ jlongArray joperation_properties) {
+
+  // convert joperation_properties
+ const jsize len = env->GetArrayLength(joperation_properties);
+ const std::unique_ptr<uint64_t[]> op_properties(new uint64_t[len]);
+ jlong* jop = env->GetLongArrayElements(joperation_properties, nullptr);
+ if (jop == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ for (jsize i = 0; i < len; i++) {
+ op_properties[i] = static_cast<uint64_t>(jop[i]);
+ }
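+  // JNI_ABORT releases the pinned array without copying anything back;
+  // the elements were only read.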
+ env->ReleaseLongArrayElements(joperation_properties, jop, JNI_ABORT);
+
+ // call the function
+ auto result = ROCKSDB_NAMESPACE::ThreadStatus::InterpretOperationProperties(
+ ROCKSDB_NAMESPACE::OperationTypeJni::toCppOperationType(
+ joperation_type_value),
+ op_properties.get());
+ jobject jresult = ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, &result);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jresult;
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getStateName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getStateName(
+ JNIEnv* env, jclass, jbyte jstate_type_value) {
+ auto name = ROCKSDB_NAMESPACE::ThreadStatus::GetStateName(
+ ROCKSDB_NAMESPACE::StateTypeJni::toCppStateType(jstate_type_value));
+ return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, true);
+}
diff --git a/src/rocksdb/java/rocksjni/trace_writer.cc b/src/rocksdb/java/rocksjni/trace_writer.cc
new file mode 100644
index 000000000..3beafd45e
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/trace_writer.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TraceWriter.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_AbstractTraceWriter.h"
+#include "rocksjni/trace_writer_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractTraceWriter
+ * Method: createNewTraceWriter
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractTraceWriter_createNewTraceWriter(
+ JNIEnv* env, jobject jobj) {
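+  // The callback is handed to Java as a raw handle; the Java
+  // AbstractTraceWriter wrapper owns it and is expected to delete it when
+  // disposed.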
+ auto* trace_writer = new ROCKSDB_NAMESPACE::TraceWriterJniCallback(env, jobj);
+ return reinterpret_cast<jlong>(trace_writer);
+}
diff --git a/src/rocksdb/java/rocksjni/trace_writer_jnicallback.cc b/src/rocksdb/java/rocksjni/trace_writer_jnicallback.cc
new file mode 100644
index 000000000..b6566d335
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/trace_writer_jnicallback.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TraceWriter.
+
+#include "rocksjni/trace_writer_jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+TraceWriterJniCallback::TraceWriterJniCallback(
+ JNIEnv* env, jobject jtrace_writer)
+ : JniCallback(env, jtrace_writer) {
+ m_jwrite_proxy_methodid =
+ AbstractTraceWriterJni::getWriteProxyMethodId(env);
+ if(m_jwrite_proxy_methodid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ m_jclose_writer_proxy_methodid =
+ AbstractTraceWriterJni::getCloseWriterProxyMethodId(env);
+ if(m_jclose_writer_proxy_methodid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ m_jget_file_size_methodid =
+ AbstractTraceWriterJni::getGetFileSizeMethodId(env);
+ if(m_jget_file_size_methodid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+}
+
+Status TraceWriterJniCallback::Write(const Slice& data) {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ if (env == nullptr) {
+ return Status::IOError("Unable to attach JNI Environment");
+ }
+
+ jshort jstatus = env->CallShortMethod(m_jcallback_obj,
+ m_jwrite_proxy_methodid,
+ &data);
+
+ if(env->ExceptionCheck()) {
+ // exception thrown from CallShortMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return Status::IOError("Unable to call AbstractTraceWriter#writeProxy(long)");
+ }
+
+ // unpack status code and status sub-code from jstatus
+ jbyte jcode_value = (jstatus >> 8) & 0xFF;
+ jbyte jsub_code_value = jstatus & 0xFF;
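+  // The Java proxy is expected to pack the status as (code << 8) | subCode,
+  // so e.g. 0x0102 unpacks to code 0x01 with sub-code 0x02.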
+ std::unique_ptr<Status> s = StatusJni::toCppStatus(jcode_value, jsub_code_value);
+
+ releaseJniEnv(attached_thread);
+
+ return Status(*s);
+}
+
+Status TraceWriterJniCallback::Close() {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ if (env == nullptr) {
+ return Status::IOError("Unable to attach JNI Environment");
+ }
+
+ jshort jstatus = env->CallShortMethod(m_jcallback_obj,
+ m_jclose_writer_proxy_methodid);
+
+ if(env->ExceptionCheck()) {
+ // exception thrown from CallShortMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return Status::IOError("Unable to call AbstractTraceWriter#closeWriterProxy()");
+ }
+
+ // unpack status code and status sub-code from jstatus
+ jbyte code_value = (jstatus >> 8) & 0xFF;
+ jbyte sub_code_value = jstatus & 0xFF;
+ std::unique_ptr<Status> s = StatusJni::toCppStatus(code_value, sub_code_value);
+
+ releaseJniEnv(attached_thread);
+
+ return Status(*s);
+}
+
+uint64_t TraceWriterJniCallback::GetFileSize() {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ if (env == nullptr) {
+ return 0;
+ }
+
+ jlong jfile_size = env->CallLongMethod(m_jcallback_obj,
+ m_jget_file_size_methodid);
+
+ if(env->ExceptionCheck()) {
+ // exception thrown from CallLongMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return 0;
+ }
+
+ releaseJniEnv(attached_thread);
+
+ return static_cast<uint64_t>(jfile_size);
+}
+
+} // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/trace_writer_jnicallback.h b/src/rocksdb/java/rocksjni/trace_writer_jnicallback.h
new file mode 100644
index 000000000..eb2a8b0f8
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/trace_writer_jnicallback.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TraceWriter.
+
+#ifndef JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
+
+#include <jni.h>
+#include <memory>
+
+#include "rocksdb/trace_reader_writer.h"
+#include "rocksjni/jnicallback.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class TraceWriterJniCallback : public JniCallback, public TraceWriter {
+ public:
+ TraceWriterJniCallback(
+ JNIEnv* env, jobject jtrace_writer);
+ virtual Status Write(const Slice& data);
+ virtual Status Close();
+ virtual uint64_t GetFileSize();
+
+ private:
+ jmethodID m_jwrite_proxy_methodid;
+ jmethodID m_jclose_writer_proxy_methodid;
+ jmethodID m_jget_file_size_methodid;
+};
+
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/transaction.cc b/src/rocksdb/java/rocksjni/transaction.cc
new file mode 100644
index 000000000..ae98b868c
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction.cc
@@ -0,0 +1,1646 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::Transaction.
+
+#include <jni.h>
+#include <functional>
+
+#include "include/org_rocksdb_Transaction.h"
+
+#include "rocksdb/utilities/transaction.h"
+#include "rocksjni/portal.h"
+
+using namespace std::placeholders;
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4503) // identifier' : decorated name length
+ // exceeded, name was truncated
+#endif
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setSnapshot
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_setSnapshot(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ txn->SetSnapshot();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setSnapshotOnNextOperation
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_setSnapshotOnNextOperation__J(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ txn->SetSnapshotOnNextOperation(nullptr);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setSnapshotOnNextOperation
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Transaction_setSnapshotOnNextOperation__JJ(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jtxn_notifier_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* txn_notifier = reinterpret_cast<
+ std::shared_ptr<ROCKSDB_NAMESPACE::TransactionNotifierJniCallback>*>(
+ jtxn_notifier_handle);
+ txn->SetSnapshotOnNextOperation(*txn_notifier);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getSnapshot
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getSnapshot(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ const ROCKSDB_NAMESPACE::Snapshot* snapshot = txn->GetSnapshot();
+ return reinterpret_cast<jlong>(snapshot);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: clearSnapshot
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_clearSnapshot(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ txn->ClearSnapshot();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: prepare
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_prepare(JNIEnv* env, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ ROCKSDB_NAMESPACE::Status s = txn->Prepare();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: commit
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_commit(JNIEnv* env, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ ROCKSDB_NAMESPACE::Status s = txn->Commit();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: rollback
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_rollback(JNIEnv* env, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ ROCKSDB_NAMESPACE::Status s = txn->Rollback();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setSavePoint
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_setSavePoint(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ txn->SetSavePoint();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: rollbackToSavePoint
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_rollbackToSavePoint(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ ROCKSDB_NAMESPACE::Status s = txn->RollbackToSavePoint();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+typedef std::function<ROCKSDB_NAMESPACE::Status(
+ const ROCKSDB_NAMESPACE::ReadOptions&, const ROCKSDB_NAMESPACE::Slice&,
+ std::string*)>
+ FnGet;
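+
+// Each JNI "get" entry point binds the matching Transaction member function
+// (the explicit template argument to std::bind picks the intended overload)
+// and hands the closure to txn_get_helper, which centralizes key
+// marshalling and Status-to-exception translation.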
+
+// TODO(AR) consider refactoring to share this between here and rocksjni.cc
+jbyteArray txn_get_helper(JNIEnv* env, const FnGet& fn_get,
+ const jlong& jread_options_handle,
+ const jbyteArray& jkey, const jint& jkey_part_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+ jkey_part_len);
+
+ auto* read_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jread_options_handle);
+ std::string value;
+ ROCKSDB_NAMESPACE::Status s = fn_get(*read_options, key_slice, &value);
+
+ // trigger java unref on key.
+ // by passing JNI_ABORT, it will simply release the reference without
+ // copying the result back to the java byte array.
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+
+ if (s.IsNotFound()) {
+ return nullptr;
+ }
+
+ if (s.ok()) {
+ jbyteArray jret_value = env->NewByteArray(static_cast<jsize>(value.size()));
+ if (jret_value == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetByteArrayRegion(jret_value, 0, static_cast<jsize>(value.size()),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value.c_str())));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ return nullptr;
+ }
+ return jret_value;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: get
+ * Signature: (JJ[BIJ)[B
+ */
+jbyteArray Java_org_rocksdb_Transaction_get__JJ_3BIJ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle,
+ jbyteArray jkey, jint jkey_part_len, jlong jcolumn_family_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnGet fn_get =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::ReadOptions&,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::Slice&, std::string*)>(
+ &ROCKSDB_NAMESPACE::Transaction::Get, txn, _1, column_family_handle,
+ _2, _3);
+ return txn_get_helper(env, fn_get, jread_options_handle, jkey, jkey_part_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: get
+ * Signature: (JJ[BI)[B
+ */
+jbyteArray Java_org_rocksdb_Transaction_get__JJ_3BI(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle,
+ jbyteArray jkey, jint jkey_part_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnGet fn_get =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::ReadOptions&,
+ const ROCKSDB_NAMESPACE::Slice&, std::string*)>(
+ &ROCKSDB_NAMESPACE::Transaction::Get, txn, _1, _2, _3);
+ return txn_get_helper(env, fn_get, jread_options_handle, jkey, jkey_part_len);
+}
+
+// TODO(AR) consider refactoring to share this between here and rocksjni.cc
+// used by txn_multi_get_helper below
+std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> txn_column_families_helper(
+ JNIEnv* env, jlongArray jcolumn_family_handles, bool* has_exception) {
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
+ if (jcolumn_family_handles != nullptr) {
+ const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
+ if (len_cols > 0) {
+ if (env->EnsureLocalCapacity(len_cols) != 0) {
+ // out of memory
+ *has_exception = JNI_TRUE;
+ return std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>();
+ }
+
+ jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
+ if (jcfh == nullptr) {
+ // exception thrown: OutOfMemoryError
+ *has_exception = JNI_TRUE;
+ return std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>();
+ }
+ for (int i = 0; i < len_cols; i++) {
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcfh[i]);
+ cf_handles.push_back(cf_handle);
+ }
+ env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
+ }
+ }
+ return cf_handles;
+}
+
+typedef std::function<std::vector<ROCKSDB_NAMESPACE::Status>(
+ const ROCKSDB_NAMESPACE::ReadOptions&,
+ const std::vector<ROCKSDB_NAMESPACE::Slice>&, std::vector<std::string>*)>
+ FnMultiGet;
+
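+// Releases the (array, pinned elements, local ref) tuples collected while
+// marshalling keys/values; JNI_ABORT discards any write-back of the pinned
+// storage since the parts were only read.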
+void free_parts(
+ JNIEnv* env,
+ std::vector<std::tuple<jbyteArray, jbyte*, jobject>>& parts_to_free) {
+ for (auto& value : parts_to_free) {
+ jobject jk;
+ jbyteArray jk_ba;
+ jbyte* jk_val;
+ std::tie(jk_ba, jk_val, jk) = value;
+ env->ReleaseByteArrayElements(jk_ba, jk_val, JNI_ABORT);
+ env->DeleteLocalRef(jk);
+ }
+}
+
+// TODO(AR) consider refactoring to share this between here and rocksjni.cc
+// cf multi get
+jobjectArray txn_multi_get_helper(JNIEnv* env, const FnMultiGet& fn_multi_get,
+ const jlong& jread_options_handle,
+ const jobjectArray& jkey_parts) {
+ const jsize len_key_parts = env->GetArrayLength(jkey_parts);
+ if (env->EnsureLocalCapacity(len_key_parts) != 0) {
+ // out of memory
+ return nullptr;
+ }
+
+ std::vector<ROCKSDB_NAMESPACE::Slice> key_parts;
+ std::vector<std::tuple<jbyteArray, jbyte*, jobject>> key_parts_to_free;
+ for (int i = 0; i < len_key_parts; i++) {
+ const jobject jk = env->GetObjectArrayElement(jkey_parts, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ free_parts(env, key_parts_to_free);
+ return nullptr;
+ }
+ jbyteArray jk_ba = reinterpret_cast<jbyteArray>(jk);
+ const jsize len_key = env->GetArrayLength(jk_ba);
+ if (env->EnsureLocalCapacity(len_key) != 0) {
+ // out of memory
+ env->DeleteLocalRef(jk);
+ free_parts(env, key_parts_to_free);
+ return nullptr;
+ }
+ jbyte* jk_val = env->GetByteArrayElements(jk_ba, nullptr);
+ if (jk_val == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jk);
+ free_parts(env, key_parts_to_free);
+ return nullptr;
+ }
+
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(jk_val),
+ len_key);
+ key_parts.push_back(key_slice);
+
+ key_parts_to_free.push_back(std::make_tuple(jk_ba, jk_val, jk));
+ }
+
+ auto* read_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jread_options_handle);
+ std::vector<std::string> value_parts;
+ std::vector<ROCKSDB_NAMESPACE::Status> s =
+ fn_multi_get(*read_options, key_parts, &value_parts);
+
+ // free up allocated byte arrays
+ free_parts(env, key_parts_to_free);
+
+ // prepare the results
+ const jclass jcls_ba = env->FindClass("[B");
+ jobjectArray jresults =
+ env->NewObjectArray(static_cast<jsize>(s.size()), jcls_ba, nullptr);
+ if (jresults == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ // add to the jresults
+ for (std::vector<ROCKSDB_NAMESPACE::Status>::size_type i = 0; i != s.size();
+ i++) {
+ if (s[i].ok()) {
+ jbyteArray jentry_value =
+ env->NewByteArray(static_cast<jsize>(value_parts[i].size()));
+ if (jentry_value == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(
+ jentry_value, 0, static_cast<jsize>(value_parts[i].size()),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value_parts[i].c_str())));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jentry_value);
+ return nullptr;
+ }
+
+ env->SetObjectArrayElement(jresults, static_cast<jsize>(i), jentry_value);
+ env->DeleteLocalRef(jentry_value);
+ }
+ }
+
+ return jresults;
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: multiGet
+ * Signature: (JJ[[B[J)[[B
+ */
+jobjectArray Java_org_rocksdb_Transaction_multiGet__JJ_3_3B_3J(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle,
+ jobjectArray jkey_parts, jlongArray jcolumn_family_handles) {
+ bool has_exception = false;
+ const std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>
+ column_family_handles = txn_column_families_helper(
+ env, jcolumn_family_handles, &has_exception);
+ if (has_exception) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnMultiGet fn_multi_get = std::bind<std::vector<ROCKSDB_NAMESPACE::Status> (
+ ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::ReadOptions&,
+ const std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>&,
+ const std::vector<ROCKSDB_NAMESPACE::Slice>&, std::vector<std::string>*)>(
+ &ROCKSDB_NAMESPACE::Transaction::MultiGet, txn, _1, column_family_handles,
+ _2, _3);
+ return txn_multi_get_helper(env, fn_multi_get, jread_options_handle,
+ jkey_parts);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: multiGet
+ * Signature: (JJ[[B)[[B
+ */
+jobjectArray Java_org_rocksdb_Transaction_multiGet__JJ_3_3B(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle,
+ jobjectArray jkey_parts) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnMultiGet fn_multi_get = std::bind<std::vector<ROCKSDB_NAMESPACE::Status> (
+ ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::ReadOptions&,
+ const std::vector<ROCKSDB_NAMESPACE::Slice>&, std::vector<std::string>*)>(
+ &ROCKSDB_NAMESPACE::Transaction::MultiGet, txn, _1, _2, _3);
+ return txn_multi_get_helper(env, fn_multi_get, jread_options_handle,
+ jkey_parts);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getForUpdate
+ * Signature: (JJ[BIJZZ)[B
+ */
+jbyteArray Java_org_rocksdb_Transaction_getForUpdate__JJ_3BIJZZ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle,
+ jbyteArray jkey, jint jkey_part_len, jlong jcolumn_family_handle,
+ jboolean jexclusive, jboolean jdo_validate) {
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnGet fn_get_for_update =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::ReadOptions&,
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::Slice&, std::string*, bool, bool)>(
+ &ROCKSDB_NAMESPACE::Transaction::GetForUpdate, txn, _1,
+ column_family_handle, _2, _3, jexclusive, jdo_validate);
+ return txn_get_helper(env, fn_get_for_update, jread_options_handle, jkey,
+ jkey_part_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getForUpdate
+ * Signature: (JJ[BIZZ)[B
+ */
+jbyteArray Java_org_rocksdb_Transaction_getForUpdate__JJ_3BIZZ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle,
+ jbyteArray jkey, jint jkey_part_len, jboolean jexclusive,
+ jboolean jdo_validate) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnGet fn_get_for_update =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::ReadOptions&,
+ const ROCKSDB_NAMESPACE::Slice&, std::string*, bool, bool)>(
+ &ROCKSDB_NAMESPACE::Transaction::GetForUpdate, txn, _1, _2, _3,
+ jexclusive, jdo_validate);
+ return txn_get_helper(env, fn_get_for_update, jread_options_handle, jkey,
+ jkey_part_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: multiGetForUpdate
+ * Signature: (JJ[[B[J)[[B
+ */
+jobjectArray Java_org_rocksdb_Transaction_multiGetForUpdate__JJ_3_3B_3J(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle,
+ jobjectArray jkey_parts, jlongArray jcolumn_family_handles) {
+ bool has_exception = false;
+ const std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>
+ column_family_handles = txn_column_families_helper(
+ env, jcolumn_family_handles, &has_exception);
+ if (has_exception) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnMultiGet fn_multi_get_for_update = std::bind<std::vector<
+ ROCKSDB_NAMESPACE::Status> (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::ReadOptions&,
+ const std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>&,
+ const std::vector<ROCKSDB_NAMESPACE::Slice>&, std::vector<std::string>*)>(
+ &ROCKSDB_NAMESPACE::Transaction::MultiGetForUpdate, txn, _1,
+ column_family_handles, _2, _3);
+ return txn_multi_get_helper(env, fn_multi_get_for_update,
+ jread_options_handle, jkey_parts);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: multiGetForUpdate
+ * Signature: (JJ[[B)[[B
+ */
+jobjectArray Java_org_rocksdb_Transaction_multiGetForUpdate__JJ_3_3B(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle,
+ jobjectArray jkey_parts) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnMultiGet fn_multi_get_for_update = std::bind<std::vector<
+ ROCKSDB_NAMESPACE::Status> (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::ReadOptions&,
+ const std::vector<ROCKSDB_NAMESPACE::Slice>&, std::vector<std::string>*)>(
+ &ROCKSDB_NAMESPACE::Transaction::MultiGetForUpdate, txn, _1, _2, _3);
+ return txn_multi_get_helper(env, fn_multi_get_for_update,
+ jread_options_handle, jkey_parts);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getIterator
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_Transaction_getIterator__JJ(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jlong jread_options_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* read_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jread_options_handle);
+ return reinterpret_cast<jlong>(txn->GetIterator(*read_options));
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getIterator
+ * Signature: (JJJ)J
+ */
+jlong Java_org_rocksdb_Transaction_getIterator__JJJ(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jread_options_handle, jlong jcolumn_family_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* read_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jread_options_handle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ return reinterpret_cast<jlong>(
+ txn->GetIterator(*read_options, column_family_handle));
+}
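+
+// Note: GetIterator returns a heap-allocated Iterator*; the handle is owned
+// by the Java RocksIterator, which is expected to dispose of it.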
+
+typedef std::function<ROCKSDB_NAMESPACE::Status(
+ const ROCKSDB_NAMESPACE::Slice&, const ROCKSDB_NAMESPACE::Slice&)>
+ FnWriteKV;
+
+// TODO(AR) consider refactoring to share this between here and rocksjni.cc
+void txn_write_kv_helper(JNIEnv* env, const FnWriteKV& fn_write_kv,
+ const jbyteArray& jkey, const jint& jkey_part_len,
+ const jbyteArray& jval, const jint& jval_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ jbyte* value = env->GetByteArrayElements(jval, nullptr);
+ if (value == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ return;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+ jkey_part_len);
+ ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast<char*>(value),
+ jval_len);
+
+ ROCKSDB_NAMESPACE::Status s = fn_write_kv(key_slice, value_slice);
+
+ // trigger java unref on key.
+ // by passing JNI_ABORT, it will simply release the reference without
+ // copying the result back to the java byte array.
+ env->ReleaseByteArrayElements(jval, value, JNI_ABORT);
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+
+ if (s.ok()) {
+ return;
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: put
+ * Signature: (J[BI[BIJZ)V
+ */
+void Java_org_rocksdb_Transaction_put__J_3BI_3BIJZ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jbyteArray jval, jint jval_len,
+ jlong jcolumn_family_handle, jboolean jassume_tracked) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteKV fn_put =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::Slice&, const ROCKSDB_NAMESPACE::Slice&,
+ bool)>(&ROCKSDB_NAMESPACE::Transaction::Put, txn,
+ column_family_handle, _1, _2, jassume_tracked);
+ txn_write_kv_helper(env, fn_put, jkey, jkey_part_len, jval, jval_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: put
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_Transaction_put__J_3BI_3BI(JNIEnv* env, jobject /*jobj*/,
+ jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len,
+ jbyteArray jval,
+ jint jval_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteKV fn_put =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::Slice&, const ROCKSDB_NAMESPACE::Slice&)>(
+ &ROCKSDB_NAMESPACE::Transaction::Put, txn, _1, _2);
+ txn_write_kv_helper(env, fn_put, jkey, jkey_part_len, jval, jval_len);
+}
+
+typedef std::function<ROCKSDB_NAMESPACE::Status(
+ const ROCKSDB_NAMESPACE::SliceParts&, const ROCKSDB_NAMESPACE::SliceParts&)>
+ FnWriteKVParts;
+
+// TODO(AR) consider refactoring to share this between here and rocksjni.cc
+void txn_write_kv_parts_helper(JNIEnv* env,
+ const FnWriteKVParts& fn_write_kv_parts,
+ const jobjectArray& jkey_parts,
+ const jint& jkey_parts_len,
+ const jobjectArray& jvalue_parts,
+ const jint& jvalue_parts_len) {
+#ifndef DEBUG
+ (void) jvalue_parts_len;
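+  // the void-cast keeps the otherwise-unused parameter from triggering
+  // warnings when the assert below is compiled out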
+#else
+ assert(jkey_parts_len == jvalue_parts_len);
+#endif
+
+ auto key_parts = std::vector<ROCKSDB_NAMESPACE::Slice>();
+ auto value_parts = std::vector<ROCKSDB_NAMESPACE::Slice>();
+ auto jparts_to_free = std::vector<std::tuple<jbyteArray, jbyte*, jobject>>();
+
+ // convert java key_parts/value_parts byte[][] to Slice(s)
+ for (jsize i = 0; i < jkey_parts_len; ++i) {
+ const jobject jobj_key_part = env->GetObjectArrayElement(jkey_parts, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ free_parts(env, jparts_to_free);
+ return;
+ }
+ const jobject jobj_value_part = env->GetObjectArrayElement(jvalue_parts, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jobj_key_part);
+ free_parts(env, jparts_to_free);
+ return;
+ }
+
+ const jbyteArray jba_key_part = reinterpret_cast<jbyteArray>(jobj_key_part);
+ const jsize jkey_part_len = env->GetArrayLength(jba_key_part);
+ if (env->EnsureLocalCapacity(jkey_part_len) != 0) {
+ // out of memory
+ env->DeleteLocalRef(jobj_value_part);
+ env->DeleteLocalRef(jobj_key_part);
+ free_parts(env, jparts_to_free);
+ return;
+ }
+ jbyte* jkey_part = env->GetByteArrayElements(jba_key_part, nullptr);
+ if (jkey_part == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jobj_value_part);
+ env->DeleteLocalRef(jobj_key_part);
+ free_parts(env, jparts_to_free);
+ return;
+ }
+
+ const jbyteArray jba_value_part =
+ reinterpret_cast<jbyteArray>(jobj_value_part);
+ const jsize jvalue_part_len = env->GetArrayLength(jba_value_part);
+ if (env->EnsureLocalCapacity(jvalue_part_len) != 0) {
+      // out of memory
+      env->ReleaseByteArrayElements(jba_key_part, jkey_part, JNI_ABORT);
+      env->DeleteLocalRef(jobj_value_part);
+ env->DeleteLocalRef(jobj_key_part);
+ free_parts(env, jparts_to_free);
+ return;
+ }
+ jbyte* jvalue_part = env->GetByteArrayElements(jba_value_part, nullptr);
+ if (jvalue_part == nullptr) {
+      // exception thrown: OutOfMemoryError
+      // release the key part pinned above (jvalue_part is nullptr here,
+      // so there is nothing to release for the value)
+      env->ReleaseByteArrayElements(jba_key_part, jkey_part, JNI_ABORT);
+ env->DeleteLocalRef(jobj_value_part);
+ env->DeleteLocalRef(jobj_key_part);
+ free_parts(env, jparts_to_free);
+ return;
+ }
+
+ jparts_to_free.push_back(
+ std::make_tuple(jba_key_part, jkey_part, jobj_key_part));
+ jparts_to_free.push_back(
+ std::make_tuple(jba_value_part, jvalue_part, jobj_value_part));
+
+ key_parts.push_back(ROCKSDB_NAMESPACE::Slice(
+ reinterpret_cast<char*>(jkey_part), jkey_part_len));
+ value_parts.push_back(ROCKSDB_NAMESPACE::Slice(
+ reinterpret_cast<char*>(jvalue_part), jvalue_part_len));
+ }
+
+ // call the write_multi function
+ ROCKSDB_NAMESPACE::Status s = fn_write_kv_parts(
+ ROCKSDB_NAMESPACE::SliceParts(key_parts.data(), (int)key_parts.size()),
+ ROCKSDB_NAMESPACE::SliceParts(value_parts.data(),
+ (int)value_parts.size()));
+
+ // cleanup temporary memory
+ free_parts(env, jparts_to_free);
+
+ // return
+ if (s.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: put
+ * Signature: (J[[BI[[BIJZ)V
+ */
+void Java_org_rocksdb_Transaction_put__J_3_3BI_3_3BIJZ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts,
+ jint jkey_parts_len, jobjectArray jvalue_parts, jint jvalue_parts_len,
+ jlong jcolumn_family_handle, jboolean jassume_tracked) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteKVParts fn_put_parts =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::SliceParts&,
+ const ROCKSDB_NAMESPACE::SliceParts&, bool)>(
+ &ROCKSDB_NAMESPACE::Transaction::Put, txn, column_family_handle, _1,
+ _2, jassume_tracked);
+ txn_write_kv_parts_helper(env, fn_put_parts, jkey_parts, jkey_parts_len,
+ jvalue_parts, jvalue_parts_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: put
+ * Signature: (J[[BI[[BI)V
+ */
+void Java_org_rocksdb_Transaction_put__J_3_3BI_3_3BI(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts,
+ jint jkey_parts_len, jobjectArray jvalue_parts, jint jvalue_parts_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteKVParts fn_put_parts = std::bind<ROCKSDB_NAMESPACE::Status (
+ ROCKSDB_NAMESPACE::Transaction::*)(const ROCKSDB_NAMESPACE::SliceParts&,
+ const ROCKSDB_NAMESPACE::SliceParts&)>(
+ &ROCKSDB_NAMESPACE::Transaction::Put, txn, _1, _2);
+ txn_write_kv_parts_helper(env, fn_put_parts, jkey_parts, jkey_parts_len,
+ jvalue_parts, jvalue_parts_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: merge
+ * Signature: (J[BI[BIJZ)V
+ */
+void Java_org_rocksdb_Transaction_merge__J_3BI_3BIJZ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jbyteArray jval, jint jval_len,
+ jlong jcolumn_family_handle, jboolean jassume_tracked) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteKV fn_merge =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::Slice&, const ROCKSDB_NAMESPACE::Slice&,
+ bool)>(&ROCKSDB_NAMESPACE::Transaction::Merge, txn,
+ column_family_handle, _1, _2, jassume_tracked);
+ txn_write_kv_helper(env, fn_merge, jkey, jkey_part_len, jval, jval_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: merge
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_Transaction_merge__J_3BI_3BI(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jbyteArray jval, jint jval_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteKV fn_merge =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::Slice&, const ROCKSDB_NAMESPACE::Slice&)>(
+ &ROCKSDB_NAMESPACE::Transaction::Merge, txn, _1, _2);
+ txn_write_kv_helper(env, fn_merge, jkey, jkey_part_len, jval, jval_len);
+}
+
+typedef std::function<ROCKSDB_NAMESPACE::Status(
+ const ROCKSDB_NAMESPACE::Slice&)>
+ FnWriteK;
+
+// TODO(AR) consider refactoring to share this between here and rocksjni.cc
+void txn_write_k_helper(JNIEnv* env, const FnWriteK& fn_write_k,
+ const jbyteArray& jkey, const jint& jkey_part_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+ jkey_part_len);
+
+ ROCKSDB_NAMESPACE::Status s = fn_write_k(key_slice);
+
+ // trigger java unref on key.
+ // by passing JNI_ABORT, it will simply release the reference without
+ // copying the result back to the java byte array.
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+
+ if (s.ok()) {
+ return;
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: delete
+ * Signature: (J[BIJZ)V
+ */
+void Java_org_rocksdb_Transaction_delete__J_3BIJZ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jlong jcolumn_family_handle, jboolean jassume_tracked) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteK fn_delete =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::Slice&, bool)>(
+ &ROCKSDB_NAMESPACE::Transaction::Delete, txn, column_family_handle,
+ _1, jassume_tracked);
+ txn_write_k_helper(env, fn_delete, jkey, jkey_part_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: delete
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_Transaction_delete__J_3BI(JNIEnv* env, jobject /*jobj*/,
+ jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteK fn_delete = std::bind<ROCKSDB_NAMESPACE::Status (
+ ROCKSDB_NAMESPACE::Transaction::*)(const ROCKSDB_NAMESPACE::Slice&)>(
+ &ROCKSDB_NAMESPACE::Transaction::Delete, txn, _1);
+ txn_write_k_helper(env, fn_delete, jkey, jkey_part_len);
+}
+
+typedef std::function<ROCKSDB_NAMESPACE::Status(
+ const ROCKSDB_NAMESPACE::SliceParts&)>
+ FnWriteKParts;
+
+// TODO(AR) consider refactoring to share this between here and rocksjni.cc
+void txn_write_k_parts_helper(JNIEnv* env,
+ const FnWriteKParts& fn_write_k_parts,
+ const jobjectArray& jkey_parts,
+ const jint& jkey_parts_len) {
+ std::vector<ROCKSDB_NAMESPACE::Slice> key_parts;
+ std::vector<std::tuple<jbyteArray, jbyte*, jobject>> jkey_parts_to_free;
+
+ // convert java key_parts byte[][] to Slice(s)
+ for (jint i = 0; i < jkey_parts_len; ++i) {
+ const jobject jobj_key_part = env->GetObjectArrayElement(jkey_parts, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ free_parts(env, jkey_parts_to_free);
+ return;
+ }
+
+ const jbyteArray jba_key_part = reinterpret_cast<jbyteArray>(jobj_key_part);
+ const jsize jkey_part_len = env->GetArrayLength(jba_key_part);
+ if (env->EnsureLocalCapacity(jkey_part_len) != 0) {
+ // out of memory
+ env->DeleteLocalRef(jobj_key_part);
+ free_parts(env, jkey_parts_to_free);
+ return;
+ }
+ jbyte* jkey_part = env->GetByteArrayElements(jba_key_part, nullptr);
+ if (jkey_part == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jobj_key_part);
+ free_parts(env, jkey_parts_to_free);
+ return;
+ }
+
+ jkey_parts_to_free.push_back(std::tuple<jbyteArray, jbyte*, jobject>(
+ jba_key_part, jkey_part, jobj_key_part));
+
+ key_parts.push_back(ROCKSDB_NAMESPACE::Slice(
+ reinterpret_cast<char*>(jkey_part), jkey_part_len));
+ }
+
+ // call the write_multi function
+ ROCKSDB_NAMESPACE::Status s = fn_write_k_parts(
+ ROCKSDB_NAMESPACE::SliceParts(key_parts.data(), (int)key_parts.size()));
+
+ // cleanup temporary memory
+ free_parts(env, jkey_parts_to_free);
+
+ // return
+ if (s.ok()) {
+ return;
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: delete
+ * Signature: (J[[BIJZ)V
+ */
+void Java_org_rocksdb_Transaction_delete__J_3_3BIJZ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts,
+ jint jkey_parts_len, jlong jcolumn_family_handle,
+ jboolean jassume_tracked) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteKParts fn_delete_parts =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::SliceParts&, bool)>(
+ &ROCKSDB_NAMESPACE::Transaction::Delete, txn, column_family_handle,
+ _1, jassume_tracked);
+ txn_write_k_parts_helper(env, fn_delete_parts, jkey_parts, jkey_parts_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: delete
+ * Signature: (J[[BI)V
+ */
+void Java_org_rocksdb_Transaction_delete__J_3_3BI(JNIEnv* env, jobject /*jobj*/,
+ jlong jhandle,
+ jobjectArray jkey_parts,
+ jint jkey_parts_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteKParts fn_delete_parts = std::bind<ROCKSDB_NAMESPACE::Status (
+ ROCKSDB_NAMESPACE::Transaction::*)(const ROCKSDB_NAMESPACE::SliceParts&)>(
+ &ROCKSDB_NAMESPACE::Transaction::Delete, txn, _1);
+ txn_write_k_parts_helper(env, fn_delete_parts, jkey_parts, jkey_parts_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: singleDelete
+ * Signature: (J[BIJZ)V
+ */
+void Java_org_rocksdb_Transaction_singleDelete__J_3BIJZ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jlong jcolumn_family_handle, jboolean jassume_tracked) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteK fn_single_delete =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::Slice&, bool)>(
+ &ROCKSDB_NAMESPACE::Transaction::SingleDelete, txn,
+ column_family_handle, _1, jassume_tracked);
+ txn_write_k_helper(env, fn_single_delete, jkey, jkey_part_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: singleDelete
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_Transaction_singleDelete__J_3BI(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jbyteArray jkey,
+ jint jkey_part_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteK fn_single_delete = std::bind<ROCKSDB_NAMESPACE::Status (
+ ROCKSDB_NAMESPACE::Transaction::*)(const ROCKSDB_NAMESPACE::Slice&)>(
+ &ROCKSDB_NAMESPACE::Transaction::SingleDelete, txn, _1);
+ txn_write_k_helper(env, fn_single_delete, jkey, jkey_part_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: singleDelete
+ * Signature: (J[[BIJZ)V
+ */
+void Java_org_rocksdb_Transaction_singleDelete__J_3_3BIJZ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts,
+ jint jkey_parts_len, jlong jcolumn_family_handle,
+ jboolean jassume_tracked) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteKParts fn_single_delete_parts =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::SliceParts&, bool)>(
+ &ROCKSDB_NAMESPACE::Transaction::SingleDelete, txn,
+ column_family_handle, _1, jassume_tracked);
+ txn_write_k_parts_helper(env, fn_single_delete_parts, jkey_parts,
+ jkey_parts_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: singleDelete
+ * Signature: (J[[BI)V
+ */
+void Java_org_rocksdb_Transaction_singleDelete__J_3_3BI(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jobjectArray jkey_parts,
+ jint jkey_parts_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteKParts fn_single_delete_parts = std::bind<ROCKSDB_NAMESPACE::Status (
+ ROCKSDB_NAMESPACE::Transaction::*)(const ROCKSDB_NAMESPACE::SliceParts&)>(
+ &ROCKSDB_NAMESPACE::Transaction::SingleDelete, txn, _1);
+ txn_write_k_parts_helper(env, fn_single_delete_parts, jkey_parts,
+ jkey_parts_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: putUntracked
+ * Signature: (J[BI[BIJ)V
+ */
+void Java_org_rocksdb_Transaction_putUntracked__J_3BI_3BIJ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jbyteArray jval, jint jval_len,
+ jlong jcolumn_family_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteKV fn_put_untracked =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::Slice&, const ROCKSDB_NAMESPACE::Slice&)>(
+ &ROCKSDB_NAMESPACE::Transaction::PutUntracked, txn,
+ column_family_handle, _1, _2);
+ txn_write_kv_helper(env, fn_put_untracked, jkey, jkey_part_len, jval,
+ jval_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: putUntracked
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_Transaction_putUntracked__J_3BI_3BI(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jbyteArray jval, jint jval_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteKV fn_put_untracked =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::Slice&, const ROCKSDB_NAMESPACE::Slice&)>(
+ &ROCKSDB_NAMESPACE::Transaction::PutUntracked, txn, _1, _2);
+ txn_write_kv_helper(env, fn_put_untracked, jkey, jkey_part_len, jval,
+ jval_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: putUntracked
+ * Signature: (J[[BI[[BIJ)V
+ */
+void Java_org_rocksdb_Transaction_putUntracked__J_3_3BI_3_3BIJ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts,
+ jint jkey_parts_len, jobjectArray jvalue_parts, jint jvalue_parts_len,
+ jlong jcolumn_family_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteKVParts fn_put_parts_untracked = std::bind<ROCKSDB_NAMESPACE::Status (
+ ROCKSDB_NAMESPACE::Transaction::*)(ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::SliceParts&,
+ const ROCKSDB_NAMESPACE::SliceParts&)>(
+ &ROCKSDB_NAMESPACE::Transaction::PutUntracked, txn, column_family_handle,
+ _1, _2);
+ txn_write_kv_parts_helper(env, fn_put_parts_untracked, jkey_parts,
+ jkey_parts_len, jvalue_parts, jvalue_parts_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: putUntracked
+ * Signature: (J[[BI[[BI)V
+ */
+void Java_org_rocksdb_Transaction_putUntracked__J_3_3BI_3_3BI(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts,
+ jint jkey_parts_len, jobjectArray jvalue_parts, jint jvalue_parts_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteKVParts fn_put_parts_untracked = std::bind<ROCKSDB_NAMESPACE::Status (
+ ROCKSDB_NAMESPACE::Transaction::*)(const ROCKSDB_NAMESPACE::SliceParts&,
+ const ROCKSDB_NAMESPACE::SliceParts&)>(
+ &ROCKSDB_NAMESPACE::Transaction::PutUntracked, txn, _1, _2);
+ txn_write_kv_parts_helper(env, fn_put_parts_untracked, jkey_parts,
+ jkey_parts_len, jvalue_parts, jvalue_parts_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: mergeUntracked
+ * Signature: (J[BI[BIJ)V
+ */
+void Java_org_rocksdb_Transaction_mergeUntracked__J_3BI_3BIJ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jbyteArray jval, jint jval_len,
+ jlong jcolumn_family_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteKV fn_merge_untracked =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::Slice&, const ROCKSDB_NAMESPACE::Slice&)>(
+ &ROCKSDB_NAMESPACE::Transaction::MergeUntracked, txn,
+ column_family_handle, _1, _2);
+ txn_write_kv_helper(env, fn_merge_untracked, jkey, jkey_part_len, jval,
+ jval_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: mergeUntracked
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_Transaction_mergeUntracked__J_3BI_3BI(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jbyteArray jval, jint jval_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteKV fn_merge_untracked =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::Slice&, const ROCKSDB_NAMESPACE::Slice&)>(
+ &ROCKSDB_NAMESPACE::Transaction::MergeUntracked, txn, _1, _2);
+ txn_write_kv_helper(env, fn_merge_untracked, jkey, jkey_part_len, jval,
+ jval_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: deleteUntracked
+ * Signature: (J[BIJ)V
+ */
+void Java_org_rocksdb_Transaction_deleteUntracked__J_3BIJ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jlong jcolumn_family_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteK fn_delete_untracked = std::bind<ROCKSDB_NAMESPACE::Status (
+ ROCKSDB_NAMESPACE::Transaction::*)(ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::Slice&)>(
+ &ROCKSDB_NAMESPACE::Transaction::DeleteUntracked, txn,
+ column_family_handle, _1);
+ txn_write_k_helper(env, fn_delete_untracked, jkey, jkey_part_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: deleteUntracked
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_Transaction_deleteUntracked__J_3BI(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jbyteArray jkey,
+ jint jkey_part_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteK fn_delete_untracked = std::bind<ROCKSDB_NAMESPACE::Status (
+ ROCKSDB_NAMESPACE::Transaction::*)(const ROCKSDB_NAMESPACE::Slice&)>(
+ &ROCKSDB_NAMESPACE::Transaction::DeleteUntracked, txn, _1);
+ txn_write_k_helper(env, fn_delete_untracked, jkey, jkey_part_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: deleteUntracked
+ * Signature: (J[[BIJ)V
+ */
+void Java_org_rocksdb_Transaction_deleteUntracked__J_3_3BIJ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts,
+ jint jkey_parts_len, jlong jcolumn_family_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ FnWriteKParts fn_delete_untracked_parts =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+ const ROCKSDB_NAMESPACE::SliceParts&)>(
+ &ROCKSDB_NAMESPACE::Transaction::DeleteUntracked, txn,
+ column_family_handle, _1);
+ txn_write_k_parts_helper(env, fn_delete_untracked_parts, jkey_parts,
+ jkey_parts_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: deleteUntracked
+ * Signature: (J[[BI)V
+ */
+void Java_org_rocksdb_Transaction_deleteUntracked__J_3_3BI(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts,
+ jint jkey_parts_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ FnWriteKParts fn_delete_untracked_parts =
+ std::bind<ROCKSDB_NAMESPACE::Status (ROCKSDB_NAMESPACE::Transaction::*)(
+ const ROCKSDB_NAMESPACE::SliceParts&)>(
+ &ROCKSDB_NAMESPACE::Transaction::DeleteUntracked, txn, _1);
+ txn_write_k_parts_helper(env, fn_delete_untracked_parts, jkey_parts,
+ jkey_parts_len);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: putLogData
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_Transaction_putLogData(JNIEnv* env, jobject /*jobj*/,
+ jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+ jkey_part_len);
+ txn->PutLogData(key_slice);
+
+ // trigger java unref on key.
+ // by passing JNI_ABORT, it will simply release the reference without
+ // copying the result back to the java byte array.
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: disableIndexing
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_disableIndexing(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ txn->DisableIndexing();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: enableIndexing
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_enableIndexing(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ txn->EnableIndexing();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getNumKeys
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getNumKeys(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ return txn->GetNumKeys();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getNumPuts
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getNumPuts(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ return txn->GetNumPuts();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getNumDeletes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getNumDeletes(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ return txn->GetNumDeletes();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getNumMerges
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getNumMerges(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ return txn->GetNumMerges();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getElapsedTime
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getElapsedTime(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ return txn->GetElapsedTime();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getWriteBatch
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getWriteBatch(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ return reinterpret_cast<jlong>(txn->GetWriteBatch());
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setLockTimeout
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Transaction_setLockTimeout(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jlong jlock_timeout) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ txn->SetLockTimeout(jlock_timeout);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getWriteOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getWriteOptions(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ return reinterpret_cast<jlong>(txn->GetWriteOptions());
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setWriteOptions
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Transaction_setWriteOptions(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jlong jwrite_options_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ txn->SetWriteOptions(*write_options);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: undoGetForUpdate
+ * Signature: (J[BIJ)V
+ */
+void Java_org_rocksdb_Transaction_undoGetForUpdate__J_3BIJ(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+ jint jkey_part_len, jlong jcolumn_family_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* column_family_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+ jcolumn_family_handle);
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+ jkey_part_len);
+ txn->UndoGetForUpdate(column_family_handle, key_slice);
+
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: undoGetForUpdate
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_Transaction_undoGetForUpdate__J_3BI(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jbyteArray jkey,
+ jint jkey_part_len) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+ jkey_part_len);
+ txn->UndoGetForUpdate(key_slice);
+
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: rebuildFromWriteBatch
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Transaction_rebuildFromWriteBatch(
+ JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jwrite_batch_handle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ auto* write_batch =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwrite_batch_handle);
+ ROCKSDB_NAMESPACE::Status s = txn->RebuildFromWriteBatch(write_batch);
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getCommitTimeWriteBatch
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getCommitTimeWriteBatch(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ return reinterpret_cast<jlong>(txn->GetCommitTimeWriteBatch());
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setLogNumber
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Transaction_setLogNumber(JNIEnv* /*env*/,
+ jobject /*jobj*/, jlong jhandle,
+ jlong jlog_number) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ txn->SetLogNumber(jlog_number);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getLogNumber
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getLogNumber(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ return txn->GetLogNumber();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setName
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_Transaction_setName(JNIEnv* env, jobject /*jobj*/,
+ jlong jhandle, jstring jname) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ const char* name = env->GetStringUTFChars(jname, nullptr);
+ if (name == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::Status s = txn->SetName(name);
+
+ env->ReleaseStringUTFChars(jname, name);
+
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getName
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_Transaction_getName(JNIEnv* env, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ ROCKSDB_NAMESPACE::TransactionName name = txn->GetName();
+ return env->NewStringUTF(name.data());
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getID
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getID(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ ROCKSDB_NAMESPACE::TransactionID id = txn->GetID();
+ return static_cast<jlong>(id);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: isDeadlockDetect
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Transaction_isDeadlockDetect(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ return static_cast<jboolean>(txn->IsDeadlockDetect());
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getWaitingTxns
+ * Signature: (J)Lorg/rocksdb/Transaction$WaitingTransactions;
+ */
+jobject Java_org_rocksdb_Transaction_getWaitingTxns(JNIEnv* env,
+ jobject jtransaction_obj,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ uint32_t column_family_id;
+ std::string key;
+ std::vector<ROCKSDB_NAMESPACE::TransactionID> waiting_txns =
+ txn->GetWaitingTxns(&column_family_id, &key);
+ jobject jwaiting_txns =
+ ROCKSDB_NAMESPACE::TransactionJni::newWaitingTransactions(
+ env, jtransaction_obj, column_family_id, key, waiting_txns);
+ return jwaiting_txns;
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getState
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Transaction_getState(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ ROCKSDB_NAMESPACE::Transaction::TransactionState txn_status = txn->GetState();
+ switch (txn_status) {
+ case ROCKSDB_NAMESPACE::Transaction::TransactionState::STARTED:
+ return 0x0;
+
+ case ROCKSDB_NAMESPACE::Transaction::TransactionState::AWAITING_PREPARE:
+ return 0x1;
+
+ case ROCKSDB_NAMESPACE::Transaction::TransactionState::PREPARED:
+ return 0x2;
+
+ case ROCKSDB_NAMESPACE::Transaction::TransactionState::AWAITING_COMMIT:
+ return 0x3;
+
+ case ROCKSDB_NAMESPACE::Transaction::TransactionState::COMMITED:
+ return 0x4;
+
+ case ROCKSDB_NAMESPACE::Transaction::TransactionState::AWAITING_ROLLBACK:
+ return 0x5;
+
+ case ROCKSDB_NAMESPACE::Transaction::TransactionState::ROLLEDBACK:
+ return 0x6;
+
+ case ROCKSDB_NAMESPACE::Transaction::TransactionState::LOCKS_STOLEN:
+ return 0x7;
+ }
+
+ assert(false);
+ return static_cast<jbyte>(-1);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getId
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getId(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jhandle) {
+ auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+ uint64_t id = txn->GetId();
+ return static_cast<jlong>(id);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+}
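The natives above are thin adapters: each binds a Java entry point to the matching ROCKSDB_NAMESPACE::Transaction member via std::bind and a shared txn_write_* helper. A minimal sketch of how they surface in the public RocksJava API — `txnDb` is a hypothetical open org.rocksdb.TransactionDB and UTF_8 is java.nio.charset.StandardCharsets.UTF_8:

    try (final WriteOptions writeOptions = new WriteOptions();
         final Transaction txn = txnDb.beginTransaction(writeOptions)) {
      txn.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8));          // tracked: locked and conflict-checked
      txn.putUntracked("k2".getBytes(UTF_8), "v2".getBytes(UTF_8)); // maps to PutUntracked above
      txn.singleDelete("k1".getBytes(UTF_8));                       // key assumed written at most once
      txn.commit();
    }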
diff --git a/src/rocksdb/java/rocksjni/transaction_db.cc b/src/rocksdb/java/rocksjni/transaction_db.cc
new file mode 100644
index 000000000..c6ec64640
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_db.cc
@@ -0,0 +1,463 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::TransactionDB.
+
+#include <jni.h>
+#include <functional>
+#include <memory>
+#include <utility>
+
+#include "include/org_rocksdb_TransactionDB.h"
+
+#include "rocksdb/options.h"
+#include "rocksdb/utilities/transaction.h"
+#include "rocksdb/utilities/transaction_db.h"
+
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: open
+ * Signature: (JJLjava/lang/String;)J
+ */
+jlong Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2(
+ JNIEnv* env, jclass, jlong joptions_handle,
+ jlong jtxn_db_options_handle, jstring jdb_path) {
+ auto* options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(joptions_handle);
+ auto* txn_db_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(
+ jtxn_db_options_handle);
+ ROCKSDB_NAMESPACE::TransactionDB* tdb = nullptr;
+ const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
+ if (db_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+ ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::TransactionDB::Open(
+ *options, *txn_db_options, db_path, &tdb);
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+
+ if (s.ok()) {
+ return reinterpret_cast<jlong>(tdb);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+ }
+}
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: open
+ * Signature: (JJLjava/lang/String;[[B[J)[J
+ */
+jlongArray Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2_3_3B_3J(
+ JNIEnv* env, jclass, jlong jdb_options_handle,
+ jlong jtxn_db_options_handle, jstring jdb_path, jobjectArray jcolumn_names,
+ jlongArray jcolumn_options_handles) {
+ const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
+ if (db_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ const jsize len_cols = env->GetArrayLength(jcolumn_names);
+ if (env->EnsureLocalCapacity(len_cols) != 0) {
+ // out of memory
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ jlong* jco = env->GetLongArrayElements(jcolumn_options_handles, nullptr);
+ if (jco == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor> column_families;
+ for (int i = 0; i < len_cols; i++) {
+ const jobject jcn = env->GetObjectArrayElement(jcolumn_names, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT);
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+ const jbyteArray jcn_ba = reinterpret_cast<jbyteArray>(jcn);
+ jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, nullptr);
+ if (jcf_name == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jcn);
+ env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT);
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ const int jcf_name_len = env->GetArrayLength(jcn_ba);
+ if (env->EnsureLocalCapacity(jcf_name_len) != 0) {
+ // out of memory
+ env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT);
+ env->DeleteLocalRef(jcn);
+ env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT);
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+ const std::string cf_name(reinterpret_cast<char*>(jcf_name), jcf_name_len);
+ const ROCKSDB_NAMESPACE::ColumnFamilyOptions* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jco[i]);
+ column_families.push_back(
+ ROCKSDB_NAMESPACE::ColumnFamilyDescriptor(cf_name, *cf_options));
+
+ env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT);
+ env->DeleteLocalRef(jcn);
+ }
+ env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT);
+
+ auto* db_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdb_options_handle);
+ auto* txn_db_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(
+ jtxn_db_options_handle);
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> handles;
+ ROCKSDB_NAMESPACE::TransactionDB* tdb = nullptr;
+ const ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::TransactionDB::Open(
+ *db_options, *txn_db_options, db_path, column_families, &handles, &tdb);
+
+ // check if open operation was successful
+ if (s.ok()) {
+ const jsize resultsLen = 1 + len_cols; // db handle + column family handles
+ std::unique_ptr<jlong[]> results =
+ std::unique_ptr<jlong[]>(new jlong[resultsLen]);
+ results[0] = reinterpret_cast<jlong>(tdb);
+ for (int i = 1; i <= len_cols; i++) {
+ results[i] = reinterpret_cast<jlong>(handles[i - 1]);
+ }
+
+ jlongArray jresults = env->NewLongArray(resultsLen);
+ if (jresults == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresults);
+ return nullptr;
+ }
+ return jresults;
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+}
+
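On the Java side this native backs the column-family overload of TransactionDB.open; a hedged sketch, with the path and column family name chosen purely for illustration:

    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor("txn_cf".getBytes(UTF_8)));
    final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
    try (final DBOptions dbOptions = new DBOptions()
             .setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
         final TransactionDB txnDb = TransactionDB.open(dbOptions, txnDbOptions,
             "/tmp/txn_db", cfDescriptors, cfHandles)) {
      // cfHandles now mirrors the jlong[] built above: one handle per descriptor
    }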
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TransactionDB_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ assert(txn_db != nullptr);
+ delete txn_db;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: closeDatabase
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TransactionDB_closeDatabase(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ assert(txn_db != nullptr);
+ ROCKSDB_NAMESPACE::Status s = txn_db->Close();
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: beginTransaction
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJ(
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ ROCKSDB_NAMESPACE::Transaction* txn =
+ txn_db->BeginTransaction(*write_options);
+ return reinterpret_cast<jlong>(txn);
+}
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: beginTransaction
+ * Signature: (JJJ)J
+ */
+jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJJ(
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+ jlong jtxn_options_handle) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* txn_options = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(
+ jtxn_options_handle);
+ ROCKSDB_NAMESPACE::Transaction* txn =
+ txn_db->BeginTransaction(*write_options, *txn_options);
+ return reinterpret_cast<jlong>(txn);
+}
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: beginTransaction_withOld
+ * Signature: (JJJ)J
+ */
+jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJ(
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+ jlong jold_txn_handle) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* old_txn =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jold_txn_handle);
+ ROCKSDB_NAMESPACE::TransactionOptions txn_options;
+ ROCKSDB_NAMESPACE::Transaction* txn =
+ txn_db->BeginTransaction(*write_options, txn_options, old_txn);
+
+ // RocksJava relies on the assumption that
+ // we do not allocate a new Transaction object
+ // when providing an old_txn
+ assert(txn == old_txn);
+
+ return reinterpret_cast<jlong>(txn);
+}
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: beginTransaction_withOld
+ * Signature: (JJJJ)J
+ */
+jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJJ(
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+ jlong jtxn_options_handle, jlong jold_txn_handle) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ auto* write_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+ auto* txn_options = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(
+ jtxn_options_handle);
+ auto* old_txn =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jold_txn_handle);
+ ROCKSDB_NAMESPACE::Transaction* txn =
+ txn_db->BeginTransaction(*write_options, *txn_options, old_txn);
+
+ // RocksJava relies on the assumption that
+ // we do not allocate a new Transaction object
+ // when providing an old_txn
+ assert(txn == old_txn);
+
+ return reinterpret_cast<jlong>(txn);
+}
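Both `_withOld` overloads exist so that RocksJava can recycle a Transaction wrapper (and its native handle) instead of allocating a new one; a usage sketch with hypothetical `txnDb`, `writeOptions`, `key`, and `value`:

    Transaction txn = txnDb.beginTransaction(writeOptions);
    txn.put(key, value);
    txn.commit();
    // reuse the same object for the next transaction; the assert above
    // guarantees BeginTransaction returned old_txn rather than a new object
    txn = txnDb.beginTransaction(writeOptions, txn);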
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: getTransactionByName
+ * Signature: (JLjava/lang/String;)J
+ */
+jlong Java_org_rocksdb_TransactionDB_getTransactionByName(
+ JNIEnv* env, jobject, jlong jhandle, jstring jname) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ const char* name = env->GetStringUTFChars(jname, nullptr);
+ if (name == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+ ROCKSDB_NAMESPACE::Transaction* txn = txn_db->GetTransactionByName(name);
+ env->ReleaseStringUTFChars(jname, name);
+ return reinterpret_cast<jlong>(txn);
+}
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: getAllPreparedTransactions
+ * Signature: (J)[J
+ */
+jlongArray Java_org_rocksdb_TransactionDB_getAllPreparedTransactions(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ std::vector<ROCKSDB_NAMESPACE::Transaction*> txns;
+ txn_db->GetAllPreparedTransactions(&txns);
+
+ const size_t size = txns.size();
+  assert(size <= static_cast<size_t>(INT32_MAX));  // must fit in a jsize (jint)
+
+ const jsize len = static_cast<jsize>(size);
+ std::vector<jlong> tmp(len);
+ for (jsize i = 0; i < len; ++i) {
+ tmp[i] = reinterpret_cast<jlong>(txns[i]);
+ }
+
+ jlongArray jtxns = env->NewLongArray(len);
+ if (jtxns == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ env->SetLongArrayRegion(jtxns, 0, len, tmp.data());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jtxns);
+ return nullptr;
+ }
+
+ return jtxns;
+}
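getAllPreparedTransactions is the recovery half of two-phase commit: after a restart, transactions that were named and prepared but never committed can be fetched and resolved. A hedged Java sketch ("xid-42" is an arbitrary illustrative name):

    txn.setName("xid-42");   // a name is required before prepare(); see the setName binding above
    txn.prepare();
    // ... the process crashes before commit; after reopening the TransactionDB:
    for (final Transaction recovered : txnDb.getAllPreparedTransactions()) {
      recovered.commit();    // or recovered.rollback()
    }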
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: getLockStatusData
+ * Signature: (J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_TransactionDB_getLockStatusData(
+ JNIEnv* env, jobject, jlong jhandle) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ const std::unordered_multimap<uint32_t, ROCKSDB_NAMESPACE::KeyLockInfo>
+ lock_status_data = txn_db->GetLockStatusData();
+ const jobject jlock_status_data = ROCKSDB_NAMESPACE::HashMapJni::construct(
+ env, static_cast<uint32_t>(lock_status_data.size()));
+ if (jlock_status_data == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+ const int32_t, const ROCKSDB_NAMESPACE::KeyLockInfo, jobject, jobject>
+ fn_map_kv =
+ [env](const std::pair<const int32_t,
+ const ROCKSDB_NAMESPACE::KeyLockInfo>& pair) {
+ const jobject jlong_column_family_id =
+ ROCKSDB_NAMESPACE::LongJni::valueOf(env, pair.first);
+ if (jlong_column_family_id == nullptr) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+ const jobject jkey_lock_info =
+ ROCKSDB_NAMESPACE::KeyLockInfoJni::construct(env, pair.second);
+ if (jkey_lock_info == nullptr) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+ return std::unique_ptr<std::pair<jobject, jobject>>(
+ new std::pair<jobject, jobject>(jlong_column_family_id,
+ jkey_lock_info));
+ };
+
+ if (!ROCKSDB_NAMESPACE::HashMapJni::putAll(
+ env, jlock_status_data, lock_status_data.begin(),
+ lock_status_data.end(), fn_map_kv)) {
+    // exception occurred
+ return nullptr;
+ }
+
+ return jlock_status_data;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: getDeadlockInfoBuffer
+ * Signature: (J)[Lorg/rocksdb/TransactionDB$DeadlockPath;
+ */
+jobjectArray Java_org_rocksdb_TransactionDB_getDeadlockInfoBuffer(
+ JNIEnv* env, jobject jobj, jlong jhandle) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ const std::vector<ROCKSDB_NAMESPACE::DeadlockPath> deadlock_info_buffer =
+ txn_db->GetDeadlockInfoBuffer();
+
+ const jsize deadlock_info_buffer_len =
+ static_cast<jsize>(deadlock_info_buffer.size());
+ jobjectArray jdeadlock_info_buffer = env->NewObjectArray(
+ deadlock_info_buffer_len,
+ ROCKSDB_NAMESPACE::DeadlockPathJni::getJClass(env), nullptr);
+ if (jdeadlock_info_buffer == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ jsize jdeadlock_info_buffer_offset = 0;
+
+ auto buf_end = deadlock_info_buffer.end();
+ for (auto buf_it = deadlock_info_buffer.begin(); buf_it != buf_end;
+ ++buf_it) {
+ const ROCKSDB_NAMESPACE::DeadlockPath deadlock_path = *buf_it;
+ const std::vector<ROCKSDB_NAMESPACE::DeadlockInfo> deadlock_infos =
+ deadlock_path.path;
+    const jsize deadlock_infos_len =
+        static_cast<jsize>(deadlock_infos.size());
+ jobjectArray jdeadlock_infos = env->NewObjectArray(
+ deadlock_infos_len, ROCKSDB_NAMESPACE::DeadlockInfoJni::getJClass(env),
+ nullptr);
+ if (jdeadlock_infos == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jdeadlock_info_buffer);
+ return nullptr;
+ }
+ jsize jdeadlock_infos_offset = 0;
+
+ auto infos_end = deadlock_infos.end();
+ for (auto infos_it = deadlock_infos.begin(); infos_it != infos_end;
+ ++infos_it) {
+ const ROCKSDB_NAMESPACE::DeadlockInfo deadlock_info = *infos_it;
+ const jobject jdeadlock_info =
+ ROCKSDB_NAMESPACE::TransactionDBJni::newDeadlockInfo(
+ env, jobj, deadlock_info.m_txn_id, deadlock_info.m_cf_id,
+ deadlock_info.m_waiting_key, deadlock_info.m_exclusive);
+ if (jdeadlock_info == nullptr) {
+      // exception occurred
+ env->DeleteLocalRef(jdeadlock_info_buffer);
+ return nullptr;
+ }
+ env->SetObjectArrayElement(jdeadlock_infos, jdeadlock_infos_offset++,
+ jdeadlock_info);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException or
+ // ArrayStoreException
+ env->DeleteLocalRef(jdeadlock_info);
+ env->DeleteLocalRef(jdeadlock_info_buffer);
+ return nullptr;
+ }
+ }
+
+ const jobject jdeadlock_path =
+ ROCKSDB_NAMESPACE::DeadlockPathJni::construct(
+ env, jdeadlock_infos, deadlock_path.limit_exceeded);
+ if (jdeadlock_path == nullptr) {
+      // exception occurred
+ env->DeleteLocalRef(jdeadlock_info_buffer);
+ return nullptr;
+ }
+ env->SetObjectArrayElement(jdeadlock_info_buffer,
+ jdeadlock_info_buffer_offset++, jdeadlock_path);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException or ArrayStoreException
+ env->DeleteLocalRef(jdeadlock_path);
+ env->DeleteLocalRef(jdeadlock_info_buffer);
+ return nullptr;
+ }
+ }
+
+ return jdeadlock_info_buffer;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: setDeadlockInfoBufferSize
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_TransactionDB_setDeadlockInfoBufferSize(
+ JNIEnv*, jobject, jlong jhandle, jint jdeadlock_info_buffer_size) {
+ auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+ txn_db->SetDeadlockInfoBufferSize(jdeadlock_info_buffer_size);
+}
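The introspection natives above surface in Java roughly as follows (a sketch; the nested result types KeyLockInfo and DeadlockPath are assumed to be exposed on TransactionDB as in RocksJava):

    final Map<Long, TransactionDB.KeyLockInfo> locks = txnDb.getLockStatusData();
    txnDb.setDeadlockInfoBufferSize(10);
    final TransactionDB.DeadlockPath[] recentDeadlocks =
        txnDb.getDeadlockInfoBuffer();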
diff --git a/src/rocksdb/java/rocksjni/transaction_db_options.cc b/src/rocksdb/java/rocksjni/transaction_db_options.cc
new file mode 100644
index 000000000..c64ff9456
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_db_options.cc
@@ -0,0 +1,170 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::TransactionDBOptions.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_TransactionDBOptions.h"
+
+#include "rocksdb/utilities/transaction_db.h"
+
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: newTransactionDBOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_TransactionDBOptions_newTransactionDBOptions(
+ JNIEnv* /*env*/, jclass /*jcls*/) {
+ ROCKSDB_NAMESPACE::TransactionDBOptions* opts =
+ new ROCKSDB_NAMESPACE::TransactionDBOptions();
+ return reinterpret_cast<jlong>(opts);
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: getMaxNumLocks
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TransactionDBOptions_getMaxNumLocks(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+ return opts->max_num_locks;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: setMaxNumLocks
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_TransactionDBOptions_setMaxNumLocks(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong jmax_num_locks) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+ opts->max_num_locks = jmax_num_locks;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: getNumStripes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TransactionDBOptions_getNumStripes(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+ return opts->num_stripes;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: setNumStripes
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_TransactionDBOptions_setNumStripes(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jlong jnum_stripes) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+ opts->num_stripes = jnum_stripes;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: getTransactionLockTimeout
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TransactionDBOptions_getTransactionLockTimeout(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+ return opts->transaction_lock_timeout;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: setTransactionLockTimeout
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_TransactionDBOptions_setTransactionLockTimeout(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jtransaction_lock_timeout) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+ opts->transaction_lock_timeout = jtransaction_lock_timeout;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: getDefaultLockTimeout
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TransactionDBOptions_getDefaultLockTimeout(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+ return opts->default_lock_timeout;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: setDefaultLockTimeout
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_TransactionDBOptions_setDefaultLockTimeout(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jdefault_lock_timeout) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+ opts->default_lock_timeout = jdefault_lock_timeout;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: getWritePolicy
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_TransactionDBOptions_getWritePolicy(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+ return ROCKSDB_NAMESPACE::TxnDBWritePolicyJni::toJavaTxnDBWritePolicy(
+ opts->write_policy);
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: setWritePolicy
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_TransactionDBOptions_setWritePolicy(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jbyte jwrite_policy) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+ opts->write_policy =
+ ROCKSDB_NAMESPACE::TxnDBWritePolicyJni::toCppTxnDBWritePolicy(
+ jwrite_policy);
+}
+
+/*
+ * Class: org_rocksdb_TransactionDBOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TransactionDBOptions_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDBOptions*>(jhandle);
+}
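Each getter/setter above maps one field of ROCKSDB_NAMESPACE::TransactionDBOptions onto the fluent Java builder; a configuration sketch (the values are illustrative only):

    try (final TransactionDBOptions txnDbOptions = new TransactionDBOptions()
             .setMaxNumLocks(10_000)
             .setNumStripes(16)
             .setTransactionLockTimeout(1_000)   // milliseconds
             .setDefaultLockTimeout(1_000)       // milliseconds
             .setWritePolicy(TxnDBWritePolicy.WRITE_COMMITTED)) {
      // pass to TransactionDB.open(...)
    }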
diff --git a/src/rocksdb/java/rocksjni/transaction_log.cc b/src/rocksdb/java/rocksjni/transaction_log.cc
new file mode 100644
index 000000000..aa57211eb
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_log.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::TransactionLogIterator methods from the
+// Java side.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "include/org_rocksdb_TransactionLogIterator.h"
+#include "rocksdb/transaction_log.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_TransactionLogIterator
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TransactionLogIterator_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::TransactionLogIterator*>(handle);
+}
+
+/*
+ * Class: org_rocksdb_TransactionLogIterator
+ * Method: isValid
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_TransactionLogIterator_isValid(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::TransactionLogIterator*>(handle)
+ ->Valid();
+}
+
+/*
+ * Class: org_rocksdb_TransactionLogIterator
+ * Method: next
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TransactionLogIterator_next(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionLogIterator*>(handle)->Next();
+}
+
+/*
+ * Class: org_rocksdb_TransactionLogIterator
+ * Method: status
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TransactionLogIterator_status(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong handle) {
+ ROCKSDB_NAMESPACE::Status s =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionLogIterator*>(handle)
+ ->status();
+ if (!s.ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_TransactionLogIterator
+ * Method: getBatch
+ * Signature: (J)Lorg/rocksdb/TransactionLogIterator$BatchResult;
+ */
+jobject Java_org_rocksdb_TransactionLogIterator_getBatch(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong handle) {
+ ROCKSDB_NAMESPACE::BatchResult batch_result =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionLogIterator*>(handle)
+ ->GetBatch();
+ return ROCKSDB_NAMESPACE::BatchResultJni::construct(env, batch_result);
+}
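On the Java side the iterator is obtained from RocksDB.getUpdatesSince and driven through the four natives above; a sketch with a hypothetical `db` and `sequenceNumber`:

    try (final TransactionLogIterator it = db.getUpdatesSince(sequenceNumber)) {
      while (it.isValid()) {
        final TransactionLogIterator.BatchResult batch = it.getBatch();
        // batch.sequenceNumber() / batch.writeBatch() describe one WAL batch
        it.next();
      }
      it.status();  // throws RocksDBException if iteration stopped on an error
    }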
diff --git a/src/rocksdb/java/rocksjni/transaction_notifier.cc b/src/rocksdb/java/rocksjni/transaction_notifier.cc
new file mode 100644
index 000000000..4e42af359
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_notifier.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::TransactionNotifier.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_AbstractTransactionNotifier.h"
+#include "rocksjni/transaction_notifier_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractTransactionNotifier
+ * Method: createNewTransactionNotifier
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractTransactionNotifier_createNewTransactionNotifier(
+ JNIEnv* env, jobject jobj) {
+ auto* transaction_notifier =
+ new ROCKSDB_NAMESPACE::TransactionNotifierJniCallback(env, jobj);
+ auto* sptr_transaction_notifier =
+ new std::shared_ptr<ROCKSDB_NAMESPACE::TransactionNotifierJniCallback>(
+ transaction_notifier);
+ return reinterpret_cast<jlong>(sptr_transaction_notifier);
+}
+
+/*
+ * Class: org_rocksdb_AbstractTransactionNotifier
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_AbstractTransactionNotifier_disposeInternal(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ // TODO(AR) refactor to use JniCallback::JniCallback
+ // when https://github.com/facebook/rocksdb/pull/1241/ is merged
+ std::shared_ptr<ROCKSDB_NAMESPACE::TransactionNotifierJniCallback>* handle =
+ reinterpret_cast<
+ std::shared_ptr<ROCKSDB_NAMESPACE::TransactionNotifierJniCallback>*>(
+ jhandle);
+ delete handle;
+}
diff --git a/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.cc b/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.cc
new file mode 100644
index 000000000..06b62ad6d
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.cc
@@ -0,0 +1,39 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TransactionNotifier.
+
+#include "rocksjni/transaction_notifier_jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+TransactionNotifierJniCallback::TransactionNotifierJniCallback(JNIEnv* env,
+ jobject jtransaction_notifier) : JniCallback(env, jtransaction_notifier) {
+ // we cache the method id for the JNI callback
+ m_jsnapshot_created_methodID =
+ AbstractTransactionNotifierJni::getSnapshotCreatedMethodId(env);
+}
+
+void TransactionNotifierJniCallback::SnapshotCreated(
+ const Snapshot* newSnapshot) {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ assert(env != nullptr);
+
+ env->CallVoidMethod(m_jcallback_obj,
+ m_jsnapshot_created_methodID, reinterpret_cast<jlong>(newSnapshot));
+
+  if (env->ExceptionCheck()) {
+ // exception thrown from CallVoidMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+
+ releaseJniEnv(attached_thread);
+}
+} // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.h b/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.h
new file mode 100644
index 000000000..b3155b5a3
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TransactionNotifier.
+
+#ifndef JAVA_ROCKSJNI_TRANSACTION_NOTIFIER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_TRANSACTION_NOTIFIER_JNICALLBACK_H_
+
+#include <jni.h>
+
+#include "rocksdb/utilities/transaction.h"
+#include "rocksjni/jnicallback.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+/**
+ * This class acts as a bridge between C++
+ * and Java. The methods in this class will be
+ * called back from the RocksDB TransactionDB or OptimisticTransactionDB (C++);
+ * we then call back to the appropriate Java method.
+ * This enables TransactionNotifier to be implemented in Java.
+ *
+ * Unlike RocksJava's Comparator JNI Callback, we do not attempt
+ * to reduce Java object allocations by caching the Snapshot object
+ * presented to the callback. This could be revisited in future
+ * if performance is lacking.
+ */
+class TransactionNotifierJniCallback: public JniCallback,
+ public TransactionNotifier {
+ public:
+ TransactionNotifierJniCallback(JNIEnv* env, jobject jtransaction_notifier);
+ virtual void SnapshotCreated(const Snapshot* newSnapshot);
+
+ private:
+ jmethodID m_jsnapshot_created_methodID;
+};
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // JAVA_ROCKSJNI_TRANSACTION_NOTIFIER_JNICALLBACK_H_
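From Java, the callback is installed by subclassing AbstractTransactionNotifier; a minimal sketch, assuming `txn` is an open Transaction:

    final AbstractTransactionNotifier notifier = new AbstractTransactionNotifier() {
      @Override
      public void snapshotCreated(final Snapshot newSnapshot) {
        // invoked from C++ via SnapshotCreated(...) above
      }
    };
    txn.setSnapshotOnNextOperation(notifier);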
diff --git a/src/rocksdb/java/rocksjni/transaction_options.cc b/src/rocksdb/java/rocksjni/transaction_options.cc
new file mode 100644
index 000000000..0544f255e
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_options.cc
@@ -0,0 +1,191 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::TransactionOptions.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_TransactionOptions.h"
+
+#include "rocksdb/utilities/transaction_db.h"
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: newTransactionOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_TransactionOptions_newTransactionOptions(
+ JNIEnv* /*env*/, jclass /*jcls*/) {
+ auto* opts = new ROCKSDB_NAMESPACE::TransactionOptions();
+ return reinterpret_cast<jlong>(opts);
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: isSetSnapshot
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_TransactionOptions_isSetSnapshot(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ return opts->set_snapshot;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: setSetSnapshot
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_TransactionOptions_setSetSnapshot(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean jset_snapshot) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ opts->set_snapshot = jset_snapshot;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: isDeadlockDetect
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_TransactionOptions_isDeadlockDetect(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ return opts->deadlock_detect;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: setDeadlockDetect
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_TransactionOptions_setDeadlockDetect(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jboolean jdeadlock_detect) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ opts->deadlock_detect = jdeadlock_detect;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: getLockTimeout
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TransactionOptions_getLockTimeout(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ return opts->lock_timeout;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: setLockTimeout
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_TransactionOptions_setLockTimeout(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jlong jlock_timeout) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ opts->lock_timeout = jlock_timeout;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: getExpiration
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TransactionOptions_getExpiration(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ return opts->expiration;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: setExpiration
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_TransactionOptions_setExpiration(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle,
+ jlong jexpiration) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ opts->expiration = jexpiration;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: getDeadlockDetectDepth
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TransactionOptions_getDeadlockDetectDepth(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ return opts->deadlock_detect_depth;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: setDeadlockDetectDepth
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_TransactionOptions_setDeadlockDetectDepth(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jdeadlock_detect_depth) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ opts->deadlock_detect_depth = jdeadlock_detect_depth;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: getMaxWriteBatchSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TransactionOptions_getMaxWriteBatchSize(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ return opts->max_write_batch_size;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: setMaxWriteBatchSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_TransactionOptions_setMaxWriteBatchSize(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ jlong jmax_write_batch_size) {
+ auto* opts =
+ reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+ opts->max_write_batch_size = jmax_write_batch_size;
+}
+
+/*
+ * Class: org_rocksdb_TransactionOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TransactionOptions_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jhandle) {
+ delete reinterpret_cast<ROCKSDB_NAMESPACE::TransactionOptions*>(jhandle);
+}
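As with TransactionDBOptions, these accessors back a fluent Java builder for per-transaction settings; an illustrative sketch:

    try (final TransactionOptions txnOptions = new TransactionOptions()
             .setSetSnapshot(true)          // snapshot taken at BeginTransaction
             .setDeadlockDetect(true)
             .setDeadlockDetectDepth(50)
             .setLockTimeout(1_000)         // milliseconds
             .setExpiration(5_000)) {       // milliseconds
      final Transaction txn = txnDb.beginTransaction(writeOptions, txnOptions);
    }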
diff --git a/src/rocksdb/java/rocksjni/ttl.cc b/src/rocksdb/java/rocksjni/ttl.cc
new file mode 100644
index 000000000..77d17c82a
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/ttl.cc
@@ -0,0 +1,207 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::TtlDB methods from the Java side.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "include/org_rocksdb_TtlDB.h"
+#include "rocksdb/utilities/db_ttl.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_TtlDB
+ * Method: open
+ * Signature: (JLjava/lang/String;IZ)J
+ */
+jlong Java_org_rocksdb_TtlDB_open(
+ JNIEnv* env, jclass, jlong joptions_handle, jstring jdb_path, jint jttl,
+ jboolean jread_only) {
+ const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
+ if (db_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(joptions_handle);
+ ROCKSDB_NAMESPACE::DBWithTTL* db = nullptr;
+ ROCKSDB_NAMESPACE::Status s =
+ ROCKSDB_NAMESPACE::DBWithTTL::Open(*opt, db_path, &db, jttl, jread_only);
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+
+  // as TtlDB extends RocksDB on the Java side, we can reuse
+ // the RocksDB portal here.
+ if (s.ok()) {
+ return reinterpret_cast<jlong>(db);
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+ }
+}
+
+/*
+ * Class: org_rocksdb_TtlDB
+ * Method: openCF
+ * Signature: (JLjava/lang/String;[[B[J[IZ)[J
+ */
+jlongArray Java_org_rocksdb_TtlDB_openCF(
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path,
+ jobjectArray jcolumn_names, jlongArray jcolumn_options,
+ jintArray jttls, jboolean jread_only) {
+ const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
+ if (db_path == nullptr) {
+ // exception thrown: OutOfMemoryError
+    return nullptr;
+ }
+
+ const jsize len_cols = env->GetArrayLength(jcolumn_names);
+ jlong* jco = env->GetLongArrayElements(jcolumn_options, nullptr);
+ if (jco == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor> column_families;
+ jboolean has_exception = JNI_FALSE;
+ ROCKSDB_NAMESPACE::JniUtil::byteStrings<std::string>(
+ env, jcolumn_names,
+ [](const char* str_data, const size_t str_len) {
+ return std::string(str_data, str_len);
+ },
+ [&jco, &column_families](size_t idx, std::string cf_name) {
+ ROCKSDB_NAMESPACE::ColumnFamilyOptions* cf_options =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jco[idx]);
+ column_families.push_back(
+ ROCKSDB_NAMESPACE::ColumnFamilyDescriptor(cf_name, *cf_options));
+ },
+ &has_exception);
+
+ env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT);
+
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+
+ std::vector<int32_t> ttl_values;
+ jint* jttlv = env->GetIntArrayElements(jttls, nullptr);
+ if (jttlv == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+ return nullptr;
+ }
+ const jsize len_ttls = env->GetArrayLength(jttls);
+ for (jsize i = 0; i < len_ttls; i++) {
+ ttl_values.push_back(jttlv[i]);
+ }
+ env->ReleaseIntArrayElements(jttls, jttlv, JNI_ABORT);
+
+ auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jopt_handle);
+ std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> handles;
+ ROCKSDB_NAMESPACE::DBWithTTL* db = nullptr;
+ ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::DBWithTTL::Open(
+ *opt, db_path, column_families, &handles, &db, ttl_values, jread_only);
+
+ // we have now finished with db_path
+ env->ReleaseStringUTFChars(jdb_path, db_path);
+
+ // check if open operation was successful
+ if (s.ok()) {
+ const jsize resultsLen = 1 + len_cols; // db handle + column family handles
+ std::unique_ptr<jlong[]> results =
+ std::unique_ptr<jlong[]>(new jlong[resultsLen]);
+ results[0] = reinterpret_cast<jlong>(db);
+ for (int i = 1; i <= len_cols; i++) {
+ results[i] = reinterpret_cast<jlong>(handles[i - 1]);
+ }
+
+ jlongArray jresults = env->NewLongArray(resultsLen);
+ if (jresults == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresults);
+ return nullptr;
+ }
+
+ return jresults;
+ } else {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+}
+
+/*
+ * Class: org_rocksdb_TtlDB
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TtlDB_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* ttl_db = reinterpret_cast<ROCKSDB_NAMESPACE::DBWithTTL*>(jhandle);
+ assert(ttl_db != nullptr);
+ delete ttl_db;
+}
+
+/*
+ * Class: org_rocksdb_TtlDB
+ * Method: closeDatabase
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TtlDB_closeDatabase(
+ JNIEnv* /* env */, jclass, jlong /* jhandle */) {
+ // auto* ttl_db = reinterpret_cast<ROCKSDB_NAMESPACE::DBWithTTL*>(jhandle);
+ // assert(ttl_db != nullptr);
+ // ROCKSDB_NAMESPACE::Status s = ttl_db->Close();
+ // ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+
+ // TODO(AR) this is disabled until https://github.com/facebook/rocksdb/issues/4818 is resolved!
+}
+
+/*
+ * Class: org_rocksdb_TtlDB
+ * Method: createColumnFamilyWithTtl
+ * Signature: (J[BJI)J
+ */
+jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl(
+ JNIEnv* env, jobject, jlong jdb_handle, jbyteArray jcolumn_name,
+ jlong jcolumn_options, jint jttl) {
+ jbyte* cfname = env->GetByteArrayElements(jcolumn_name, nullptr);
+ if (cfname == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return 0;
+ }
+ const jsize len = env->GetArrayLength(jcolumn_name);
+
+ auto* cfOptions = reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(
+ jcolumn_options);
+
+ auto* db_handle = reinterpret_cast<ROCKSDB_NAMESPACE::DBWithTTL*>(jdb_handle);
+ ROCKSDB_NAMESPACE::ColumnFamilyHandle* handle;
+ ROCKSDB_NAMESPACE::Status s = db_handle->CreateColumnFamilyWithTtl(
+ *cfOptions, std::string(reinterpret_cast<char*>(cfname), len), &handle,
+ jttl);
+
+ env->ReleaseByteArrayElements(jcolumn_name, cfname, 0);
+
+ if (s.ok()) {
+ return reinterpret_cast<jlong>(handle);
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+}
diff --git a/src/rocksdb/java/rocksjni/wal_filter.cc b/src/rocksdb/java/rocksjni/wal_filter.cc
new file mode 100644
index 000000000..e4040b1f8
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/wal_filter.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::WalFilter.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_AbstractWalFilter.h"
+#include "rocksjni/wal_filter_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractWalFilter
+ * Method: createNewWalFilter
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractWalFilter_createNewWalFilter(
+ JNIEnv* env, jobject jobj) {
+ auto* wal_filter = new ROCKSDB_NAMESPACE::WalFilterJniCallback(env, jobj);
+ return reinterpret_cast<jlong>(wal_filter);
+}
diff --git a/src/rocksdb/java/rocksjni/wal_filter_jnicallback.cc b/src/rocksdb/java/rocksjni/wal_filter_jnicallback.cc
new file mode 100644
index 000000000..48fe2d505
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/wal_filter_jnicallback.cc
@@ -0,0 +1,144 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::WalFilter.
+
+#include "rocksjni/wal_filter_jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+WalFilterJniCallback::WalFilterJniCallback(
+ JNIEnv* env, jobject jwal_filter)
+ : JniCallback(env, jwal_filter) {
+ // Note: The name of a WalFilter will not change during its lifetime,
+ // so we cache it here in the m_name member
+ jmethodID jname_mid = AbstractWalFilterJni::getNameMethodId(env);
+ if (jname_mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+ jstring jname = (jstring)env->CallObjectMethod(m_jcallback_obj, jname_mid);
+ if (env->ExceptionCheck()) {
+ // exception thrown
+ return;
+ }
+ jboolean has_exception = JNI_FALSE;
+ m_name = JniUtil::copyString(env, jname,
+ &has_exception); // also releases jname
+ if (has_exception == JNI_TRUE) {
+ // exception thrown
+ return;
+ }
+
+ m_column_family_log_number_map_mid =
+ AbstractWalFilterJni::getColumnFamilyLogNumberMapMethodId(env);
+ if (m_column_family_log_number_map_mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ m_log_record_found_proxy_mid =
+ AbstractWalFilterJni::getLogRecordFoundProxyMethodId(env);
+ if (m_log_record_found_proxy_mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+}
+
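+// Note: these callbacks can be invoked from RocksDB background threads, so
+// each one obtains a JNIEnv via getJniEnv() (attaching the calling thread to
+// the JVM if necessary) and pairs it with releaseJniEnv() before returning.
+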
+void WalFilterJniCallback::ColumnFamilyLogNumberMap(
+ const std::map<uint32_t, uint64_t>& cf_lognumber_map,
+ const std::map<std::string, uint32_t>& cf_name_id_map) {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ if (env == nullptr) {
+ return;
+ }
+
+ jobject jcf_lognumber_map =
+ ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, &cf_lognumber_map);
+ if (jcf_lognumber_map == nullptr) {
+ // exception occurred
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+
+ jobject jcf_name_id_map =
+ ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, &cf_name_id_map);
+ if (jcf_name_id_map == nullptr) {
+ // exception occurred
+ env->ExceptionDescribe(); // print out exception to stderr
+ env->DeleteLocalRef(jcf_lognumber_map);
+ releaseJniEnv(attached_thread);
+ return;
+ }
+
+ env->CallVoidMethod(m_jcallback_obj,
+ m_column_family_log_number_map_mid,
+ jcf_lognumber_map,
+ jcf_name_id_map);
+
+ env->DeleteLocalRef(jcf_lognumber_map);
+ env->DeleteLocalRef(jcf_name_id_map);
+
+ if (env->ExceptionCheck()) {
+ // exception thrown from CallVoidMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ }
+
+ releaseJniEnv(attached_thread);
+}
+
+WalFilter::WalProcessingOption WalFilterJniCallback::LogRecordFound(
+ unsigned long long log_number, const std::string& log_file_name,
+ const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ if (env == nullptr) {
+ return WalFilter::WalProcessingOption::kCorruptedRecord;
+ }
+
+ jstring jlog_file_name = JniUtil::toJavaString(env, &log_file_name);
+ if (jlog_file_name == nullptr) {
+ // exception occurred
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return WalFilter::WalProcessingOption::kCorruptedRecord;
+ }
+
+ jshort jlog_record_found_result = env->CallShortMethod(m_jcallback_obj,
+ m_log_record_found_proxy_mid,
+ static_cast<jlong>(log_number),
+ jlog_file_name,
+ reinterpret_cast<jlong>(&batch),
+ reinterpret_cast<jlong>(new_batch));
+
+ env->DeleteLocalRef(jlog_file_name);
+
+ if (env->ExceptionCheck()) {
+ // exception thrown from CallShortMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return WalFilter::WalProcessingOption::kCorruptedRecord;
+ }
+
+ // unpack WalProcessingOption and batch_changed from jlog_record_found_result
+ jbyte jwal_processing_option_value = (jlog_record_found_result >> 8) & 0xFF;
+ jbyte jbatch_changed_value = jlog_record_found_result & 0xFF;
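+ // (the Java-side proxy packs its result into a single jshort: the
+ // WalProcessingOption value in the high byte and the batch-changed flag
+ // in the low byte)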
+
+ releaseJniEnv(attached_thread);
+
+ *batch_changed = jbatch_changed_value == JNI_TRUE;
+
+ return WalProcessingOptionJni::toCppWalProcessingOption(
+ jwal_processing_option_value);
+}
+
+const char* WalFilterJniCallback::Name() const {
+ return m_name.get();
+}
+
+} // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/wal_filter_jnicallback.h b/src/rocksdb/java/rocksjni/wal_filter_jnicallback.h
new file mode 100644
index 000000000..b575ddc62
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/wal_filter_jnicallback.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::WalFilter.
+
+#ifndef JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_
+
+#include <jni.h>
+#include <map>
+#include <memory>
+#include <string>
+
+#include "rocksdb/wal_filter.h"
+#include "rocksjni/jnicallback.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class WalFilterJniCallback : public JniCallback, public WalFilter {
+ public:
+ WalFilterJniCallback(
+ JNIEnv* env, jobject jwal_filter);
+ virtual void ColumnFamilyLogNumberMap(
+ const std::map<uint32_t, uint64_t>& cf_lognumber_map,
+ const std::map<std::string, uint32_t>& cf_name_id_map);
+ virtual WalFilter::WalProcessingOption LogRecordFound(
+ unsigned long long log_number, const std::string& log_file_name,
+ const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed);
+ virtual const char* Name() const;
+
+ private:
+ std::unique_ptr<const char[]> m_name;
+ jmethodID m_column_family_log_number_map_mid;
+ jmethodID m_log_record_found_proxy_mid;
+};
+
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/write_batch.cc b/src/rocksdb/java/rocksjni/write_batch.cc
new file mode 100644
index 000000000..4ef8035c4
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/write_batch.cc
@@ -0,0 +1,674 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::WriteBatch methods from the Java side.
+#include <memory>
+
+#include "db/memtable.h"
+#include "db/write_batch_internal.h"
+#include "include/org_rocksdb_WriteBatch.h"
+#include "include/org_rocksdb_WriteBatch_Handler.h"
+#include "logging/logging.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/status.h"
+#include "rocksdb/write_batch.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "rocksjni/portal.h"
+#include "rocksjni/writebatchhandlerjnicallback.h"
+#include "table/scoped_arena_iterator.h"
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: newWriteBatch
+ * Signature: (I)J
+ */
+jlong Java_org_rocksdb_WriteBatch_newWriteBatch__I(JNIEnv* /*env*/,
+ jclass /*jcls*/,
+ jint jreserved_bytes) {
+ auto* wb =
+ new ROCKSDB_NAMESPACE::WriteBatch(static_cast<size_t>(jreserved_bytes));
+ return reinterpret_cast<jlong>(wb);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: newWriteBatch
+ * Signature: ([BI)J
+ */
+jlong Java_org_rocksdb_WriteBatch_newWriteBatch___3BI(JNIEnv* env,
+ jclass /*jcls*/,
+ jbyteArray jserialized,
+ jint jserialized_length) {
+ jboolean has_exception = JNI_FALSE;
+ std::string serialized = ROCKSDB_NAMESPACE::JniUtil::byteString<std::string>(
+ env, jserialized, jserialized_length,
+ [](const char* str, const size_t len) { return std::string(str, len); },
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return 0;
+ }
+
+ auto* wb = new ROCKSDB_NAMESPACE::WriteBatch(serialized);
+ return reinterpret_cast<jlong>(wb);
+}
+
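+// Illustrative sketch only (not part of this change), assuming the usual
+// RocksJava API: the two overloads above back the org.rocksdb.WriteBatch
+// constructors, e.g.
+//
+//   try (final WriteBatch wb = new WriteBatch(reservedBytes)) { /* ... */ }
+//   try (final WriteBatch wb = new WriteBatch(serializedBytes)) { /* ... */ }
+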
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: count0
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_WriteBatch_count0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return static_cast<jint>(wb->Count());
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: clear0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatch_clear0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ wb->Clear();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: setSavePoint0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatch_setSavePoint0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ wb->SetSavePoint();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: rollbackToSavePoint0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatch_rollbackToSavePoint0(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ auto s = wb->RollbackToSavePoint();
+
+ if (s.ok()) {
+ return;
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: popSavePoint
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatch_popSavePoint(JNIEnv* env, jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ auto s = wb->PopSavePoint();
+
+ if (s.ok()) {
+ return;
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: setMaxBytes
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_WriteBatch_setMaxBytes(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jwb_handle,
+ jlong jmax_bytes) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ wb->SetMaxBytes(static_cast<size_t>(jmax_bytes));
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: put
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_WriteBatch_put__J_3BI_3BI(JNIEnv* env, jobject jobj,
+ jlong jwb_handle,
+ jbyteArray jkey, jint jkey_len,
+ jbyteArray jentry_value,
+ jint jentry_value_len) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto put = [&wb](ROCKSDB_NAMESPACE::Slice key,
+ ROCKSDB_NAMESPACE::Slice value) {
+ return wb->Put(key, value);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(put, env, jobj, jkey, jkey_len,
+ jentry_value, jentry_value_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: put
+ * Signature: (J[BI[BIJ)V
+ */
+void Java_org_rocksdb_WriteBatch_put__J_3BI_3BIJ(
+ JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len,
+ jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ assert(cf_handle != nullptr);
+ auto put = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice key,
+ ROCKSDB_NAMESPACE::Slice value) {
+ return wb->Put(cf_handle, key, value);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(put, env, jobj, jkey, jkey_len,
+ jentry_value, jentry_value_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: putDirect
+ * Signature: (JLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;IIJ)V
+ */
+void Java_org_rocksdb_WriteBatch_putDirect(JNIEnv* env, jobject /*jobj*/,
+ jlong jwb_handle, jobject jkey,
+ jint jkey_offset, jint jkey_len,
+ jobject jval, jint jval_offset,
+ jint jval_len, jlong jcf_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto put = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice& key,
+ ROCKSDB_NAMESPACE::Slice& value) {
+ if (cf_handle == nullptr) {
+ wb->Put(key, value);
+ } else {
+ wb->Put(cf_handle, key, value);
+ }
+ };
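+ // jkey/jval must be direct java.nio.ByteBuffers: kv_op_direct resolves
+ // their native addresses, and non-direct buffers cannot be resolved and
+ // result in an exception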
+ ROCKSDB_NAMESPACE::JniUtil::kv_op_direct(
+ put, env, jkey, jkey_offset, jkey_len, jval, jval_offset, jval_len);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: merge
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BI(
+ JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len,
+ jbyteArray jentry_value, jint jentry_value_len) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto merge = [&wb](ROCKSDB_NAMESPACE::Slice key,
+ ROCKSDB_NAMESPACE::Slice value) {
+ return wb->Merge(key, value);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len,
+ jentry_value, jentry_value_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: merge
+ * Signature: (J[BI[BIJ)V
+ */
+void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BIJ(
+ JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len,
+ jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ assert(cf_handle != nullptr);
+ auto merge = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice key,
+ ROCKSDB_NAMESPACE::Slice value) {
+ return wb->Merge(cf_handle, key, value);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len,
+ jentry_value, jentry_value_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: delete
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_WriteBatch_delete__J_3BI(JNIEnv* env, jobject jobj,
+ jlong jwb_handle,
+ jbyteArray jkey, jint jkey_len) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto remove = [&wb](ROCKSDB_NAMESPACE::Slice key) { return wb->Delete(key); };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::k_op(remove, env, jobj, jkey, jkey_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: delete
+ * Signature: (J[BIJ)V
+ */
+void Java_org_rocksdb_WriteBatch_delete__J_3BIJ(JNIEnv* env, jobject jobj,
+ jlong jwb_handle,
+ jbyteArray jkey, jint jkey_len,
+ jlong jcf_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ assert(cf_handle != nullptr);
+ auto remove = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice key) {
+ return wb->Delete(cf_handle, key);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::k_op(remove, env, jobj, jkey, jkey_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: singleDelete
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_WriteBatch_singleDelete__J_3BI(JNIEnv* env, jobject jobj,
+ jlong jwb_handle,
+ jbyteArray jkey,
+ jint jkey_len) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto single_delete = [&wb](ROCKSDB_NAMESPACE::Slice key) {
+ return wb->SingleDelete(key);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::k_op(single_delete, env, jobj, jkey,
+ jkey_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: singleDelete
+ * Signature: (J[BIJ)V
+ */
+void Java_org_rocksdb_WriteBatch_singleDelete__J_3BIJ(JNIEnv* env, jobject jobj,
+ jlong jwb_handle,
+ jbyteArray jkey,
+ jint jkey_len,
+ jlong jcf_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ assert(cf_handle != nullptr);
+ auto single_delete = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice key) {
+ return wb->SingleDelete(cf_handle, key);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::k_op(single_delete, env, jobj, jkey,
+ jkey_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: removeDirect
+ * Signature: (JLjava/nio/ByteBuffer;IIJ)V
+ */
+void Java_org_rocksdb_WriteBatch_removeDirect(JNIEnv* env, jobject /*jobj*/,
+ jlong jwb_handle, jobject jkey,
+ jint jkey_offset, jint jkey_len,
+ jlong jcf_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto remove = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice& key) {
+ if (cf_handle == nullptr) {
+ wb->Delete(key);
+ } else {
+ wb->Delete(cf_handle, key);
+ }
+ };
+ ROCKSDB_NAMESPACE::JniUtil::k_op_direct(remove, env, jkey, jkey_offset,
+ jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: deleteRange
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_WriteBatch_deleteRange__J_3BI_3BI(
+ JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jbegin_key,
+ jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto deleteRange = [&wb](ROCKSDB_NAMESPACE::Slice beginKey,
+ ROCKSDB_NAMESPACE::Slice endKey) {
+ return wb->DeleteRange(beginKey, endKey);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key,
+ jbegin_key_len, jend_key, jend_key_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: deleteRange
+ * Signature: (J[BI[BIJ)V
+ */
+void Java_org_rocksdb_WriteBatch_deleteRange__J_3BI_3BIJ(
+ JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jbegin_key,
+ jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len,
+ jlong jcf_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ assert(cf_handle != nullptr);
+ auto deleteRange = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice beginKey,
+ ROCKSDB_NAMESPACE::Slice endKey) {
+ return wb->DeleteRange(cf_handle, beginKey, endKey);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key,
+ jbegin_key_len, jend_key, jend_key_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: putLogData
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_WriteBatch_putLogData(JNIEnv* env, jobject jobj,
+ jlong jwb_handle, jbyteArray jblob,
+ jint jblob_len) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+ auto putLogData = [&wb](ROCKSDB_NAMESPACE::Slice blob) {
+ return wb->PutLogData(blob);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::k_op(putLogData, env, jobj, jblob, jblob_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: iterate
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_WriteBatch_iterate(JNIEnv* env, jobject /*jobj*/,
+ jlong jwb_handle,
+ jlong handlerHandle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ ROCKSDB_NAMESPACE::Status s = wb->Iterate(
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchHandlerJniCallback*>(
+ handlerHandle));
+
+ if (s.ok()) {
+ return;
+ }
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: data
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_WriteBatch_data(JNIEnv* env, jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ auto data = wb->Data();
+ return ROCKSDB_NAMESPACE::JniUtil::copyBytes(env, data);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: getDataSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_WriteBatch_getDataSize(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ auto data_size = wb->GetDataSize();
+ return static_cast<jlong>(data_size);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: hasPut
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteBatch_hasPut(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return wb->HasPut();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: hasDelete
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteBatch_hasDelete(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return wb->HasDelete();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: hasSingleDelete
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteBatch_hasSingleDelete(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return wb->HasSingleDelete();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: hasDeleteRange
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteBatch_hasDeleteRange(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return wb->HasDeleteRange();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: hasMerge
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteBatch_hasMerge(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return wb->HasMerge();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: hasBeginPrepare
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteBatch_hasBeginPrepare(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return wb->HasBeginPrepare();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: hasEndPrepare
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteBatch_hasEndPrepare(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return wb->HasEndPrepare();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: hasCommit
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteBatch_hasCommit(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return wb->HasCommit();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: hasRollback
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteBatch_hasRollback(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return wb->HasRollback();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: markWalTerminationPoint
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatch_markWalTerminationPoint(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ wb->MarkWalTerminationPoint();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: getWalTerminationPoint
+ * Signature: (J)Lorg/rocksdb/WriteBatch/SavePoint;
+ */
+jobject Java_org_rocksdb_WriteBatch_getWalTerminationPoint(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ auto save_point = wb->GetWalTerminationPoint();
+ return ROCKSDB_NAMESPACE::WriteBatchSavePointJni::construct(env, save_point);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatch_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(handle);
+ assert(wb != nullptr);
+ delete wb;
+}
+
+/*
+ * Class: org_rocksdb_WriteBatch_Handler
+ * Method: createNewHandler0
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0(JNIEnv* env,
+ jobject jobj) {
+ auto* wbjnic = new ROCKSDB_NAMESPACE::WriteBatchHandlerJniCallback(env, jobj);
+ return reinterpret_cast<jlong>(wbjnic);
+}
diff --git a/src/rocksdb/java/rocksjni/write_batch_test.cc b/src/rocksdb/java/rocksjni/write_batch_test.cc
new file mode 100644
index 000000000..114eac42c
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/write_batch_test.cc
@@ -0,0 +1,198 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// testing of the C++ ROCKSDB_NAMESPACE::WriteBatch methods from the Java side.
+#include <memory>
+
+#include "db/memtable.h"
+#include "db/write_batch_internal.h"
+#include "include/org_rocksdb_WriteBatch.h"
+#include "include/org_rocksdb_WriteBatchTest.h"
+#include "include/org_rocksdb_WriteBatchTestInternalHelper.h"
+#include "include/org_rocksdb_WriteBatch_Handler.h"
+#include "options/cf_options.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/status.h"
+#include "rocksdb/write_batch.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "rocksjni/portal.h"
+#include "table/scoped_arena_iterator.h"
+#include "test_util/testharness.h"
+#include "util/string_util.h"
+
+/*
+ * Class: org_rocksdb_WriteBatchTest
+ * Method: getContents
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(JNIEnv* env,
+ jclass /*jclazz*/,
+ jlong jwb_handle) {
+ auto* b = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(b != nullptr);
+
+ // TODO: Currently the following code is directly copied from
+ // db/write_batch_test.cc. It could be implemented in Java once
+ // all the necessary components can be accessed via the JNI API.
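+ // In outline: the batch is replayed into a fresh MemTable and every entry
+ // is rendered into `state` as "Op(key[, value])@sequence", which is then
+ // returned to Java as a byte[] for comparison in the test.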
+
+ ROCKSDB_NAMESPACE::InternalKeyComparator cmp(
+ ROCKSDB_NAMESPACE::BytewiseComparator());
+ auto factory = std::make_shared<ROCKSDB_NAMESPACE::SkipListFactory>();
+ ROCKSDB_NAMESPACE::Options options;
+ ROCKSDB_NAMESPACE::WriteBufferManager wb(options.db_write_buffer_size);
+ options.memtable_factory = factory;
+ ROCKSDB_NAMESPACE::MemTable* mem = new ROCKSDB_NAMESPACE::MemTable(
+ cmp, ROCKSDB_NAMESPACE::ImmutableCFOptions(options),
+ ROCKSDB_NAMESPACE::MutableCFOptions(options), &wb,
+ ROCKSDB_NAMESPACE::kMaxSequenceNumber, 0 /* column_family_id */);
+ mem->Ref();
+ std::string state;
+ ROCKSDB_NAMESPACE::ColumnFamilyMemTablesDefault cf_mems_default(mem);
+ ROCKSDB_NAMESPACE::Status s =
+ ROCKSDB_NAMESPACE::WriteBatchInternal::InsertInto(b, &cf_mems_default,
+ nullptr, nullptr);
+ unsigned int count = 0;
+ ROCKSDB_NAMESPACE::Arena arena;
+ ROCKSDB_NAMESPACE::ScopedArenaIterator iter(
+ mem->NewIterator(ROCKSDB_NAMESPACE::ReadOptions(), &arena));
+ for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ ROCKSDB_NAMESPACE::ParsedInternalKey ikey;
+ ikey.clear();
+ bool parsed = ROCKSDB_NAMESPACE::ParseInternalKey(iter->key(), &ikey);
+ if (!parsed) {
+ assert(parsed);
+ }
+ switch (ikey.type) {
+ case ROCKSDB_NAMESPACE::kTypeValue:
+ state.append("Put(");
+ state.append(ikey.user_key.ToString());
+ state.append(", ");
+ state.append(iter->value().ToString());
+ state.append(")");
+ count++;
+ break;
+ case ROCKSDB_NAMESPACE::kTypeMerge:
+ state.append("Merge(");
+ state.append(ikey.user_key.ToString());
+ state.append(", ");
+ state.append(iter->value().ToString());
+ state.append(")");
+ count++;
+ break;
+ case ROCKSDB_NAMESPACE::kTypeDeletion:
+ state.append("Delete(");
+ state.append(ikey.user_key.ToString());
+ state.append(")");
+ count++;
+ break;
+ case ROCKSDB_NAMESPACE::kTypeSingleDeletion:
+ state.append("SingleDelete(");
+ state.append(ikey.user_key.ToString());
+ state.append(")");
+ count++;
+ break;
+ case ROCKSDB_NAMESPACE::kTypeRangeDeletion:
+ state.append("DeleteRange(");
+ state.append(ikey.user_key.ToString());
+ state.append(", ");
+ state.append(iter->value().ToString());
+ state.append(")");
+ count++;
+ break;
+ case ROCKSDB_NAMESPACE::kTypeLogData:
+ state.append("LogData(");
+ state.append(ikey.user_key.ToString());
+ state.append(")");
+ count++;
+ break;
+ default:
+ assert(false);
+ state.append("Err:Expected(");
+ state.append(std::to_string(ikey.type));
+ state.append(")");
+ count++;
+ break;
+ }
+ state.append("@");
+ state.append(ROCKSDB_NAMESPACE::NumberToString(ikey.sequence));
+ }
+ if (!s.ok()) {
+ state.append(s.ToString());
+ } else if (ROCKSDB_NAMESPACE::WriteBatchInternal::Count(b) != count) {
+ state.append("Err:CountMismatch(expected=");
+ state.append(
+ std::to_string(ROCKSDB_NAMESPACE::WriteBatchInternal::Count(b)));
+ state.append(", actual=");
+ state.append(std::to_string(count));
+ state.append(")");
+ }
+ delete mem->Unref();
+
+ jbyteArray jstate = env->NewByteArray(static_cast<jsize>(state.size()));
+ if (jstate == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(
+ jstate, 0, static_cast<jsize>(state.size()),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(state.c_str())));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jstate);
+ return nullptr;
+ }
+
+ return jstate;
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchTestInternalHelper
+ * Method: setSequence
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_WriteBatchTestInternalHelper_setSequence(
+ JNIEnv* /*env*/, jclass /*jclazz*/, jlong jwb_handle, jlong jsn) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ ROCKSDB_NAMESPACE::WriteBatchInternal::SetSequence(
+ wb, static_cast<ROCKSDB_NAMESPACE::SequenceNumber>(jsn));
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchTestInternalHelper
+ * Method: sequence
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_WriteBatchTestInternalHelper_sequence(JNIEnv* /*env*/,
+ jclass /*jclazz*/,
+ jlong jwb_handle) {
+ auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
+ assert(wb != nullptr);
+
+ return static_cast<jlong>(
+ ROCKSDB_NAMESPACE::WriteBatchInternal::Sequence(wb));
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchTestInternalHelper
+ * Method: append
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_WriteBatchTestInternalHelper_append(JNIEnv* /*env*/,
+ jclass /*jclazz*/,
+ jlong jwb_handle_1,
+ jlong jwb_handle_2) {
+ auto* wb1 = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle_1);
+ assert(wb1 != nullptr);
+ auto* wb2 = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle_2);
+ assert(wb2 != nullptr);
+
+ ROCKSDB_NAMESPACE::WriteBatchInternal::Append(wb1, wb2);
+}
diff --git a/src/rocksdb/java/rocksjni/write_batch_with_index.cc b/src/rocksdb/java/rocksjni/write_batch_with_index.cc
new file mode 100644
index 000000000..da8264525
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/write_batch_with_index.cc
@@ -0,0 +1,862 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::WriteBatchWithIndex methods from the Java side.
+
+#include "rocksdb/utilities/write_batch_with_index.h"
+#include "include/org_rocksdb_WBWIRocksIterator.h"
+#include "include/org_rocksdb_WriteBatchWithIndex.h"
+#include "rocksdb/comparator.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: newWriteBatchWithIndex
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__(
+ JNIEnv* /*env*/, jclass /*jcls*/) {
+ auto* wbwi = new ROCKSDB_NAMESPACE::WriteBatchWithIndex();
+ return reinterpret_cast<jlong>(wbwi);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: newWriteBatchWithIndex
+ * Signature: (Z)J
+ */
+jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z(
+ JNIEnv* /*env*/, jclass /*jcls*/, jboolean joverwrite_key) {
+ auto* wbwi = new ROCKSDB_NAMESPACE::WriteBatchWithIndex(
+ ROCKSDB_NAMESPACE::BytewiseComparator(), 0,
+ static_cast<bool>(joverwrite_key));
+ return reinterpret_cast<jlong>(wbwi);
+}
+
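+// Note: with overwrite_key == true the index keeps only the most recent
+// update for each key (map-like semantics); with false, every update for a
+// key is retained in the index (multimap-like semantics).
+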
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: newWriteBatchWithIndex
+ * Signature: (JBIZ)J
+ */
+jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JBIZ(
+ JNIEnv* /*env*/, jclass /*jcls*/, jlong jfallback_index_comparator_handle,
+ jbyte jcomparator_type, jint jreserved_bytes, jboolean joverwrite_key) {
+ ROCKSDB_NAMESPACE::Comparator* fallback_comparator = nullptr;
+ switch (jcomparator_type) {
+ // JAVA_COMPARATOR
+ case 0x0:
+ fallback_comparator =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallback*>(
+ jfallback_index_comparator_handle);
+ break;
+
+ // JAVA_NATIVE_COMPARATOR_WRAPPER
+ case 0x1:
+ fallback_comparator = reinterpret_cast<ROCKSDB_NAMESPACE::Comparator*>(
+ jfallback_index_comparator_handle);
+ break;
+ }
+ auto* wbwi = new ROCKSDB_NAMESPACE::WriteBatchWithIndex(
+ fallback_comparator, static_cast<size_t>(jreserved_bytes),
+ static_cast<bool>(joverwrite_key));
+ return reinterpret_cast<jlong>(wbwi);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: count0
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_WriteBatchWithIndex_count0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jwbwi_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+
+ return static_cast<jint>(wbwi->GetWriteBatch()->Count());
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: put
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BI(
+ JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
+ jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto put = [&wbwi](ROCKSDB_NAMESPACE::Slice key,
+ ROCKSDB_NAMESPACE::Slice value) {
+ return wbwi->Put(key, value);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(put, env, jobj, jkey, jkey_len,
+ jentry_value, jentry_value_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: put
+ * Signature: (J[BI[BIJ)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BIJ(
+ JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
+ jint jkey_len, jbyteArray jentry_value, jint jentry_value_len,
+ jlong jcf_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ assert(cf_handle != nullptr);
+ auto put = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice key,
+ ROCKSDB_NAMESPACE::Slice value) {
+ return wbwi->Put(cf_handle, key, value);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(put, env, jobj, jkey, jkey_len,
+ jentry_value, jentry_value_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: putDirect
+ * Signature: (JLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;IIJ)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_putDirect(
+ JNIEnv* env, jobject /*jobj*/, jlong jwb_handle, jobject jkey,
+ jint jkey_offset, jint jkey_len, jobject jval, jint jval_offset,
+ jint jval_len, jlong jcf_handle) {
+ // the handle passed in is the WriteBatchWithIndex*, not a plain WriteBatch*
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwb_handle);
+ assert(wbwi != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto put = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice& key,
+ ROCKSDB_NAMESPACE::Slice& value) {
+ if (cf_handle == nullptr) {
+ wbwi->Put(key, value);
+ } else {
+ wbwi->Put(cf_handle, key, value);
+ }
+ };
+ ROCKSDB_NAMESPACE::JniUtil::kv_op_direct(
+ put, env, jkey, jkey_offset, jkey_len, jval, jval_offset, jval_len);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: merge
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BI(
+ JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
+ jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto merge = [&wbwi](ROCKSDB_NAMESPACE::Slice key,
+ ROCKSDB_NAMESPACE::Slice value) {
+ return wbwi->Merge(key, value);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len,
+ jentry_value, jentry_value_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: merge
+ * Signature: (J[BI[BIJ)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BIJ(
+ JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
+ jint jkey_len, jbyteArray jentry_value, jint jentry_value_len,
+ jlong jcf_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ assert(cf_handle != nullptr);
+ auto merge = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice key,
+ ROCKSDB_NAMESPACE::Slice value) {
+ return wbwi->Merge(cf_handle, key, value);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len,
+ jentry_value, jentry_value_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: delete
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_delete__J_3BI(JNIEnv* env,
+ jobject jobj,
+ jlong jwbwi_handle,
+ jbyteArray jkey,
+ jint jkey_len) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto remove = [&wbwi](ROCKSDB_NAMESPACE::Slice key) {
+ return wbwi->Delete(key);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::k_op(remove, env, jobj, jkey, jkey_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: delete
+ * Signature: (J[BIJ)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_delete__J_3BIJ(
+ JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
+ jint jkey_len, jlong jcf_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ assert(cf_handle != nullptr);
+ auto remove = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice key) {
+ return wbwi->Delete(cf_handle, key);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::k_op(remove, env, jobj, jkey, jkey_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: singleDelete
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_singleDelete__J_3BI(
+ JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
+ jint jkey_len) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto single_delete = [&wbwi](ROCKSDB_NAMESPACE::Slice key) {
+ return wbwi->SingleDelete(key);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::k_op(single_delete, env, jobj, jkey,
+ jkey_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: singleDelete
+ * Signature: (J[BIJ)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_singleDelete__J_3BIJ(
+ JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
+ jint jkey_len, jlong jcf_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ assert(cf_handle != nullptr);
+ auto single_delete = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice key) {
+ return wbwi->SingleDelete(cf_handle, key);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::k_op(single_delete, env, jobj, jkey,
+ jkey_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: removeDirect
+ * Signature: (JLjava/nio/ByteBuffer;IIJ)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_removeDirect(
+ JNIEnv* env, jobject /*jobj*/, jlong jwb_handle, jobject jkey,
+ jint jkey_offset, jint jkey_len, jlong jcf_handle) {
+ // the handle passed in is the WriteBatchWithIndex*, not a plain WriteBatch*
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwb_handle);
+ assert(wbwi != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto remove = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice& key) {
+ if (cf_handle == nullptr) {
+ wbwi->Delete(key);
+ } else {
+ wbwi->Delete(cf_handle, key);
+ }
+ };
+ ROCKSDB_NAMESPACE::JniUtil::k_op_direct(remove, env, jkey, jkey_offset,
+ jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: deleteRange
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_deleteRange__J_3BI_3BI(
+ JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jbegin_key,
+ jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto deleteRange = [&wbwi](ROCKSDB_NAMESPACE::Slice beginKey,
+ ROCKSDB_NAMESPACE::Slice endKey) {
+ return wbwi->DeleteRange(beginKey, endKey);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key,
+ jbegin_key_len, jend_key, jend_key_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: deleteRange
+ * Signature: (J[BI[BIJ)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_deleteRange__J_3BI_3BIJ(
+ JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jbegin_key,
+ jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len,
+ jlong jcf_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ assert(cf_handle != nullptr);
+ auto deleteRange = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice beginKey,
+ ROCKSDB_NAMESPACE::Slice endKey) {
+ return wbwi->DeleteRange(cf_handle, beginKey, endKey);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key,
+ jbegin_key_len, jend_key, jend_key_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: putLogData
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_putLogData(JNIEnv* env, jobject jobj,
+ jlong jwbwi_handle,
+ jbyteArray jblob,
+ jint jblob_len) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+ auto putLogData = [&wbwi](ROCKSDB_NAMESPACE::Slice blob) {
+ return wbwi->PutLogData(blob);
+ };
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::JniUtil::k_op(putLogData, env, jobj, jblob, jblob_len);
+ if (status != nullptr && !status->ok()) {
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
+ }
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: clear0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_clear0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jwbwi_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+
+ wbwi->Clear();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: setSavePoint0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_setSavePoint0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jwbwi_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+
+ wbwi->SetSavePoint();
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: rollbackToSavePoint0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_rollbackToSavePoint0(
+ JNIEnv* env, jobject /*jobj*/, jlong jwbwi_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+
+ auto s = wbwi->RollbackToSavePoint();
+
+ if (s.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: popSavePoint
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_popSavePoint(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jwbwi_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+
+ auto s = wbwi->PopSavePoint();
+
+ if (s.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: setMaxBytes
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_setMaxBytes(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jwbwi_handle,
+ jlong jmax_bytes) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+
+ wbwi->SetMaxBytes(static_cast<size_t>(jmax_bytes));
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: getWriteBatch
+ * Signature: (J)Lorg/rocksdb/WriteBatch;
+ */
+jobject Java_org_rocksdb_WriteBatchWithIndex_getWriteBatch(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong jwbwi_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ assert(wbwi != nullptr);
+
+ auto* wb = wbwi->GetWriteBatch();
+
+ // TODO(AR) is the `wb` object owned by us?
+ return ROCKSDB_NAMESPACE::WriteBatchJni::construct(env, wb);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: iterator0
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jwbwi_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ auto* wbwi_iterator = wbwi->NewIterator();
+ return reinterpret_cast<jlong>(wbwi_iterator);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: iterator1
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jwbwi_handle,
+ jlong jcf_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto* wbwi_iterator = wbwi->NewIterator(cf_handle);
+ return reinterpret_cast<jlong>(wbwi_iterator);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: iteratorWithBase
+ * Signature: (JJJ)J
+ */
+jlong Java_org_rocksdb_WriteBatchWithIndex_iteratorWithBase(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong jwbwi_handle,
+ jlong jcf_handle,
+ jlong jbi_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+ auto* base_iterator =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(jbi_handle);
+ auto* iterator = wbwi->NewIteratorWithBase(cf_handle, base_iterator);
+ return reinterpret_cast<jlong>(iterator);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: getFromBatch
+ * Signature: (JJ[BI)[B
+ */
+jbyteArray JNICALL Java_org_rocksdb_WriteBatchWithIndex_getFromBatch__JJ_3BI(
+ JNIEnv* env, jobject /*jobj*/, jlong jwbwi_handle, jlong jdbopt_handle,
+ jbyteArray jkey, jint jkey_len) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ auto* dbopt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdbopt_handle);
+
+ auto getter = [&wbwi, &dbopt](const ROCKSDB_NAMESPACE::Slice& key,
+ std::string* value) {
+ return wbwi->GetFromBatch(*dbopt, key, value);
+ };
+
+ return ROCKSDB_NAMESPACE::JniUtil::v_op(getter, env, jkey, jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: getFromBatch
+ * Signature: (JJ[BIJ)[B
+ */
+jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatch__JJ_3BIJ(
+ JNIEnv* env, jobject /*jobj*/, jlong jwbwi_handle, jlong jdbopt_handle,
+ jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ auto* dbopt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdbopt_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+
+ auto getter = [&wbwi, &cf_handle, &dbopt](const ROCKSDB_NAMESPACE::Slice& key,
+ std::string* value) {
+ return wbwi->GetFromBatch(cf_handle, *dbopt, key, value);
+ };
+
+ return ROCKSDB_NAMESPACE::JniUtil::v_op(getter, env, jkey, jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: getFromBatchAndDB
+ * Signature: (JJJ[BI)[B
+ */
+jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatchAndDB__JJJ_3BI(
+ JNIEnv* env, jobject /*jobj*/, jlong jwbwi_handle, jlong jdb_handle,
+ jlong jreadopt_handle, jbyteArray jkey, jint jkey_len) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* readopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jreadopt_handle);
+
+ auto getter = [&wbwi, &db, &readopt](const ROCKSDB_NAMESPACE::Slice& key,
+ std::string* value) {
+ return wbwi->GetFromBatchAndDB(db, *readopt, key, value);
+ };
+
+ return ROCKSDB_NAMESPACE::JniUtil::v_op(getter, env, jkey, jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: getFromBatchAndDB
+ * Signature: (JJJ[BIJ)[B
+ */
+jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatchAndDB__JJJ_3BIJ(
+ JNIEnv* env, jobject /*jobj*/, jlong jwbwi_handle, jlong jdb_handle,
+ jlong jreadopt_handle, jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle);
+ auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+ auto* readopt =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jreadopt_handle);
+ auto* cf_handle =
+ reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+
+ auto getter = [&wbwi, &db, &cf_handle, &readopt](
+ const ROCKSDB_NAMESPACE::Slice& key, std::string* value) {
+ return wbwi->GetFromBatchAndDB(db, *readopt, cf_handle, key, value);
+ };
+
+ return ROCKSDB_NAMESPACE::JniUtil::v_op(getter, env, jkey, jkey_len);
+}
+
+/*
+ * Class: org_rocksdb_WriteBatchWithIndex
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBatchWithIndex_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ auto* wbwi =
+ reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(handle);
+ assert(wbwi != nullptr);
+ delete wbwi;
+}
+
+/* WBWIRocksIterator below */
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WBWIRocksIterator_disposeInternal(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle);
+ assert(it != nullptr);
+ delete it;
+}
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: isValid0
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WBWIRocksIterator_isValid0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ return reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle)->Valid();
+}
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: seekToFirst0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WBWIRocksIterator_seekToFirst0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle)->SeekToFirst();
+}
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: seekToLast0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WBWIRocksIterator_seekToLast0(JNIEnv* /*env*/,
+ jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle)->SeekToLast();
+}
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: next0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WBWIRocksIterator_next0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle)->Next();
+}
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: prev0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WBWIRocksIterator_prev0(JNIEnv* /*env*/, jobject /*jobj*/,
+ jlong handle) {
+ reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle)->Prev();
+}
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: seek0
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_WBWIRocksIterator_seek0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle, jbyteArray jtarget,
+ jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle);
+ jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
+ if (target == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::Slice target_slice(reinterpret_cast<char*>(target),
+ jtarget_len);
+
+ it->Seek(target_slice);
+
+ env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: seekDirect0
+ * Signature: (JLjava/nio/ByteBuffer;II)V
+ */
+void Java_org_rocksdb_WBWIRocksIterator_seekDirect0(
+ JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget,
+ jint jtarget_off, jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle);
+ auto seek = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) {
+ it->Seek(target_slice);
+ };
+ ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seek, env, jtarget, jtarget_off,
+ jtarget_len);
+}
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: seekForPrev0
+ * Signature: (J[BI)V
+ */
+void Java_org_rocksdb_WBWIRocksIterator_seekForPrev0(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong handle,
+ jbyteArray jtarget,
+ jint jtarget_len) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle);
+ jbyte* target = env->GetByteArrayElements(jtarget, nullptr);
+ if (target == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::Slice target_slice(reinterpret_cast<char*>(target),
+ jtarget_len);
+
+ it->SeekForPrev(target_slice);
+
+ env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: status0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WBWIRocksIterator_status0(JNIEnv* env, jobject /*jobj*/,
+ jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle);
+ ROCKSDB_NAMESPACE::Status s = it->status();
+
+ if (s.ok()) {
+ return;
+ }
+
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class: org_rocksdb_WBWIRocksIterator
+ * Method: entry1
+ * Signature: (J)[J
+ */
+jlongArray Java_org_rocksdb_WBWIRocksIterator_entry1(JNIEnv* env,
+ jobject /*jobj*/,
+ jlong handle) {
+ auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle);
+ const ROCKSDB_NAMESPACE::WriteEntry& we = it->Entry();
+
+ jlong results[3];
+
+ // set the type of the write entry
+ results[0] = ROCKSDB_NAMESPACE::WriteTypeJni::toJavaWriteType(we.type);
+
+ // NOTE: key_slice and value_slice will be freed by
+ // org.rocksdb.DirectSlice#close
+
+ auto* key_slice = new ROCKSDB_NAMESPACE::Slice(we.key.data(), we.key.size());
+ results[1] = reinterpret_cast<jlong>(key_slice);
+ if (we.type == ROCKSDB_NAMESPACE::kDeleteRecord ||
+ we.type == ROCKSDB_NAMESPACE::kSingleDeleteRecord ||
+ we.type == ROCKSDB_NAMESPACE::kLogDataRecord) {
+ // set native handle of value slice to null if no value available
+ results[2] = 0;
+ } else {
+ auto* value_slice =
+ new ROCKSDB_NAMESPACE::Slice(we.value.data(), we.value.size());
+ results[2] = reinterpret_cast<jlong>(value_slice);
+ }
+
+ jlongArray jresults = env->NewLongArray(3);
+ if (jresults == nullptr) {
+ // exception thrown: OutOfMemoryError
+ if (results[2] != 0) {
+ auto* value_slice =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(results[2]);
+ delete value_slice;
+ }
+ delete key_slice;
+ return nullptr;
+ }
+
+ env->SetLongArrayRegion(jresults, 0, 3, results);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresults);
+ if (results[2] != 0) {
+ auto* value_slice =
+ reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(results[2]);
+ delete value_slice;
+ }
+ delete key_slice;
+ return nullptr;
+ }
+
+ return jresults;
+}
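
The entry points above complete the native surface of org.rocksdb.WriteBatchWithIndex: getFromBatch reads only the buffered updates, getFromBatchAndDB overlays them on the database, and iteratorWithBase merges a batch iterator with a base database iterator. A minimal Java sketch of that read-your-own-writes flow follows; the database path is an illustrative assumption.

    import org.rocksdb.*;

    public class WbwiSketch {
      static { RocksDB.loadLibrary(); }

      public static void main(final String[] args) throws RocksDBException {
        try (final Options options = new Options().setCreateIfMissing(true);
             final RocksDB db = RocksDB.open(options, "/tmp/wbwi_sketch");
             final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) {
          wbwi.put("k1".getBytes(), "v1".getBytes());

          // backed by getFromBatch__JJ_3BI above: sees only the batch
          try (final DBOptions dbOptions = new DBOptions()) {
            assert wbwi.getFromBatch(dbOptions, "k1".getBytes()) != null;
          }

          // backed by getFromBatchAndDB__JJJ_3BI: batch first, then the DB
          try (final ReadOptions readOptions = new ReadOptions()) {
            assert wbwi.getFromBatchAndDB(db, readOptions, "k1".getBytes()) != null;
          }

          // backed by iteratorWithBase: pending updates overlaid on the DB
          try (final ReadOptions readOptions = new ReadOptions();
               final RocksIterator base = db.newIterator(readOptions);
               final RocksIterator it = wbwi.newIteratorWithBase(base)) {
            for (it.seekToFirst(); it.isValid(); it.next()) {
              // visits committed keys and buffered batch entries together
            }
          }

          try (final WriteOptions writeOptions = new WriteOptions()) {
            db.write(writeOptions, wbwi);
          }
        }
      }
    }
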
diff --git a/src/rocksdb/java/rocksjni/write_buffer_manager.cc b/src/rocksdb/java/rocksjni/write_buffer_manager.cc
new file mode 100644
index 000000000..a52daed7d
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/write_buffer_manager.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include <jni.h>
+
+#include "include/org_rocksdb_WriteBufferManager.h"
+
+#include "rocksdb/cache.h"
+#include "rocksdb/write_buffer_manager.h"
+
+/*
+ * Class: org_rocksdb_WriteBufferManager
+ * Method: newWriteBufferManager
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_WriteBufferManager_newWriteBufferManager(
+ JNIEnv* /*env*/, jclass /*jclazz*/, jlong jbuffer_size, jlong jcache_handle) {
+ auto* cache_ptr =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache>*>(
+ jcache_handle);
+ auto* write_buffer_manager =
+ new std::shared_ptr<ROCKSDB_NAMESPACE::WriteBufferManager>(
+ std::make_shared<ROCKSDB_NAMESPACE::WriteBufferManager>(jbuffer_size,
+ *cache_ptr));
+ return reinterpret_cast<jlong>(write_buffer_manager);
+}
+
+/*
+ * Class: org_rocksdb_WriteBufferManager
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_WriteBufferManager_disposeInternal(
+ JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ auto* write_buffer_manager =
+ reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::WriteBufferManager>*>(
+ jhandle);
+ assert(write_buffer_manager != nullptr);
+ delete write_buffer_manager;
+}
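
On the Java side, the shared_ptr wrapper above means WriteBufferManager#close only releases the JVM's reference; a DB that still holds the manager keeps it alive. A minimal usage sketch, with illustrative sizes and path, might look like:

    import org.rocksdb.*;

    public class WriteBufferManagerSketch {
      static { RocksDB.loadLibrary(); }

      public static void main(final String[] args) throws RocksDBException {
        try (final Cache cache = new LRUCache(128 * 1024 * 1024);
             // cap memtable memory at 64MB and charge it against the cache
             final WriteBufferManager wbm =
                 new WriteBufferManager(64 * 1024 * 1024, cache);
             final Options options = new Options()
                 .setCreateIfMissing(true)
                 .setWriteBufferManager(wbm);
             final RocksDB db = RocksDB.open(options, "/tmp/wbm_sketch")) {
          db.put("k".getBytes(), "v".getBytes());
        }
      }
    }
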
diff --git a/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc b/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc
new file mode 100644
index 000000000..4ecb6b2d1
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc
@@ -0,0 +1,548 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::WriteBatch::Handler.
+
+#include "rocksjni/writebatchhandlerjnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+WriteBatchHandlerJniCallback::WriteBatchHandlerJniCallback(
+ JNIEnv* env, jobject jWriteBatchHandler)
+ : JniCallback(env, jWriteBatchHandler), m_env(env) {
+
+ m_jPutCfMethodId = WriteBatchHandlerJni::getPutCfMethodId(env);
+ if(m_jPutCfMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jPutMethodId = WriteBatchHandlerJni::getPutMethodId(env);
+ if(m_jPutMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jMergeCfMethodId = WriteBatchHandlerJni::getMergeCfMethodId(env);
+ if(m_jMergeCfMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jMergeMethodId = WriteBatchHandlerJni::getMergeMethodId(env);
+ if(m_jMergeMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jDeleteCfMethodId = WriteBatchHandlerJni::getDeleteCfMethodId(env);
+ if(m_jDeleteCfMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jDeleteMethodId = WriteBatchHandlerJni::getDeleteMethodId(env);
+ if(m_jDeleteMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jSingleDeleteCfMethodId =
+ WriteBatchHandlerJni::getSingleDeleteCfMethodId(env);
+ if(m_jSingleDeleteCfMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jSingleDeleteMethodId = WriteBatchHandlerJni::getSingleDeleteMethodId(env);
+ if(m_jSingleDeleteMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jDeleteRangeCfMethodId =
+ WriteBatchHandlerJni::getDeleteRangeCfMethodId(env);
+ if (m_jDeleteRangeCfMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jDeleteRangeMethodId = WriteBatchHandlerJni::getDeleteRangeMethodId(env);
+ if (m_jDeleteRangeMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jLogDataMethodId = WriteBatchHandlerJni::getLogDataMethodId(env);
+ if(m_jLogDataMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jPutBlobIndexCfMethodId =
+ WriteBatchHandlerJni::getPutBlobIndexCfMethodId(env);
+ if(m_jPutBlobIndexCfMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jMarkBeginPrepareMethodId =
+ WriteBatchHandlerJni::getMarkBeginPrepareMethodId(env);
+ if(m_jMarkBeginPrepareMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jMarkEndPrepareMethodId =
+ WriteBatchHandlerJni::getMarkEndPrepareMethodId(env);
+ if(m_jMarkEndPrepareMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jMarkNoopMethodId = WriteBatchHandlerJni::getMarkNoopMethodId(env);
+ if(m_jMarkNoopMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jMarkRollbackMethodId = WriteBatchHandlerJni::getMarkRollbackMethodId(env);
+ if(m_jMarkRollbackMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jMarkCommitMethodId = WriteBatchHandlerJni::getMarkCommitMethodId(env);
+ if(m_jMarkCommitMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+
+ m_jContinueMethodId = WriteBatchHandlerJni::getContinueMethodId(env);
+ if(m_jContinueMethodId == nullptr) {
+ // exception thrown
+ return;
+ }
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::PutCF(
+ uint32_t column_family_id, const Slice& key, const Slice& value) {
+ auto put = [this, column_family_id] (
+ jbyteArray j_key, jbyteArray j_value) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jPutCfMethodId,
+ static_cast<jint>(column_family_id),
+ j_key,
+ j_value);
+ };
+ auto status = WriteBatchHandlerJniCallback::kv_op(key, value, put);
+ if(status == nullptr) {
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is
+ // an Exception but we don't know
+ // the ROCKSDB_NAMESPACE::Status?
+ } else {
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+}
+
+void WriteBatchHandlerJniCallback::Put(const Slice& key, const Slice& value) {
+ auto put = [this] (
+ jbyteArray j_key, jbyteArray j_value) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jPutMethodId,
+ j_key,
+ j_value);
+ };
+ WriteBatchHandlerJniCallback::kv_op(key, value, put);
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MergeCF(
+ uint32_t column_family_id, const Slice& key, const Slice& value) {
+ auto merge = [this, column_family_id] (
+ jbyteArray j_key, jbyteArray j_value) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jMergeCfMethodId,
+ static_cast<jint>(column_family_id),
+ j_key,
+ j_value);
+ };
+ auto status = WriteBatchHandlerJniCallback::kv_op(key, value, merge);
+ if(status == nullptr) {
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is
+ // an Exception but we don't know
+ // the ROCKSDB_NAMESPACE::Status?
+ } else {
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+}
+
+void WriteBatchHandlerJniCallback::Merge(const Slice& key, const Slice& value) {
+ auto merge = [this] (
+ jbyteArray j_key, jbyteArray j_value) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jMergeMethodId,
+ j_key,
+ j_value);
+ };
+ WriteBatchHandlerJniCallback::kv_op(key, value, merge);
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::DeleteCF(
+ uint32_t column_family_id, const Slice& key) {
+ auto remove = [this, column_family_id] (jbyteArray j_key) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jDeleteCfMethodId,
+ static_cast<jint>(column_family_id),
+ j_key);
+ };
+ auto status = WriteBatchHandlerJniCallback::k_op(key, remove);
+ if(status == nullptr) {
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is
+ // an Exception but we don't know
+ // the ROCKSDB_NAMESPACE::Status?
+ } else {
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+}
+
+void WriteBatchHandlerJniCallback::Delete(const Slice& key) {
+ auto remove = [this] (jbyteArray j_key) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jDeleteMethodId,
+ j_key);
+ };
+ WriteBatchHandlerJniCallback::k_op(key, remove);
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::SingleDeleteCF(
+ uint32_t column_family_id, const Slice& key) {
+ auto singleDelete = [this, column_family_id] (jbyteArray j_key) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jSingleDeleteCfMethodId,
+ static_cast<jint>(column_family_id),
+ j_key);
+ };
+ auto status = WriteBatchHandlerJniCallback::k_op(key, singleDelete);
+ if(status == nullptr) {
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is
+ // an Exception but we don't know
+ // the ROCKSDB_NAMESPACE::Status?
+ } else {
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+}
+
+void WriteBatchHandlerJniCallback::SingleDelete(const Slice& key) {
+ auto singleDelete = [this] (jbyteArray j_key) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jSingleDeleteMethodId,
+ j_key);
+ };
+ WriteBatchHandlerJniCallback::k_op(key, singleDelete);
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::DeleteRangeCF(
+ uint32_t column_family_id, const Slice& beginKey, const Slice& endKey) {
+ auto deleteRange = [this, column_family_id] (
+ jbyteArray j_beginKey, jbyteArray j_endKey) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jDeleteRangeCfMethodId,
+ static_cast<jint>(column_family_id),
+ j_beginKey,
+ j_endKey);
+ };
+ auto status = WriteBatchHandlerJniCallback::kv_op(beginKey, endKey, deleteRange);
+ if(status == nullptr) {
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is
+ // an Exception but we don't know
+ // the ROCKSDB_NAMESPACE::Status?
+ } else {
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+}
+
+void WriteBatchHandlerJniCallback::DeleteRange(const Slice& beginKey,
+ const Slice& endKey) {
+ auto deleteRange = [this] (
+ jbyteArray j_beginKey, jbyteArray j_endKey) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jDeleteRangeMethodId,
+ j_beginKey,
+ j_endKey);
+ };
+ WriteBatchHandlerJniCallback::kv_op(beginKey, endKey, deleteRange);
+}
+
+void WriteBatchHandlerJniCallback::LogData(const Slice& blob) {
+ auto logData = [this] (jbyteArray j_blob) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jLogDataMethodId,
+ j_blob);
+ };
+ WriteBatchHandlerJniCallback::k_op(blob, logData);
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::PutBlobIndexCF(
+ uint32_t column_family_id, const Slice& key, const Slice& value) {
+ auto putBlobIndex = [this, column_family_id] (
+ jbyteArray j_key, jbyteArray j_value) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jPutBlobIndexCfMethodId,
+ static_cast<jint>(column_family_id),
+ j_key,
+ j_value);
+ };
+ auto status = WriteBatchHandlerJniCallback::kv_op(key, value, putBlobIndex);
+ if(status == nullptr) {
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is
+ // an Exception but we don't know
+ // the ROCKSDB_NAMESPACE::Status?
+ } else {
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkBeginPrepare(
+ bool unprepare) {
+#ifndef DEBUG
+ (void) unprepare;
+#else
+ assert(!unprepare);
+#endif
+ m_env->CallVoidMethod(m_jcallback_obj, m_jMarkBeginPrepareMethodId);
+
+  // check for an Exception, in particular a RocksDBException
+ if (m_env->ExceptionCheck()) {
+ // exception thrown
+ jthrowable exception = m_env->ExceptionOccurred();
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::toCppStatus(m_env, exception);
+ if (status == nullptr) {
+      // unknown status, or an exception occurred while extracting the status
+ m_env->ExceptionDescribe();
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) probably need a
+ // better error code here
+
+ } else {
+ m_env->ExceptionClear(); // clear the exception, as we have extracted the status
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+ }
+
+ return ROCKSDB_NAMESPACE::Status::OK();
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkEndPrepare(
+ const Slice& xid) {
+ auto markEndPrepare = [this] (
+ jbyteArray j_xid) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jMarkEndPrepareMethodId,
+ j_xid);
+ };
+ auto status = WriteBatchHandlerJniCallback::k_op(xid, markEndPrepare);
+ if(status == nullptr) {
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is
+ // an Exception but we don't know
+ // the ROCKSDB_NAMESPACE::Status?
+ } else {
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkNoop(
+ bool empty_batch) {
+ m_env->CallVoidMethod(m_jcallback_obj, m_jMarkNoopMethodId, static_cast<jboolean>(empty_batch));
+
+  // check for an Exception, in particular a RocksDBException
+ if (m_env->ExceptionCheck()) {
+ // exception thrown
+ jthrowable exception = m_env->ExceptionOccurred();
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::toCppStatus(m_env, exception);
+ if (status == nullptr) {
+      // unknown status, or an exception occurred while extracting the status
+ m_env->ExceptionDescribe();
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) probably need a
+ // better error code here
+
+ } else {
+ m_env->ExceptionClear(); // clear the exception, as we have extracted the status
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+ }
+
+ return ROCKSDB_NAMESPACE::Status::OK();
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkRollback(
+ const Slice& xid) {
+ auto markRollback = [this] (
+ jbyteArray j_xid) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jMarkRollbackMethodId,
+ j_xid);
+ };
+ auto status = WriteBatchHandlerJniCallback::k_op(xid, markRollback);
+ if(status == nullptr) {
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is
+ // an Exception but we don't know
+ // the ROCKSDB_NAMESPACE::Status?
+ } else {
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+}
+
+ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkCommit(
+ const Slice& xid) {
+ auto markCommit = [this] (
+ jbyteArray j_xid) {
+ m_env->CallVoidMethod(
+ m_jcallback_obj,
+ m_jMarkCommitMethodId,
+ j_xid);
+ };
+ auto status = WriteBatchHandlerJniCallback::k_op(xid, markCommit);
+ if(status == nullptr) {
+ return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is
+ // an Exception but we don't know
+ // the ROCKSDB_NAMESPACE::Status?
+ } else {
+ return ROCKSDB_NAMESPACE::Status(*status);
+ }
+}
+
+bool WriteBatchHandlerJniCallback::Continue() {
+ jboolean jContinue = m_env->CallBooleanMethod(
+ m_jcallback_obj,
+ m_jContinueMethodId);
+ if(m_env->ExceptionCheck()) {
+ // exception thrown
+ m_env->ExceptionDescribe();
+ }
+
+  return jContinue == JNI_TRUE;
+}
+
+std::unique_ptr<ROCKSDB_NAMESPACE::Status> WriteBatchHandlerJniCallback::kv_op(
+ const Slice& key, const Slice& value,
+ std::function<void(jbyteArray, jbyteArray)> kvFn) {
+ const jbyteArray j_key = JniUtil::copyBytes(m_env, key);
+ if (j_key == nullptr) {
+ // exception thrown
+ if (m_env->ExceptionCheck()) {
+ m_env->ExceptionDescribe();
+ }
+ return nullptr;
+ }
+
+ const jbyteArray j_value = JniUtil::copyBytes(m_env, value);
+ if (j_value == nullptr) {
+ // exception thrown
+ if (m_env->ExceptionCheck()) {
+ m_env->ExceptionDescribe();
+ }
+ if (j_key != nullptr) {
+ m_env->DeleteLocalRef(j_key);
+ }
+ return nullptr;
+ }
+
+ kvFn(j_key, j_value);
+
+  // check for an Exception, in particular a RocksDBException
+ if (m_env->ExceptionCheck()) {
+ if (j_value != nullptr) {
+ m_env->DeleteLocalRef(j_value);
+ }
+ if (j_key != nullptr) {
+ m_env->DeleteLocalRef(j_key);
+ }
+
+ // exception thrown
+ jthrowable exception = m_env->ExceptionOccurred();
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::toCppStatus(m_env, exception);
+ if (status == nullptr) {
+      // unknown status, or an exception occurred while extracting the status
+ m_env->ExceptionDescribe();
+ return nullptr;
+
+ } else {
+ m_env->ExceptionClear(); // clear the exception, as we have extracted the status
+ return status;
+ }
+ }
+
+ if (j_value != nullptr) {
+ m_env->DeleteLocalRef(j_value);
+ }
+ if (j_key != nullptr) {
+ m_env->DeleteLocalRef(j_key);
+ }
+
+ // all OK
+ return std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::OK()));
+}
+
+std::unique_ptr<ROCKSDB_NAMESPACE::Status> WriteBatchHandlerJniCallback::k_op(
+ const Slice& key, std::function<void(jbyteArray)> kFn) {
+ const jbyteArray j_key = JniUtil::copyBytes(m_env, key);
+ if (j_key == nullptr) {
+ // exception thrown
+ if (m_env->ExceptionCheck()) {
+ m_env->ExceptionDescribe();
+ }
+ return nullptr;
+ }
+
+ kFn(j_key);
+
+  // check for an Exception, in particular a RocksDBException
+ if (m_env->ExceptionCheck()) {
+ if (j_key != nullptr) {
+ m_env->DeleteLocalRef(j_key);
+ }
+
+ // exception thrown
+ jthrowable exception = m_env->ExceptionOccurred();
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::toCppStatus(m_env, exception);
+ if (status == nullptr) {
+      // unknown status, or an exception occurred while extracting the status
+ m_env->ExceptionDescribe();
+ return nullptr;
+
+ } else {
+ m_env->ExceptionClear(); // clear the exception, as we have extracted the status
+ return status;
+ }
+ }
+
+ if (j_key != nullptr) {
+ m_env->DeleteLocalRef(j_key);
+ }
+
+ // all OK
+ return std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+ new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::OK()));
+}
+} // namespace ROCKSDB_NAMESPACE
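
This callback class is what makes WriteBatch#iterate(Handler) work: the C++ side walks the batch record by record and forwards each one to Java through the jmethodIDs cached in the constructor, with any RocksDBException converted back into a Status via kv_op/k_op. A sketch of the Java half, assuming the Handler abstract methods in this version mirror the callbacks bound above:

    import org.rocksdb.*;

    public class BatchDumpSketch {
      static { RocksDB.loadLibrary(); }

      // prints puts and ignores everything else; the unused callbacks get
      // empty bodies so the class is concrete
      static class DumpHandler extends WriteBatch.Handler {
        @Override public void put(int cfId, byte[] key, byte[] value) {
          System.out.println("put cf=" + cfId + " key=" + new String(key));
        }
        @Override public void put(byte[] key, byte[] value) {
          System.out.println("put key=" + new String(key));
        }
        @Override public void merge(int cfId, byte[] key, byte[] value) {}
        @Override public void merge(byte[] key, byte[] value) {}
        @Override public void delete(int cfId, byte[] key) {}
        @Override public void delete(byte[] key) {}
        @Override public void singleDelete(int cfId, byte[] key) {}
        @Override public void singleDelete(byte[] key) {}
        @Override public void deleteRange(int cfId, byte[] begin, byte[] end) {}
        @Override public void deleteRange(byte[] begin, byte[] end) {}
        @Override public void logData(byte[] blob) {}
        @Override public void putBlobIndex(int cfId, byte[] key, byte[] value) {}
        @Override public void markBeginPrepare() {}
        @Override public void markEndPrepare(byte[] xid) {}
        @Override public void markNoop(boolean emptyBatch) {}
        @Override public void markRollback(byte[] xid) {}
        @Override public void markCommit(byte[] xid) {}
      }

      public static void main(final String[] args) throws RocksDBException {
        try (final WriteBatch batch = new WriteBatch();
             final DumpHandler handler = new DumpHandler()) {
          batch.put("k".getBytes(), "v".getBytes());
          batch.delete("old".getBytes());
          batch.iterate(handler); // drives PutCF()/DeleteCF() above
        }
      }
    }
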
diff --git a/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h b/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h
new file mode 100644
index 000000000..a4c61f8bd
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h
@@ -0,0 +1,89 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::WriteBatch::Handler.
+
+#ifndef JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_
+#define JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_
+
+#include <functional>
+#include <jni.h>
+#include <memory>
+#include "rocksjni/jnicallback.h"
+#include "rocksdb/write_batch.h"
+
+namespace ROCKSDB_NAMESPACE {
+/**
+ * This class acts as a bridge between C++
+ * and Java. The methods in this class will be
+ * called back from the RocksDB storage engine (C++)
+ * which calls the appropriate Java method.
+ * This enables Write Batch Handlers to be implemented in Java.
+ */
+class WriteBatchHandlerJniCallback : public JniCallback, public WriteBatch::Handler {
+ public:
+ WriteBatchHandlerJniCallback(
+      JNIEnv* env, jobject jWriteBatchHandler);
+ Status PutCF(uint32_t column_family_id, const Slice& key,
+ const Slice& value);
+ void Put(const Slice& key, const Slice& value);
+ Status MergeCF(uint32_t column_family_id, const Slice& key,
+ const Slice& value);
+ void Merge(const Slice& key, const Slice& value);
+ Status DeleteCF(uint32_t column_family_id, const Slice& key);
+ void Delete(const Slice& key);
+ Status SingleDeleteCF(uint32_t column_family_id, const Slice& key);
+ void SingleDelete(const Slice& key);
+ Status DeleteRangeCF(uint32_t column_family_id, const Slice& beginKey,
+ const Slice& endKey);
+ void DeleteRange(const Slice& beginKey, const Slice& endKey);
+ void LogData(const Slice& blob);
+ Status PutBlobIndexCF(uint32_t column_family_id, const Slice& key,
+ const Slice& value);
+ Status MarkBeginPrepare(bool);
+ Status MarkEndPrepare(const Slice& xid);
+ Status MarkNoop(bool empty_batch);
+ Status MarkRollback(const Slice& xid);
+ Status MarkCommit(const Slice& xid);
+ bool Continue();
+
+ private:
+ JNIEnv* m_env;
+ jmethodID m_jPutCfMethodId;
+ jmethodID m_jPutMethodId;
+ jmethodID m_jMergeCfMethodId;
+ jmethodID m_jMergeMethodId;
+ jmethodID m_jDeleteCfMethodId;
+ jmethodID m_jDeleteMethodId;
+ jmethodID m_jSingleDeleteCfMethodId;
+ jmethodID m_jSingleDeleteMethodId;
+ jmethodID m_jDeleteRangeCfMethodId;
+ jmethodID m_jDeleteRangeMethodId;
+ jmethodID m_jLogDataMethodId;
+ jmethodID m_jPutBlobIndexCfMethodId;
+ jmethodID m_jMarkBeginPrepareMethodId;
+ jmethodID m_jMarkEndPrepareMethodId;
+ jmethodID m_jMarkNoopMethodId;
+ jmethodID m_jMarkRollbackMethodId;
+ jmethodID m_jMarkCommitMethodId;
+ jmethodID m_jContinueMethodId;
+ /**
+ * @return A pointer to a ROCKSDB_NAMESPACE::Status or nullptr if an
+ * unexpected exception occurred
+ */
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> kv_op(
+ const Slice& key, const Slice& value,
+ std::function<void(jbyteArray, jbyteArray)> kvFn);
+ /**
+ * @return A pointer to a ROCKSDB_NAMESPACE::Status or nullptr if an
+ * unexpected exception occurred
+ */
+ std::unique_ptr<ROCKSDB_NAMESPACE::Status> k_op(
+ const Slice& key, std::function<void(jbyteArray)> kFn);
+};
+} // namespace ROCKSDB_NAMESPACE
+
+#endif // JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_
diff --git a/src/rocksdb/java/samples/src/main/java/OptimisticTransactionSample.java b/src/rocksdb/java/samples/src/main/java/OptimisticTransactionSample.java
new file mode 100644
index 000000000..1633d1f2b
--- /dev/null
+++ b/src/rocksdb/java/samples/src/main/java/OptimisticTransactionSample.java
@@ -0,0 +1,184 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+import org.rocksdb.*;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * Demonstrates using Transactions on an OptimisticTransactionDB with
+ * varying isolation guarantees
+ */
+public class OptimisticTransactionSample {
+ private static final String dbPath = "/tmp/rocksdb_optimistic_transaction_example";
+
+ public static final void main(final String args[]) throws RocksDBException {
+
+ try(final Options options = new Options()
+ .setCreateIfMissing(true);
+ final OptimisticTransactionDB txnDb =
+ OptimisticTransactionDB.open(options, dbPath)) {
+
+ try (final WriteOptions writeOptions = new WriteOptions();
+ final ReadOptions readOptions = new ReadOptions()) {
+
+ ////////////////////////////////////////////////////////
+ //
+ // Simple OptimisticTransaction Example ("Read Committed")
+ //
+ ////////////////////////////////////////////////////////
+ readCommitted(txnDb, writeOptions, readOptions);
+
+
+ ////////////////////////////////////////////////////////
+ //
+ // "Repeatable Read" (Snapshot Isolation) Example
+ // -- Using a single Snapshot
+ //
+ ////////////////////////////////////////////////////////
+ repeatableRead(txnDb, writeOptions, readOptions);
+
+
+ ////////////////////////////////////////////////////////
+ //
+ // "Read Committed" (Monotonic Atomic Views) Example
+ // --Using multiple Snapshots
+ //
+ ////////////////////////////////////////////////////////
+ readCommitted_monotonicAtomicViews(txnDb, writeOptions, readOptions);
+ }
+ }
+ }
+
+ /**
+ * Demonstrates "Read Committed" isolation
+ */
+ private static void readCommitted(final OptimisticTransactionDB txnDb,
+ final WriteOptions writeOptions, final ReadOptions readOptions)
+ throws RocksDBException {
+ final byte key1[] = "abc".getBytes(UTF_8);
+ final byte value1[] = "def".getBytes(UTF_8);
+
+ final byte key2[] = "xyz".getBytes(UTF_8);
+ final byte value2[] = "zzz".getBytes(UTF_8);
+
+ // Start a transaction
+ try(final Transaction txn = txnDb.beginTransaction(writeOptions)) {
+ // Read a key in this transaction
+ byte[] value = txn.get(readOptions, key1);
+ assert(value == null);
+
+ // Write a key in this transaction
+ txn.put(key1, value1);
+
+ // Read a key OUTSIDE this transaction. Does not affect txn.
+ value = txnDb.get(readOptions, key1);
+ assert(value == null);
+
+ // Write a key OUTSIDE of this transaction.
+ // Does not affect txn since this is an unrelated key.
+ // If we wrote key 'abc' here, the transaction would fail to commit.
+ txnDb.put(writeOptions, key2, value2);
+
+ // Commit transaction
+ txn.commit();
+ }
+ }
+
+ /**
+ * Demonstrates "Repeatable Read" (Snapshot Isolation) isolation
+ */
+ private static void repeatableRead(final OptimisticTransactionDB txnDb,
+ final WriteOptions writeOptions, final ReadOptions readOptions)
+ throws RocksDBException {
+
+ final byte key1[] = "ghi".getBytes(UTF_8);
+ final byte value1[] = "jkl".getBytes(UTF_8);
+
+ // Set a snapshot at start of transaction by setting setSnapshot(true)
+ try(final OptimisticTransactionOptions txnOptions =
+ new OptimisticTransactionOptions().setSetSnapshot(true);
+ final Transaction txn =
+ txnDb.beginTransaction(writeOptions, txnOptions)) {
+
+ final Snapshot snapshot = txn.getSnapshot();
+
+ // Write a key OUTSIDE of transaction
+ txnDb.put(writeOptions, key1, value1);
+
+ // Read a key using the snapshot.
+ readOptions.setSnapshot(snapshot);
+ final byte[] value = txn.getForUpdate(readOptions, key1, true);
+ assert(value == value1);
+
+ try {
+ // Attempt to commit transaction
+ txn.commit();
+ throw new IllegalStateException();
+ } catch(final RocksDBException e) {
+ // Transaction could not commit since the write outside of the txn
+ // conflicted with the read!
+ assert(e.getStatus().getCode() == Status.Code.Busy);
+ }
+
+ txn.rollback();
+ } finally {
+ // Clear snapshot from read options since it is no longer valid
+ readOptions.setSnapshot(null);
+ }
+ }
+
+ /**
+ * Demonstrates "Read Committed" (Monotonic Atomic Views) isolation
+ *
+ * In this example, we set the snapshot multiple times. This is probably
+ * only necessary if you have very strict isolation requirements to
+ * implement.
+ */
+ private static void readCommitted_monotonicAtomicViews(
+ final OptimisticTransactionDB txnDb, final WriteOptions writeOptions,
+ final ReadOptions readOptions) throws RocksDBException {
+
+ final byte keyX[] = "x".getBytes(UTF_8);
+ final byte valueX[] = "x".getBytes(UTF_8);
+
+ final byte keyY[] = "y".getBytes(UTF_8);
+ final byte valueY[] = "y".getBytes(UTF_8);
+
+ try (final OptimisticTransactionOptions txnOptions =
+ new OptimisticTransactionOptions().setSetSnapshot(true);
+ final Transaction txn =
+ txnDb.beginTransaction(writeOptions, txnOptions)) {
+
+ // Do some reads and writes to key "x"
+ Snapshot snapshot = txnDb.getSnapshot();
+ readOptions.setSnapshot(snapshot);
+ byte[] value = txn.get(readOptions, keyX);
+      txn.put(keyX, valueX);
+
+ // Do a write outside of the transaction to key "y"
+ txnDb.put(writeOptions, keyY, valueY);
+
+ // Set a new snapshot in the transaction
+ txn.setSnapshot();
+ snapshot = txnDb.getSnapshot();
+ readOptions.setSnapshot(snapshot);
+
+ // Do some reads and writes to key "y"
+ // Since the snapshot was advanced, the write done outside of the
+ // transaction does not conflict.
+ value = txn.getForUpdate(readOptions, keyY, true);
+ txn.put(keyY, valueY);
+
+ // Commit. Since the snapshot was advanced, the write done outside of the
+ // transaction does not prevent this transaction from Committing.
+ txn.commit();
+
+ } finally {
+ // Clear snapshot from read options since it is no longer valid
+ readOptions.setSnapshot(null);
+ }
+ }
+}
diff --git a/src/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java b/src/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java
new file mode 100644
index 000000000..650b1b2f6
--- /dev/null
+++ b/src/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java
@@ -0,0 +1,78 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+import org.rocksdb.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class RocksDBColumnFamilySample {
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ public static void main(final String[] args) throws RocksDBException {
+ if (args.length < 1) {
+ System.out.println(
+ "usage: RocksDBColumnFamilySample db_path");
+ System.exit(-1);
+ }
+
+ final String db_path = args[0];
+
+ System.out.println("RocksDBColumnFamilySample");
+ try(final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options, db_path)) {
+
+ assert(db != null);
+
+ // create column family
+ try(final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(
+ new ColumnFamilyDescriptor("new_cf".getBytes(),
+ new ColumnFamilyOptions()))) {
+ assert (columnFamilyHandle != null);
+ }
+ }
+
+ // open DB with two column families
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ new ArrayList<>();
+ // have to open default column family
+ columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
+ RocksDB.DEFAULT_COLUMN_FAMILY, new ColumnFamilyOptions()));
+ // open the new one, too
+ columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
+ "new_cf".getBytes(), new ColumnFamilyOptions()));
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+ try(final DBOptions options = new DBOptions();
+ final RocksDB db = RocksDB.open(options, db_path,
+ columnFamilyDescriptors, columnFamilyHandles)) {
+ assert(db != null);
+
+ try {
+        // put a key/value into the default column family
+ db.put(columnFamilyHandles.get(0), new WriteOptions(),
+ "key".getBytes(), "value".getBytes());
+
+ // atomic write
+ try (final WriteBatch wb = new WriteBatch()) {
+ wb.put(columnFamilyHandles.get(0), "key2".getBytes(),
+ "value2".getBytes());
+ wb.put(columnFamilyHandles.get(1), "key3".getBytes(),
+ "value3".getBytes());
+ wb.remove(columnFamilyHandles.get(0), "key".getBytes());
+ db.write(new WriteOptions(), wb);
+ }
+
+ // drop column family
+ db.dropColumnFamily(columnFamilyHandles.get(1));
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+ handle.close();
+ }
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/samples/src/main/java/RocksDBSample.java b/src/rocksdb/java/samples/src/main/java/RocksDBSample.java
new file mode 100644
index 000000000..f61995ed9
--- /dev/null
+++ b/src/rocksdb/java/samples/src/main/java/RocksDBSample.java
@@ -0,0 +1,303 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+import java.lang.IllegalArgumentException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.ArrayList;
+
+import org.rocksdb.*;
+import org.rocksdb.util.SizeUnit;
+
+public class RocksDBSample {
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ public static void main(final String[] args) {
+ if (args.length < 1) {
+ System.out.println("usage: RocksDBSample db_path");
+ System.exit(-1);
+ }
+
+ final String db_path = args[0];
+ final String db_path_not_found = db_path + "_not_found";
+
+ System.out.println("RocksDBSample");
+ try (final Options options = new Options();
+ final Filter bloomFilter = new BloomFilter(10);
+ final ReadOptions readOptions = new ReadOptions()
+ .setFillCache(false);
+ final Statistics stats = new Statistics();
+         final RateLimiter rateLimiter = new RateLimiter(10000000, 10000, 10)) {
+
+ try (final RocksDB db = RocksDB.open(options, db_path_not_found)) {
+ assert (false);
+ } catch (final RocksDBException e) {
+ System.out.format("Caught the expected exception -- %s\n", e);
+ }
+
+ try {
+ options.setCreateIfMissing(true)
+ .setStatistics(stats)
+ .setWriteBufferSize(8 * SizeUnit.KB)
+ .setMaxWriteBufferNumber(3)
+ .setMaxBackgroundCompactions(10)
+ .setCompressionType(CompressionType.SNAPPY_COMPRESSION)
+ .setCompactionStyle(CompactionStyle.UNIVERSAL);
+ } catch (final IllegalArgumentException e) {
+ assert (false);
+ }
+
+ assert (options.createIfMissing() == true);
+ assert (options.writeBufferSize() == 8 * SizeUnit.KB);
+ assert (options.maxWriteBufferNumber() == 3);
+ assert (options.maxBackgroundCompactions() == 10);
+ assert (options.compressionType() == CompressionType.SNAPPY_COMPRESSION);
+ assert (options.compactionStyle() == CompactionStyle.UNIVERSAL);
+
+ assert (options.memTableFactoryName().equals("SkipListFactory"));
+ options.setMemTableConfig(
+ new HashSkipListMemTableConfig()
+ .setHeight(4)
+ .setBranchingFactor(4)
+ .setBucketCount(2000000));
+ assert (options.memTableFactoryName().equals("HashSkipListRepFactory"));
+
+ options.setMemTableConfig(
+ new HashLinkedListMemTableConfig()
+ .setBucketCount(100000));
+ assert (options.memTableFactoryName().equals("HashLinkedListRepFactory"));
+
+ options.setMemTableConfig(
+ new VectorMemTableConfig().setReservedSize(10000));
+ assert (options.memTableFactoryName().equals("VectorRepFactory"));
+
+ options.setMemTableConfig(new SkipListMemTableConfig());
+ assert (options.memTableFactoryName().equals("SkipListFactory"));
+
+ options.setTableFormatConfig(new PlainTableConfig());
+ // Plain-Table requires mmap read
+ options.setAllowMmapReads(true);
+ assert (options.tableFactoryName().equals("PlainTable"));
+
+ options.setRateLimiter(rateLimiter);
+
+ final BlockBasedTableConfig table_options = new BlockBasedTableConfig();
+ table_options.setBlockCacheSize(64 * SizeUnit.KB)
+ .setFilter(bloomFilter)
+ .setCacheNumShardBits(6)
+ .setBlockSizeDeviation(5)
+ .setBlockRestartInterval(10)
+ .setCacheIndexAndFilterBlocks(true)
+ .setHashIndexAllowCollision(false)
+ .setBlockCacheCompressedSize(64 * SizeUnit.KB)
+ .setBlockCacheCompressedNumShardBits(10);
+
+ assert (table_options.blockCacheSize() == 64 * SizeUnit.KB);
+ assert (table_options.cacheNumShardBits() == 6);
+ assert (table_options.blockSizeDeviation() == 5);
+ assert (table_options.blockRestartInterval() == 10);
+ assert (table_options.cacheIndexAndFilterBlocks() == true);
+ assert (table_options.hashIndexAllowCollision() == false);
+ assert (table_options.blockCacheCompressedSize() == 64 * SizeUnit.KB);
+ assert (table_options.blockCacheCompressedNumShardBits() == 10);
+
+ options.setTableFormatConfig(table_options);
+ assert (options.tableFactoryName().equals("BlockBasedTable"));
+
+ try (final RocksDB db = RocksDB.open(options, db_path)) {
+ db.put("hello".getBytes(), "world".getBytes());
+
+ final byte[] value = db.get("hello".getBytes());
+ assert ("world".equals(new String(value)));
+
+ final String str = db.getProperty("rocksdb.stats");
+ assert (str != null && !str.equals(""));
+ } catch (final RocksDBException e) {
+ System.out.format("[ERROR] caught the unexpected exception -- %s\n", e);
+ assert (false);
+ }
+
+ try (final RocksDB db = RocksDB.open(options, db_path)) {
+ db.put("hello".getBytes(), "world".getBytes());
+ byte[] value = db.get("hello".getBytes());
+ System.out.format("Get('hello') = %s\n",
+ new String(value));
+
+ for (int i = 1; i <= 9; ++i) {
+ for (int j = 1; j <= 9; ++j) {
+ db.put(String.format("%dx%d", i, j).getBytes(),
+ String.format("%d", i * j).getBytes());
+ }
+ }
+
+ for (int i = 1; i <= 9; ++i) {
+ for (int j = 1; j <= 9; ++j) {
+ System.out.format("%s ", new String(db.get(
+ String.format("%dx%d", i, j).getBytes())));
+ }
+ System.out.println("");
+ }
+
+ // write batch test
+ try (final WriteOptions writeOpt = new WriteOptions()) {
+ for (int i = 10; i <= 19; ++i) {
+ try (final WriteBatch batch = new WriteBatch()) {
+ for (int j = 10; j <= 19; ++j) {
+ batch.put(String.format("%dx%d", i, j).getBytes(),
+ String.format("%d", i * j).getBytes());
+ }
+ db.write(writeOpt, batch);
+ }
+ }
+ }
+ for (int i = 10; i <= 19; ++i) {
+ for (int j = 10; j <= 19; ++j) {
+ assert (new String(
+ db.get(String.format("%dx%d", i, j).getBytes())).equals(
+ String.format("%d", i * j)));
+ System.out.format("%s ", new String(db.get(
+ String.format("%dx%d", i, j).getBytes())));
+ }
+ System.out.println("");
+ }
+
+ value = db.get("1x1".getBytes());
+ assert (value != null);
+ value = db.get("world".getBytes());
+ assert (value == null);
+ value = db.get(readOptions, "world".getBytes());
+ assert (value == null);
+
+ final byte[] testKey = "asdf".getBytes();
+ final byte[] testValue =
+ "asdfghjkl;'?><MNBVCXZQWERTYUIOP{+_)(*&^%$#@".getBytes();
+ db.put(testKey, testValue);
+ byte[] testResult = db.get(testKey);
+ assert (testResult != null);
+ assert (Arrays.equals(testValue, testResult));
+ assert (new String(testValue).equals(new String(testResult)));
+ testResult = db.get(readOptions, testKey);
+ assert (testResult != null);
+ assert (Arrays.equals(testValue, testResult));
+ assert (new String(testValue).equals(new String(testResult)));
+
+ final byte[] insufficientArray = new byte[10];
+ final byte[] enoughArray = new byte[50];
+ int len;
+ len = db.get(testKey, insufficientArray);
+ assert (len > insufficientArray.length);
+ len = db.get("asdfjkl;".getBytes(), enoughArray);
+ assert (len == RocksDB.NOT_FOUND);
+ len = db.get(testKey, enoughArray);
+ assert (len == testValue.length);
+
+ len = db.get(readOptions, testKey, insufficientArray);
+ assert (len > insufficientArray.length);
+ len = db.get(readOptions, "asdfjkl;".getBytes(), enoughArray);
+ assert (len == RocksDB.NOT_FOUND);
+ len = db.get(readOptions, testKey, enoughArray);
+ assert (len == testValue.length);
+
+ db.remove(testKey);
+ len = db.get(testKey, enoughArray);
+ assert (len == RocksDB.NOT_FOUND);
+
+ // repeat the test with WriteOptions
+ try (final WriteOptions writeOpts = new WriteOptions()) {
+ writeOpts.setSync(true);
+ writeOpts.setDisableWAL(true);
+ db.put(writeOpts, testKey, testValue);
+ len = db.get(testKey, enoughArray);
+ assert (len == testValue.length);
+ assert (new String(testValue).equals(
+ new String(enoughArray, 0, len)));
+ }
+
+ try {
+ for (final TickerType statsType : TickerType.values()) {
+ if (statsType != TickerType.TICKER_ENUM_MAX) {
+ stats.getTickerCount(statsType);
+ }
+ }
+ System.out.println("getTickerCount() passed.");
+ } catch (final Exception e) {
+ System.out.println("Failed in call to getTickerCount()");
+ assert (false); //Should never reach here.
+ }
+
+ try {
+ for (final HistogramType histogramType : HistogramType.values()) {
+ if (histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
+ HistogramData data = stats.getHistogramData(histogramType);
+ }
+ }
+ System.out.println("getHistogramData() passed.");
+ } catch (final Exception e) {
+ System.out.println("Failed in call to getHistogramData()");
+ assert (false); //Should never reach here.
+ }
+
+ try (final RocksIterator iterator = db.newIterator()) {
+
+ boolean seekToFirstPassed = false;
+ for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
+ iterator.status();
+ assert (iterator.key() != null);
+ assert (iterator.value() != null);
+ seekToFirstPassed = true;
+ }
+ if (seekToFirstPassed) {
+ System.out.println("iterator seekToFirst tests passed.");
+ }
+
+ boolean seekToLastPassed = false;
+ for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
+ iterator.status();
+ assert (iterator.key() != null);
+ assert (iterator.value() != null);
+ seekToLastPassed = true;
+ }
+
+ if (seekToLastPassed) {
+ System.out.println("iterator seekToLastPassed tests passed.");
+ }
+
+ iterator.seekToFirst();
+ iterator.seek(iterator.key());
+ assert (iterator.key() != null);
+ assert (iterator.value() != null);
+
+ System.out.println("iterator seek test passed.");
+
+ }
+ System.out.println("iterator tests passed.");
+
+ final List<byte[]> keys = new ArrayList<>();
+ try (final RocksIterator iterator = db.newIterator()) {
+ for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
+ keys.add(iterator.key());
+ }
+ }
+
+ Map<byte[], byte[]> values = db.multiGet(keys);
+ assert (values.size() == keys.size());
+ for (final byte[] value1 : values.values()) {
+ assert (value1 != null);
+ }
+
+ values = db.multiGet(new ReadOptions(), keys);
+ assert (values.size() == keys.size());
+ for (final byte[] value1 : values.values()) {
+ assert (value1 != null);
+ }
+ } catch (final RocksDBException e) {
+ System.err.println(e);
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/samples/src/main/java/TransactionSample.java b/src/rocksdb/java/samples/src/main/java/TransactionSample.java
new file mode 100644
index 000000000..b88a68f12
--- /dev/null
+++ b/src/rocksdb/java/samples/src/main/java/TransactionSample.java
@@ -0,0 +1,183 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+import org.rocksdb.*;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * Demonstrates using Transactions on a TransactionDB with
+ * varying isolation guarantees
+ */
+public class TransactionSample {
+ private static final String dbPath = "/tmp/rocksdb_transaction_example";
+
+ public static final void main(final String args[]) throws RocksDBException {
+
+ try(final Options options = new Options()
+ .setCreateIfMissing(true);
+ final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final TransactionDB txnDb =
+ TransactionDB.open(options, txnDbOptions, dbPath)) {
+
+ try (final WriteOptions writeOptions = new WriteOptions();
+ final ReadOptions readOptions = new ReadOptions()) {
+
+ ////////////////////////////////////////////////////////
+ //
+ // Simple Transaction Example ("Read Committed")
+ //
+ ////////////////////////////////////////////////////////
+ readCommitted(txnDb, writeOptions, readOptions);
+
+
+ ////////////////////////////////////////////////////////
+ //
+ // "Repeatable Read" (Snapshot Isolation) Example
+ // -- Using a single Snapshot
+ //
+ ////////////////////////////////////////////////////////
+ repeatableRead(txnDb, writeOptions, readOptions);
+
+
+ ////////////////////////////////////////////////////////
+ //
+ // "Read Committed" (Monotonic Atomic Views) Example
+ // --Using multiple Snapshots
+ //
+ ////////////////////////////////////////////////////////
+ readCommitted_monotonicAtomicViews(txnDb, writeOptions, readOptions);
+ }
+ }
+ }
+
+ /**
+ * Demonstrates "Read Committed" isolation
+ */
+ private static void readCommitted(final TransactionDB txnDb,
+ final WriteOptions writeOptions, final ReadOptions readOptions)
+ throws RocksDBException {
+ final byte key1[] = "abc".getBytes(UTF_8);
+ final byte value1[] = "def".getBytes(UTF_8);
+
+ final byte key2[] = "xyz".getBytes(UTF_8);
+ final byte value2[] = "zzz".getBytes(UTF_8);
+
+ // Start a transaction
+ try(final Transaction txn = txnDb.beginTransaction(writeOptions)) {
+ // Read a key in this transaction
+ byte[] value = txn.get(readOptions, key1);
+ assert(value == null);
+
+ // Write a key in this transaction
+ txn.put(key1, value1);
+
+ // Read a key OUTSIDE this transaction. Does not affect txn.
+ value = txnDb.get(readOptions, key1);
+ assert(value == null);
+
+ // Write a key OUTSIDE of this transaction.
+ // Does not affect txn since this is an unrelated key.
+ // If we wrote key 'abc' here, the transaction would fail to commit.
+ txnDb.put(writeOptions, key2, value2);
+
+ // Commit transaction
+ txn.commit();
+ }
+ }
+
+ /**
+ * Demonstrates "Repeatable Read" (Snapshot Isolation) isolation
+ */
+ private static void repeatableRead(final TransactionDB txnDb,
+ final WriteOptions writeOptions, final ReadOptions readOptions)
+ throws RocksDBException {
+
+ final byte key1[] = "ghi".getBytes(UTF_8);
+ final byte value1[] = "jkl".getBytes(UTF_8);
+
+ // Set a snapshot at start of transaction by setting setSnapshot(true)
+ try(final TransactionOptions txnOptions = new TransactionOptions()
+ .setSetSnapshot(true);
+ final Transaction txn =
+ txnDb.beginTransaction(writeOptions, txnOptions)) {
+
+ final Snapshot snapshot = txn.getSnapshot();
+
+ // Write a key OUTSIDE of transaction
+ txnDb.put(writeOptions, key1, value1);
+
+ // Attempt to read a key using the snapshot. This will fail since
+ // the previous write outside this txn conflicts with this read.
+ readOptions.setSnapshot(snapshot);
+
+ try {
+ final byte[] value = txn.getForUpdate(readOptions, key1, true);
+ throw new IllegalStateException();
+ } catch(final RocksDBException e) {
+ assert(e.getStatus().getCode() == Status.Code.Busy);
+ }
+
+ txn.rollback();
+ } finally {
+ // Clear snapshot from read options since it is no longer valid
+ readOptions.setSnapshot(null);
+ }
+ }
+
+ /**
+ * Demonstrates "Read Committed" (Monotonic Atomic Views) isolation
+ *
+ * In this example, we set the snapshot multiple times. This is probably
+ * only necessary if you have very strict isolation requirements to
+ * implement.
+ */
+ private static void readCommitted_monotonicAtomicViews(
+ final TransactionDB txnDb, final WriteOptions writeOptions,
+ final ReadOptions readOptions) throws RocksDBException {
+
+ final byte keyX[] = "x".getBytes(UTF_8);
+ final byte valueX[] = "x".getBytes(UTF_8);
+
+ final byte keyY[] = "y".getBytes(UTF_8);
+ final byte valueY[] = "y".getBytes(UTF_8);
+
+ try (final TransactionOptions txnOptions = new TransactionOptions()
+ .setSetSnapshot(true);
+ final Transaction txn =
+ txnDb.beginTransaction(writeOptions, txnOptions)) {
+
+ // Do some reads and writes to key "x"
+ Snapshot snapshot = txnDb.getSnapshot();
+ readOptions.setSnapshot(snapshot);
+ byte[] value = txn.get(readOptions, keyX);
+      txn.put(keyX, valueX);
+
+ // Do a write outside of the transaction to key "y"
+ txnDb.put(writeOptions, keyY, valueY);
+
+ // Set a new snapshot in the transaction
+ txn.setSnapshot();
+ txn.setSavePoint();
+ snapshot = txnDb.getSnapshot();
+ readOptions.setSnapshot(snapshot);
+
+ // Do some reads and writes to key "y"
+ // Since the snapshot was advanced, the write done outside of the
+ // transaction does not conflict.
+ value = txn.getForUpdate(readOptions, keyY, true);
+ txn.put(keyY, valueY);
+
+ // Decide we want to revert the last write from this transaction.
+ txn.rollbackToSavePoint();
+
+ // Commit.
+ txn.commit();
+ } finally {
+ // Clear snapshot from read options since it is no longer valid
+ readOptions.setSnapshot(null);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
new file mode 100644
index 000000000..2f0d4f3ca
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
@@ -0,0 +1,59 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * A CompactionFilter allows an application to modify/delete a key-value at
+ * the time of compaction.
+ *
+ * At present we just permit an overriding Java class to wrap a C++
+ * implementation
+ */
+public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>>
+ extends RocksObject {
+
+ public static class Context {
+ private final boolean fullCompaction;
+ private final boolean manualCompaction;
+
+ public Context(final boolean fullCompaction, final boolean manualCompaction) {
+ this.fullCompaction = fullCompaction;
+ this.manualCompaction = manualCompaction;
+ }
+
+ /**
+ * Does this compaction run include all data files
+ *
+ * @return true if this is a full compaction run
+ */
+ public boolean isFullCompaction() {
+ return fullCompaction;
+ }
+
+ /**
+ * Is this compaction requested by the client,
+ * or is it occurring as an automatic compaction process?
+ *
+ * @return true if the compaction was initiated by the client
+ */
+ public boolean isManualCompaction() {
+ return manualCompaction;
+ }
+ }
+
+ protected AbstractCompactionFilter(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Deletes the underlying C++ compaction filter pointer.
+ *
+ * Note that this function should be called only after all
+ * RocksDB instances referencing the compaction filter are closed.
+ * Otherwise undefined behavior will occur.
+ */
+ @Override
+ protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
new file mode 100644
index 000000000..380b4461d
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
@@ -0,0 +1,77 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Each compaction will create a new {@link AbstractCompactionFilter}
+ * allowing the application to know about different compactions
+ *
+ * @param <T> The concrete type of the compaction filter
+ */
+public abstract class AbstractCompactionFilterFactory<T extends AbstractCompactionFilter<?>>
+ extends RocksCallbackObject {
+
+ public AbstractCompactionFilterFactory() {
+ super(null);
+ }
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return createNewCompactionFilterFactory0();
+ }
+
+ /**
+ * Called from JNI, see compaction_filter_factory_jnicallback.cc
+ *
+ * @param fullCompaction {@link AbstractCompactionFilter.Context#fullCompaction}
+ * @param manualCompaction {@link AbstractCompactionFilter.Context#manualCompaction}
+ *
+ * @return native handle of the CompactionFilter
+ */
+ private long createCompactionFilter(final boolean fullCompaction,
+ final boolean manualCompaction) {
+ final T filter = createCompactionFilter(
+ new AbstractCompactionFilter.Context(fullCompaction, manualCompaction));
+
+ // CompactionFilterFactory::CreateCompactionFilter returns a std::unique_ptr
+ // which therefore has ownership of the underlying native object
+ filter.disOwnNativeHandle();
+
+ return filter.nativeHandle_;
+ }
+
+ /**
+ * Create a new compaction filter
+ *
+ * @param context The context describing the need for a new compaction filter
+ *
+ * @return A new instance of {@link AbstractCompactionFilter}
+ */
+ public abstract T createCompactionFilter(
+ final AbstractCompactionFilter.Context context);
+
+ /**
+ * A name which identifies this compaction filter
+ *
+ * The name will be printed to the LOG file on start up for diagnosis
+ *
+ * @return name which identifies this compaction filter.
+ */
+ public abstract String name();
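+
+ // A minimal sketch of a concrete factory. It assumes the bundled
+ // org.rocksdb.RemoveEmptyValueCompactionFilter is a suitable filter;
+ // any AbstractCompactionFilter subclass works the same way:
+ //
+ //   public class RemoveEmptyValueFactory
+ //       extends AbstractCompactionFilterFactory<RemoveEmptyValueCompactionFilter> {
+ //     @Override
+ //     public RemoveEmptyValueCompactionFilter createCompactionFilter(
+ //         final AbstractCompactionFilter.Context context) {
+ //       return new RemoveEmptyValueCompactionFilter(); // one per compaction
+ //     }
+ //
+ //     @Override
+ //     public String name() {
+ //       return "RemoveEmptyValueFactory";
+ //     }
+ //   }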
+
+ /**
+ * We override {@link RocksCallbackObject#disposeInternal()}
+ * as disposing of a rocksdb::AbstractCompactionFilterFactory requires
+ * a slightly different approach as it is a std::shared_ptr
+ */
+ @Override
+ protected void disposeInternal() {
+ disposeInternal(nativeHandle_);
+ }
+
+ private native long createNewCompactionFilterFactory0();
+ private native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java
new file mode 100644
index 000000000..c08e9127c
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java
@@ -0,0 +1,124 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Comparators are used by RocksDB to determine
+ * the ordering of keys.
+ *
+ * Implementations of Comparators in Java should extend this class.
+ */
+public abstract class AbstractComparator
+ extends RocksCallbackObject {
+
+ AbstractComparator() {
+ super();
+ }
+
+ protected AbstractComparator(final ComparatorOptions copt) {
+ super(copt.nativeHandle_);
+ }
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return createNewComparator(nativeParameterHandles[0]);
+ }
+
+ /**
+ * Get the type of this comparator.
+ *
+ * Used for determining the correct C++ cast in native code.
+ *
+ * @return The type of the comparator.
+ */
+ ComparatorType getComparatorType() {
+ return ComparatorType.JAVA_COMPARATOR;
+ }
+
+ /**
+ * The name of the comparator. Used to check for comparator
+ * mismatches (i.e., a DB created with one comparator is
+ * accessed using a different comparator).
+ *
+ * A new name should be used whenever
+ * the comparator implementation changes in a way that will cause
+ * the relative ordering of any two keys to change.
+ *
+ * Names starting with "rocksdb." are reserved and should not be used.
+ *
+ * @return The name of this comparator implementation
+ */
+ public abstract String name();
+
+ /**
+ * Three-way key comparison. Implementations should provide a
+ * <a href="https://en.wikipedia.org/wiki/Total_order">total order</a>
+ * on keys that might be passed to it.
+ *
+ * The implementation may modify the {@code ByteBuffer}s passed in, though
+ * it would be unconventional to modify the "limit" or any of the
+ * underlying bytes. As a callback, RocksJava will ensure that {@code a}
+ * is a different instance from {@code b}.
+ *
+ * @param a buffer containing the first key in its "remaining" elements
+ * @param b buffer containing the second key in its "remaining" elements
+ *
+ * @return Should return either:
+ * 1) &lt; 0 if "a" &lt; "b"
+ * 2) == 0 if "a" == "b"
+ * 3) &gt; 0 if "a" &gt; "b"
+ */
+ public abstract int compare(final ByteBuffer a, final ByteBuffer b);
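+
+ // A sketch of a conforming implementation (the class name is
+ // illustrative). Note that ByteBuffer.compareTo() compares bytes as
+ // *signed* values, so this defines a valid total order but is not
+ // byte-identical to RocksDB's built-in (unsigned) bytewise comparator:
+ //
+ //   public class SignedBytewiseComparator extends AbstractComparator {
+ //     public SignedBytewiseComparator(final ComparatorOptions copt) {
+ //       super(copt);
+ //     }
+ //
+ //     @Override
+ //     public String name() {
+ //       return "example.SignedBytewiseComparator";
+ //     }
+ //
+ //     @Override
+ //     public int compare(final ByteBuffer a, final ByteBuffer b) {
+ //       return a.compareTo(b);
+ //     }
+ //   }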
+
+ /**
+ * <p>Used to reduce the space requirements
+ * for internal data structures like index blocks.</p>
+ *
+ * <p>If start &lt; limit, you may modify start to be a
+ * shorter string in [start, limit).</p>
+ *
+ * If you modify start, it is expected that you set the byte buffer so that
+ * a subsequent read of start.remaining() bytes from start.position()
+ * to start.limit() will obtain the new start value.
+ *
+ * <p>Simple comparator implementations may leave start unchanged,
+ * i.e., an implementation of this method that does nothing is correct.</p>
+ *
+ * @param start the start
+ * @param limit the limit
+ */
+ public void findShortestSeparator(final ByteBuffer start,
+ final ByteBuffer limit) {
+ // no-op
+ }
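+
+ // Worked example (a sketch of what an implementation might do): given
+ // start = "helloWorld" and limit = "helloZoo", shortening start to
+ // "helloX" is valid, since "helloWorld" <= "helloX" < "helloZoo"
+ // still holds while fewer bytes are stored in the index block.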
+
+ /**
+ * <p>Used to reduce the space requirements
+ * for internal data structures like index blocks.</p>
+ *
+ * <p>You may change key to a shorter key (key1) where
+ * key1 &ge; key.</p>
+ *
+ * <p>Simple comparator implementations may leave the key unchanged,
+ * i.e., an implementation of
+ * this method that does nothing is correct.</p>
+ *
+ * @param key the key
+ */
+ public void findShortSuccessor(final ByteBuffer key) {
+ // no-op
+ }
+
+ public final boolean usingDirectBuffers() {
+ return usingDirectBuffers(nativeHandle_);
+ }
+
+ private native boolean usingDirectBuffers(final long nativeHandle);
+
+ private native long createNewComparator(final long comparatorOptionsHandle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
new file mode 100644
index 000000000..b732d2495
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
@@ -0,0 +1,125 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This class is intentionally private,
+ * it holds methods which are called
+ * from C++ to interact with a Comparator
+ * written in Java.
+ *
+ * Placing these bridge methods in this
+ * class keeps the API of the
+ * {@link org.rocksdb.AbstractComparator} clean.
+ */
+class AbstractComparatorJniBridge {
+
+ /**
+ * Only called from JNI.
+ *
+ * Simply a bridge to calling
+ * {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)},
+ * which ensures that the byte buffer lengths are correct
+ * before and after the call.
+ *
+ * @param comparator the comparator object on which to
+ * call {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)}
+ * @param a buffer access to first key
+ * @param aLen the length of the a key,
+ * may be smaller than the buffer {@code a}
+ * @param b buffer access to second key
+ * @param bLen the length of the b key,
+ * may be smaller than the buffer {@code b}
+ *
+ * @return the result of the comparison
+ */
+ private static int compareInternal(
+ final AbstractComparator comparator,
+ final ByteBuffer a, final int aLen,
+ final ByteBuffer b, final int bLen) {
+ if (aLen != -1) {
+ // remember the original position, then clamp the view to the key length
+ a.mark();
+ a.limit(aLen);
+ }
+ if (bLen != -1) {
+ b.mark();
+ b.limit(bLen);
+ }
+
+ final int c = comparator.compare(a, b);
+
+ if (aLen != -1) {
+ a.reset();
+ }
+ if (bLen != -1) {
+ b.reset();
+ }
+
+ return c;
+ }
+
+ /**
+ * Only called from JNI.
+ *
+ * Simply a bridge to calling
+ * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)},
+ * which ensures that the byte buffer lengths are correct
+ * before the call.
+ *
+ * @param comparator the comparator object on which to
+ * call {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)}
+ * @param start buffer access to the start key
+ * @param startLen the length of the start key,
+ * may be smaller than the buffer {@code start}
+ * @param limit buffer access to the limit key
+ * @param limitLen the length of the limit key,
+ * may be smaller than the buffer {@code limit}
+ *
+ * @return either {@code startLen} if the start key is unchanged, otherwise
+ * the new length of the start key
+ */
+ private static int findShortestSeparatorInternal(
+ final AbstractComparator comparator,
+ final ByteBuffer start, final int startLen,
+ final ByteBuffer limit, final int limitLen) {
+ if (startLen != -1) {
+ start.limit(startLen);
+ }
+ if (limitLen != -1) {
+ limit.limit(limitLen);
+ }
+ comparator.findShortestSeparator(start, limit);
+ return start.remaining();
+ }
+
+ /**
+ * Only called from JNI.
+ *
+ * Simply a bridge to calling
+ * {@link AbstractComparator#findShortSuccessor(ByteBuffer)},
+ * which ensures that the byte buffer length is correct
+ * before the call.
+ *
+ * @param comparator the comparator object on which to
+ * call {@link AbstractComparator#findShortSuccessor(ByteBuffer)}
+ * @param key buffer access to the key
+ * @param keyLen the length of the key,
+ * may be smaller than the buffer {@code key}
+ *
+ * @return either keyLen if the key is unchanged, otherwise the new length of the key
+ */
+ private static int findShortSuccessorInternal(
+ final AbstractComparator comparator,
+ final ByteBuffer key, final int keyLen) {
+ if (keyLen != -1) {
+ key.limit(keyLen);
+ }
+ comparator.findShortSuccessor(key);
+ return key.remaining();
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
new file mode 100644
index 000000000..8532debf8
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
@@ -0,0 +1,67 @@
+// Copyright (c) 2016, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Offers functionality for implementations of
+ * {@link AbstractNativeReference} which have an immutable reference to the
+ * underlying native C++ object
+ */
+//@ThreadSafe
+public abstract class AbstractImmutableNativeReference
+ extends AbstractNativeReference {
+
+ /**
+ * A flag indicating whether the current {@code AbstractNativeReference} is
+ * responsible to free the underlying C++ object
+ */
+ protected final AtomicBoolean owningHandle_;
+
+ protected AbstractImmutableNativeReference(final boolean owningHandle) {
+ this.owningHandle_ = new AtomicBoolean(owningHandle);
+ }
+
+ @Override
+ public boolean isOwningHandle() {
+ return owningHandle_.get();
+ }
+
+ /**
+ * Releases this {@code AbstractNativeReference} from the responsibility of
+ * freeing the underlying native C++ object
+ * <p>
+ * This will prevent the object from attempting to delete the underlying
+ * native object in its finalizer. This must be used when another object
+ * takes over ownership of the native object or both will attempt to delete
+ * the underlying object when garbage collected.
+ * <p>
+ * When {@code disOwnNativeHandle()} is called, {@code dispose()} will
+ * subsequently take no action. As a result, incorrect use of this function
+ * may cause a memory leak.
+ * </p>
+ *
+ * @see #dispose()
+ */
+ protected final void disOwnNativeHandle() {
+ owningHandle_.set(false);
+ }
+
+ @Override
+ public void close() {
+ if (owningHandle_.compareAndSet(true, false)) {
+ disposeInternal();
+ }
+ }
+
+ /**
+ * The helper function of {@link AbstractNativeReference#dispose()}
+ * which all subclasses of {@code AbstractImmutableNativeReference} must
+ * implement to release their underlying native C++ objects.
+ */
+ protected abstract void disposeInternal();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
new file mode 100644
index 000000000..6180fba15
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
@@ -0,0 +1,256 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+import java.util.*;
+
+public abstract class AbstractMutableOptions {
+
+ protected static final String KEY_VALUE_PAIR_SEPARATOR = ";";
+ protected static final char KEY_VALUE_SEPARATOR = '=';
+ static final String INT_ARRAY_INT_SEPARATOR = ",";
+
+ protected final String[] keys;
+ private final String[] values;
+
+ /**
+ * User must use builder pattern, or parser.
+ *
+ * @param keys the keys
+ * @param values the values
+ */
+ protected AbstractMutableOptions(final String[] keys, final String[] values) {
+ this.keys = keys;
+ this.values = values;
+ }
+
+ String[] getKeys() {
+ return keys;
+ }
+
+ String[] getValues() {
+ return values;
+ }
+
+ /**
+ * Returns a string representation of MutableOptions which
+ * is suitable for consumption by {@code #parse(String)}.
+ *
+ * @return String representation of MutableOptions
+ */
+ @Override
+ public String toString() {
+ final StringBuilder buffer = new StringBuilder();
+ for(int i = 0; i < keys.length; i++) {
+ buffer
+ .append(keys[i])
+ .append(KEY_VALUE_SEPARATOR)
+ .append(values[i]);
+
+ if(i + 1 < keys.length) {
+ buffer.append(KEY_VALUE_PAIR_SEPARATOR);
+ }
+ }
+ return buffer.toString();
+ }
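+
+ // For example (a sketch, using key names from the mutable
+ // ColumnFamily options), two entries would render as:
+ //   write_buffer_size=4194304;disable_auto_compactions=false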
+
+ public static abstract class AbstractMutableOptionsBuilder<
+ T extends AbstractMutableOptions,
+ U extends AbstractMutableOptionsBuilder<T, U, K>,
+ K extends MutableOptionKey> {
+
+ private final Map<K, MutableOptionValue<?>> options = new LinkedHashMap<>();
+
+ protected abstract U self();
+
+ /**
+ * Get all of the possible keys
+ *
+ * @return A map of all keys, indexed by name.
+ */
+ protected abstract Map<String, K> allKeys();
+
+ /**
+ * Construct a sub-class instance of {@link AbstractMutableOptions}.
+ *
+ * @param keys the keys
+ * @param values the values
+ *
+ * @return an instance of the options.
+ */
+ protected abstract T build(final String[] keys, final String[] values);
+
+ public T build() {
+ final String[] keys = new String[options.size()];
+ final String[] values = new String[options.size()];
+
+ int i = 0;
+ for (final Map.Entry<K, MutableOptionValue<?>> option : options.entrySet()) {
+ keys[i] = option.getKey().name();
+ values[i] = option.getValue().asString();
+ i++;
+ }
+
+ return build(keys, values);
+ }
+
+ protected U setDouble(
+ final K key, final double value) {
+ if (key.getValueType() != MutableOptionKey.ValueType.DOUBLE) {
+ throw new IllegalArgumentException(
+ key + " does not accept a double value");
+ }
+ options.put(key, MutableOptionValue.fromDouble(value));
+ return self();
+ }
+
+ protected double getDouble(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+ return value.asDouble();
+ }
+
+ protected U setLong(
+ final K key, final long value) {
+ if(key.getValueType() != MutableOptionKey.ValueType.LONG) {
+ throw new IllegalArgumentException(
+ key + " does not accept a long value");
+ }
+ options.put(key, MutableOptionValue.fromLong(value));
+ return self();
+ }
+
+ protected long getLong(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+ return value.asLong();
+ }
+
+ protected U setInt(
+ final K key, final int value) {
+ if(key.getValueType() != MutableOptionKey.ValueType.INT) {
+ throw new IllegalArgumentException(
+ key + " does not accept an integer value");
+ }
+ options.put(key, MutableOptionValue.fromInt(value));
+ return self();
+ }
+
+ protected int getInt(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+ return value.asInt();
+ }
+
+ protected U setBoolean(
+ final K key, final boolean value) {
+ if(key.getValueType() != MutableOptionKey.ValueType.BOOLEAN) {
+ throw new IllegalArgumentException(
+ key + " does not accept a boolean value");
+ }
+ options.put(key, MutableOptionValue.fromBoolean(value));
+ return self();
+ }
+
+ protected boolean getBoolean(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+ return value.asBoolean();
+ }
+
+ protected U setIntArray(
+ final K key, final int[] value) {
+ if(key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) {
+ throw new IllegalArgumentException(
+ key + " does not accept an int array value");
+ }
+ options.put(key, MutableOptionValue.fromIntArray(value));
+ return self();
+ }
+
+ protected int[] getIntArray(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+ return value.asIntArray();
+ }
+
+ protected <N extends Enum<N>> U setEnum(
+ final K key, final N value) {
+ if(key.getValueType() != MutableOptionKey.ValueType.ENUM) {
+ throw new IllegalArgumentException(
+ key + " does not accept a Enum value");
+ }
+ options.put(key, MutableOptionValue.fromEnum(value));
+ return self();
+ }
+
+ @SuppressWarnings("unchecked")
+ protected <N extends Enum<N>> N getEnum(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if (value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+
+ if (!(value instanceof MutableOptionValue.MutableOptionEnumValue)) {
+ throw new NoSuchElementException(key.name() + " is not of Enum type");
+ }
+
+ return ((MutableOptionValue.MutableOptionEnumValue<N>) value).asObject();
+ }
+
+ public U fromString(
+ final String keyStr, final String valueStr)
+ throws IllegalArgumentException {
+ Objects.requireNonNull(keyStr);
+ Objects.requireNonNull(valueStr);
+
+ final K key = allKeys().get(keyStr);
+ if (key == null) {
+ throw new IllegalArgumentException("Unknown key: " + keyStr);
+ }
+ switch(key.getValueType()) {
+ case DOUBLE:
+ return setDouble(key, Double.parseDouble(valueStr));
+
+ case LONG:
+ return setLong(key, Long.parseLong(valueStr));
+
+ case INT:
+ return setInt(key, Integer.parseInt(valueStr));
+
+ case BOOLEAN:
+ return setBoolean(key, Boolean.parseBoolean(valueStr));
+
+ case INT_ARRAY:
+ final String[] strInts = valueStr
+ .trim().split(INT_ARRAY_INT_SEPARATOR);
+ if(strInts == null || strInts.length == 0) {
+ throw new IllegalArgumentException(
+ "int array value is not correctly formatted");
+ }
+
+ final int[] value = new int[strInts.length];
+ int i = 0;
+ for(final String strInt : strInts) {
+ value[i++] = Integer.parseInt(strInt);
+ }
+ return setIntArray(key, value);
+ }
+
+ throw new IllegalStateException(
+ key + " has unknown value type: " + key.getValueType());
+ }
+ }
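+
+ // Usage sketch, assuming a concrete builder such as
+ // MutableColumnFamilyOptions.builder():
+ //
+ //   final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder b =
+ //       MutableColumnFamilyOptions.builder();
+ //   b.fromString("write_buffer_size", "4194304");      // parsed as LONG
+ //   b.fromString("disable_auto_compactions", "false"); // parsed as BOOLEAN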
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java
new file mode 100644
index 000000000..ffb0776e4
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java
@@ -0,0 +1,76 @@
+// Copyright (c) 2016, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * AbstractNativeReference is the base-class of all RocksDB classes that have
+ * a pointer to a native C++ {@code rocksdb} object.
+ * <p>
+ * AbstractNativeReference has the {@link AbstractNativeReference#dispose()}
+ * method, which frees its associated C++ object.</p>
+ * <p>
+ * This method should be called manually; however, if required, it will be
+ * called automatically during the regular Java GC process via
+ * {@link AbstractNativeReference#finalize()}.</p>
+ * <p>
+ * Note - Java can only see the long member variable (which is the C++ pointer
+ * value to the native object); as such it does not know the real size of the
+ * object and may therefore assign it a low GC priority. So it is strongly
+ * suggested that you manually dispose of objects when you are finished with
+ * them.</p>
+ */
+public abstract class AbstractNativeReference implements AutoCloseable {
+
+ /**
+ * Returns true if we are responsible for freeing the underlying C++ object
+ *
+ * @return true if we are responsible to free the C++ object
+ * @see #dispose()
+ */
+ protected abstract boolean isOwningHandle();
+
+ /**
+ * Frees the underlying C++ object
+ * <p>
+ * It is strongly recommended that the developer calls this after they
+ * have finished using the object.</p>
+ * <p>
+ * Note, that once an instance of {@link AbstractNativeReference} has been
+ * disposed, calling any of its functions will lead to undefined
+ * behavior.</p>
+ */
+ @Override
+ public abstract void close();
+
+ /**
+ * @deprecated Instead use {@link AbstractNativeReference#close()}
+ */
+ @Deprecated
+ public final void dispose() {
+ close();
+ }
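+
+ // The preferred pattern is deterministic disposal via try-with-resources
+ // rather than relying on GC, e.g. (a sketch):
+ //
+ //   try (final Options options = new Options().setCreateIfMissing(true)) {
+ //     // use options; the native object is freed on close()
+ //   }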
+
+ /**
+ * Simply calls {@link AbstractNativeReference#dispose()} to free
+ * any underlying C++ object reference which has not yet been manually
+ * released.
+ *
+ * @deprecated You should not rely on GC of Rocks objects, and instead should
+ * either call {@link AbstractNativeReference#close()} manually or make
+ * use of some sort of ARM (Automatic Resource Management) such as
+ * Java 7's <a href="https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html">try-with-resources</a>
+ * statement
+ */
+ @Override
+ @Deprecated
+ protected void finalize() throws Throwable {
+ if(isOwningHandle()) {
+ //TODO(AR) log a warning message... developer should have called close()
+ }
+ dispose();
+ super.finalize();
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java
new file mode 100644
index 000000000..9e08f1465
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java
@@ -0,0 +1,126 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Base class implementation for Rocks Iterators
+ * in the Java API
+ *
+ * <p>Multiple threads can invoke const methods on a RocksIterator without
+ * external synchronization, but if any of the threads may call a
+ * non-const method, all threads accessing the same RocksIterator must use
+ * external synchronization.</p>
+ *
+ * @param <P> The type of the Parent Object from which the Rocks Iterator was
+ * created. This is used by disposeInternal to avoid double-free
+ * issues with the underlying C++ object.
+ * @see org.rocksdb.RocksObject
+ */
+public abstract class AbstractRocksIterator<P extends RocksObject>
+ extends RocksObject implements RocksIteratorInterface {
+ final P parent_;
+
+ protected AbstractRocksIterator(final P parent,
+ final long nativeHandle) {
+ super(nativeHandle);
+ // parent must point to a valid RocksDB instance.
+ assert (parent != null);
+ // RocksIterator must hold a reference to the related parent instance
+ // to guarantee that when a GC cycle starts, RocksIterator instances
+ // are freed before their parent instances.
+ parent_ = parent;
+ }
+
+ @Override
+ public boolean isValid() {
+ assert (isOwningHandle());
+ return isValid0(nativeHandle_);
+ }
+
+ @Override
+ public void seekToFirst() {
+ assert (isOwningHandle());
+ seekToFirst0(nativeHandle_);
+ }
+
+ @Override
+ public void seekToLast() {
+ assert (isOwningHandle());
+ seekToLast0(nativeHandle_);
+ }
+
+ @Override
+ public void seek(byte[] target) {
+ assert (isOwningHandle());
+ seek0(nativeHandle_, target, target.length);
+ }
+
+ @Override
+ public void seekForPrev(byte[] target) {
+ assert (isOwningHandle());
+ seekForPrev0(nativeHandle_, target, target.length);
+ }
+
+ @Override
+ public void seek(ByteBuffer target) {
+ assert (isOwningHandle() && target.isDirect());
+ seekDirect0(nativeHandle_, target, target.position(), target.remaining());
+ target.position(target.limit());
+ }
+
+ @Override
+ public void seekForPrev(ByteBuffer target) {
+ assert (isOwningHandle() && target.isDirect());
+ seekForPrevDirect0(nativeHandle_, target, target.position(), target.remaining());
+ target.position(target.limit());
+ }
+
+ @Override
+ public void next() {
+ assert (isOwningHandle());
+ next0(nativeHandle_);
+ }
+
+ @Override
+ public void prev() {
+ assert (isOwningHandle());
+ prev0(nativeHandle_);
+ }
+
+ @Override
+ public void status() throws RocksDBException {
+ assert (isOwningHandle());
+ status0(nativeHandle_);
+ }
+
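+ // Typical traversal goes through a concrete subclass such as
+ // RocksIterator; a sketch assuming an open RocksDB instance `db`:
+ //
+ //   try (final RocksIterator it = db.newIterator()) {
+ //     for (it.seekToFirst(); it.isValid(); it.next()) {
+ //       // consume it.key() / it.value()
+ //     }
+ //     it.status(); // throws if iteration stopped due to an error
+ //   }
+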
+ /**
+ * <p>Deletes underlying C++ iterator pointer.</p>
+ *
+ * <p>Note: the underlying handle can only be safely deleted if the parent
+ * instance related to a certain RocksIterator is still valid and initialized.
+ * Therefore {@code disposeInternal()} checks if the parent is initialized
+ * before freeing the native handle.</p>
+ */
+ @Override
+ protected void disposeInternal() {
+ if (parent_.isOwningHandle()) {
+ disposeInternal(nativeHandle_);
+ }
+ }
+
+ abstract boolean isValid0(long handle);
+ abstract void seekToFirst0(long handle);
+ abstract void seekToLast0(long handle);
+ abstract void next0(long handle);
+ abstract void prev0(long handle);
+ abstract void seek0(long handle, byte[] target, int targetLen);
+ abstract void seekForPrev0(long handle, byte[] target, int targetLen);
+ abstract void seekDirect0(long handle, ByteBuffer target, int targetOffset, int targetLen);
+ abstract void seekForPrevDirect0(long handle, ByteBuffer target, int targetOffset, int targetLen);
+ abstract void status0(long handle) throws RocksDBException;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java
new file mode 100644
index 000000000..5a22e2956
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java
@@ -0,0 +1,191 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Slices are used by RocksDB to provide
+ * efficient access to keys and values.
+ *
+ * This class is package private, implementers
+ * should extend either of the public abstract classes:
+ * @see org.rocksdb.Slice
+ * @see org.rocksdb.DirectSlice
+ *
+ * Regarding the lifecycle of Java Slices in RocksDB:
+ * At present when you configure a Comparator from Java, it creates an
+ * instance of a C++ BaseComparatorJniCallback subclass and
+ * passes that to RocksDB as the comparator. That subclass of
+ * BaseComparatorJniCallback creates the Java
+ * @see org.rocksdb.AbstractSlice subclass Objects. When you dispose
+ * the Java @see org.rocksdb.AbstractComparator subclass, it disposes the
+ * C++ BaseComparatorJniCallback subclass, which in turn destroys the
+ * Java @see org.rocksdb.AbstractSlice subclass Objects.
+ */
+public abstract class AbstractSlice<T> extends RocksMutableObject {
+
+ protected AbstractSlice() {
+ super();
+ }
+
+ protected AbstractSlice(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Returns the data of the slice.
+ *
+ * @return The slice data. Note, the type of access is
+ * determined by the subclass
+ * @see org.rocksdb.AbstractSlice#data0(long)
+ */
+ public T data() {
+ return data0(getNativeHandle());
+ }
+
+ /**
+ * Access to the data is provided by the
+ * subtype as it needs to handle the
+ * generic typing.
+ *
+ * @param handle The address of the underlying
+ * native object.
+ *
+ * @return Java typed access to the data.
+ */
+ protected abstract T data0(long handle);
+
+ /**
+ * Drops the specified {@code n}
+ * number of bytes from the start
+ * of the backing slice
+ *
+ * @param n The number of bytes to drop
+ */
+ public abstract void removePrefix(final int n);
+
+ /**
+ * Clears the backing slice
+ */
+ public abstract void clear();
+
+ /**
+ * Return the length (in bytes) of the data.
+ *
+ * @return The length in bytes.
+ */
+ public int size() {
+ return size0(getNativeHandle());
+ }
+
+ /**
+ * Return true if the length of the
+ * data is zero.
+ *
+ * @return true if there is no data, false otherwise.
+ */
+ public boolean empty() {
+ return empty0(getNativeHandle());
+ }
+
+ /**
+ * Creates a string representation of the data
+ *
+ * @param hex When true, the representation
+ * will be encoded in hexadecimal.
+ *
+ * @return The string representation of the data.
+ */
+ public String toString(final boolean hex) {
+ return toString0(getNativeHandle(), hex);
+ }
+
+ @Override
+ public String toString() {
+ return toString(false);
+ }
+
+ /**
+ * Three-way key comparison
+ *
+ * @param other A slice to compare against
+ *
+ * @return Should return either:
+ * 1) &lt; 0 if this &lt; other
+ * 2) == 0 if this == other
+ * 3) &gt; 0 if this &gt; other
+ */
+ public int compare(final AbstractSlice<?> other) {
+ assert (other != null);
+ if(!isOwningHandle()) {
+ return other.isOwningHandle() ? -1 : 0;
+ } else {
+ if(!other.isOwningHandle()) {
+ return 1;
+ } else {
+ return compare0(getNativeHandle(), other.getNativeHandle());
+ }
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ return toString().hashCode();
+ }
+
+ /**
+ * If other is a slice object, then
+ * we defer to {@link #compare(AbstractSlice) compare}
+ * to check equality, otherwise we return false.
+ *
+ * @param other Object to test for equality
+ *
+ * @return true when {@code this.compare(other) == 0},
+ * false otherwise.
+ */
+ @Override
+ public boolean equals(final Object other) {
+ if (other instanceof AbstractSlice) {
+ return compare((AbstractSlice<?>)other) == 0;
+ } else {
+ return false;
+ }
+ }
+
+ /**
+ * Determines whether this slice starts with
+ * another slice
+ *
+ * @param prefix Another slice which may or may not
+ * be a prefix of this slice.
+ *
+ * @return true when this slice starts with the
+ * {@code prefix} slice
+ */
+ public boolean startsWith(final AbstractSlice<?> prefix) {
+ if (prefix != null) {
+ return startsWith0(getNativeHandle(), prefix.getNativeHandle());
+ } else {
+ return false;
+ }
+ }
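+
+ // Illustration (a sketch): given Slice s = new Slice("key1") and
+ // Slice p = new Slice("key"), s.startsWith(p) is true and
+ // s.compare(p) > 0, since "key1" sorts after its prefix "key".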
+
+ protected static native long createNewSliceFromString(final String str);
+ private native int size0(long handle);
+ private native boolean empty0(long handle);
+ private native String toString0(long handle, boolean hex);
+ private native int compare0(long handle, long otherHandle);
+ private native boolean startsWith0(long handle, long otherHandle);
+
+ /**
+ * Deletes underlying C++ slice pointer.
+ * Note that this function should be called only after all
+ * RocksDB instances referencing the slice are closed.
+ * Otherwise undefined behavior will occur.
+ */
+ @Override
+ protected final native void disposeInternal(final long handle);
+
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTableFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTableFilter.java
new file mode 100644
index 000000000..c696c3e13
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTableFilter.java
@@ -0,0 +1,20 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+/**
+ * Base class for Table Filters.
+ */
+public abstract class AbstractTableFilter
+ extends RocksCallbackObject implements TableFilter {
+
+ protected AbstractTableFilter() {
+ super();
+ }
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return createNewTableFilter();
+ }
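+
+ // A sketch of a concrete subclass; TableFilter#filter decides per
+ // SST file whether it should be scanned (the return semantics are
+ // assumed from the TableFilter interface):
+ //
+ //   public class ScanAllTableFilter extends AbstractTableFilter {
+ //     @Override
+ //     public boolean filter(final TableProperties tableProperties) {
+ //       return true; // scan every table
+ //     }
+ //   }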
+
+ private native long createNewTableFilter();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTraceWriter.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTraceWriter.java
new file mode 100644
index 000000000..806709b1f
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTraceWriter.java
@@ -0,0 +1,70 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Base class for TraceWriters.
+ */
+public abstract class AbstractTraceWriter
+ extends RocksCallbackObject implements TraceWriter {
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return createNewTraceWriter();
+ }
+
+ /**
+ * Called from JNI, proxy for {@link TraceWriter#write(Slice)}.
+ *
+ * @param sliceHandle the native handle of the slice (which we do not own)
+ *
+ * @return short (2 bytes) where the first byte is the
+ * {@link Status.Code#getValue()} and the second byte is the
+ * {@link Status.SubCode#getValue()}.
+ */
+ private short writeProxy(final long sliceHandle) {
+ try {
+ write(new Slice(sliceHandle));
+ return statusToShort(Status.Code.Ok, Status.SubCode.None);
+ } catch (final RocksDBException e) {
+ return statusToShort(e.getStatus());
+ }
+ }
+
+ /**
+ * Called from JNI, proxy for {@link TraceWriter#closeWriter()}.
+ *
+ * @return short (2 bytes) where the first byte is the
+ * {@link Status.Code#getValue()} and the second byte is the
+ * {@link Status.SubCode#getValue()}.
+ */
+ private short closeWriterProxy() {
+ try {
+ closeWriter();
+ return statusToShort(Status.Code.Ok, Status.SubCode.None);
+ } catch (final RocksDBException e) {
+ return statusToShort(e.getStatus());
+ }
+ }
+
+ private static short statusToShort(/*@Nullable*/ final Status status) {
+ final Status.Code code = status != null && status.getCode() != null
+ ? status.getCode()
+ : Status.Code.IOError;
+ final Status.SubCode subCode = status != null && status.getSubCode() != null
+ ? status.getSubCode()
+ : Status.SubCode.None;
+ return statusToShort(code, subCode);
+ }
+
+ private static short statusToShort(final Status.Code code,
+ final Status.SubCode subCode) {
+ short result = (short)(code.getValue() << 8);
+ return (short)(result | subCode.getValue());
+ }
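+
+ // For example, assuming the byte encodings defined in Status.java,
+ // statusToShort(Status.Code.Busy, Status.SubCode.None) yields
+ // (0x0B << 8) | 0x00 == 0x0B00: code in the high byte, sub-code in
+ // the low byte.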
+
+ private native long createNewTraceWriter();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java
new file mode 100644
index 000000000..cbb49836d
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java
@@ -0,0 +1,54 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Provides notification to the caller of SetSnapshotOnNextOperation when
+ * the actual snapshot gets created
+ */
+public abstract class AbstractTransactionNotifier
+ extends RocksCallbackObject {
+
+ protected AbstractTransactionNotifier() {
+ super();
+ }
+
+ /**
+ * Implement this method to receive notification when a snapshot is
+ * requested via {@link Transaction#setSnapshotOnNextOperation()}.
+ *
+ * @param newSnapshot the snapshot that has been created.
+ */
+ public abstract void snapshotCreated(final Snapshot newSnapshot);
+
+ /**
+ * This is intentionally private as it is the callback hook
+ * from JNI
+ */
+ private void snapshotCreated(final long snapshotHandle) {
+ snapshotCreated(new Snapshot(snapshotHandle));
+ }
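+
+ // A sketch of registering a notifier with a transaction:
+ //
+ //   final AbstractTransactionNotifier notifier =
+ //       new AbstractTransactionNotifier() {
+ //     @Override
+ //     public void snapshotCreated(final Snapshot newSnapshot) {
+ //       // e.g. record newSnapshot.getSequenceNumber()
+ //     }
+ //   };
+ //   txn.setSnapshotOnNextOperation(notifier);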
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return createNewTransactionNotifier();
+ }
+
+ private native long createNewTransactionNotifier();
+
+ /**
+ * Deletes underlying C++ TransactionNotifier pointer.
+ *
+ * Note that this function should be called only after all
+ * Transactions referencing the transaction notifier are closed.
+ * Otherwise undefined behavior will occur.
+ */
+ @Override
+ protected void disposeInternal() {
+ disposeInternal(nativeHandle_);
+ }
+ protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWalFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWalFilter.java
new file mode 100644
index 000000000..d525045c6
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWalFilter.java
@@ -0,0 +1,49 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Base class for WAL Filters.
+ */
+public abstract class AbstractWalFilter
+ extends RocksCallbackObject implements WalFilter {
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return createNewWalFilter();
+ }
+
+ /**
+ * Called from JNI, proxy for
+ * {@link WalFilter#logRecordFound(long, String, WriteBatch, WriteBatch)}.
+ *
+ * @param logNumber the log handle.
+ * @param logFileName the log file name
+ * @param batchHandle the native handle of a WriteBatch (which we do not own)
+ * @param newBatchHandle the native handle of a
+ * new WriteBatch (which we do not own)
+ *
+ * @return short (2 bytes) where the first byte is the
+ * {@link WalFilter.LogRecordFoundResult#walProcessingOption}
+ * and the second byte is the
+ * {@link WalFilter.LogRecordFoundResult#batchChanged}.
+ */
+ private short logRecordFoundProxy(final long logNumber,
+ final String logFileName, final long batchHandle,
+ final long newBatchHandle) {
+ final LogRecordFoundResult logRecordFoundResult = logRecordFound(
+ logNumber, logFileName, new WriteBatch(batchHandle),
+ new WriteBatch(newBatchHandle));
+ return logRecordFoundResultToShort(logRecordFoundResult);
+ }
+
+ private static short logRecordFoundResultToShort(
+ final LogRecordFoundResult logRecordFoundResult) {
+ short result = (short)(logRecordFoundResult.walProcessingOption.getValue() << 8);
+ return (short)(result | (logRecordFoundResult.batchChanged ? 1 : 0));
+ }
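+
+ // For example, assuming WalProcessingOption.CONTINUE_PROCESSING has
+ // byte value 0x0, a result of (CONTINUE_PROCESSING, batchChanged=true)
+ // packs to (0x0 << 8) | 1 == 0x0001.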
+
+ private native long createNewWalFilter();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
new file mode 100644
index 000000000..1f81c99e3
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
@@ -0,0 +1,216 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+public abstract class AbstractWriteBatch extends RocksObject
+ implements WriteBatchInterface {
+
+ protected AbstractWriteBatch(final long nativeHandle) {
+ super(nativeHandle);
+ }
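+
+ // Typical usage goes through a concrete subclass such as WriteBatch;
+ // a sketch assuming an open RocksDB instance `db` and a static import
+ // of StandardCharsets.UTF_8:
+ //
+ //   try (final WriteBatch batch = new WriteBatch();
+ //        final WriteOptions writeOpts = new WriteOptions()) {
+ //     batch.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8));
+ //     batch.delete("k2".getBytes(UTF_8));
+ //     db.write(writeOpts, batch); // all operations apply atomically
+ //   }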
+
+ @Override
+ public int count() {
+ return count0(nativeHandle_);
+ }
+
+ @Override
+ public void put(byte[] key, byte[] value) throws RocksDBException {
+ put(nativeHandle_, key, key.length, value, value.length);
+ }
+
+ @Override
+ public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key,
+ byte[] value) throws RocksDBException {
+ put(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ @Override
+ public void merge(byte[] key, byte[] value) throws RocksDBException {
+ merge(nativeHandle_, key, key.length, value, value.length);
+ }
+
+ @Override
+ public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key,
+ byte[] value) throws RocksDBException {
+ merge(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ @Override
+ @Deprecated
+ public void remove(byte[] key) throws RocksDBException {
+ delete(nativeHandle_, key, key.length);
+ }
+
+ @Override
+ @Deprecated
+ public void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key)
+ throws RocksDBException {
+ delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ public void put(ByteBuffer key, ByteBuffer value) throws RocksDBException {
+ assert key.isDirect() && value.isDirect();
+ putDirect(nativeHandle_, key, key.position(), key.remaining(), value, value.position(),
+ value.remaining(), 0);
+ key.position(key.limit());
+ value.position(value.limit());
+ }
+
+ @Override
+ public void put(ColumnFamilyHandle columnFamilyHandle, ByteBuffer key, ByteBuffer value)
+ throws RocksDBException {
+ assert key.isDirect() && value.isDirect();
+ putDirect(nativeHandle_, key, key.position(), key.remaining(), value, value.position(),
+ value.remaining(), columnFamilyHandle.nativeHandle_);
+ key.position(key.limit());
+ value.position(value.limit());
+ }
+
+ @Override
+ public void delete(byte[] key) throws RocksDBException {
+ delete(nativeHandle_, key, key.length);
+ }
+
+ @Override
+ public void delete(ColumnFamilyHandle columnFamilyHandle, byte[] key)
+ throws RocksDBException {
+ delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
+ }
+
+
+ @Override
+ public void singleDelete(byte[] key) throws RocksDBException {
+ singleDelete(nativeHandle_, key, key.length);
+ }
+
+ @Override
+ public void singleDelete(ColumnFamilyHandle columnFamilyHandle, byte[] key)
+ throws RocksDBException {
+ singleDelete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ @Override
+ public void deleteRange(byte[] beginKey, byte[] endKey)
+ throws RocksDBException {
+ deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length);
+ }
+
+ @Override
+ public void deleteRange(ColumnFamilyHandle columnFamilyHandle,
+ byte[] beginKey, byte[] endKey) throws RocksDBException {
+ deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ public void remove(ByteBuffer key) throws RocksDBException {
+ removeDirect(nativeHandle_, key, key.position(), key.remaining(), 0);
+ key.position(key.limit());
+ }
+
+ @Override
+ public void remove(ColumnFamilyHandle columnFamilyHandle, ByteBuffer key)
+ throws RocksDBException {
+ removeDirect(
+ nativeHandle_, key, key.position(), key.remaining(), columnFamilyHandle.nativeHandle_);
+ key.position(key.limit());
+ }
+
+ @Override
+ public void putLogData(byte[] blob) throws RocksDBException {
+ putLogData(nativeHandle_, blob, blob.length);
+ }
+
+ @Override
+ public void clear() {
+ clear0(nativeHandle_);
+ }
+
+ @Override
+ public void setSavePoint() {
+ setSavePoint0(nativeHandle_);
+ }
+
+ @Override
+ public void rollbackToSavePoint() throws RocksDBException {
+ rollbackToSavePoint0(nativeHandle_);
+ }
+
+ @Override
+ public void popSavePoint() throws RocksDBException {
+ popSavePoint(nativeHandle_);
+ }
+
+ @Override
+ public void setMaxBytes(final long maxBytes) {
+ setMaxBytes(nativeHandle_, maxBytes);
+ }
+
+ @Override
+ public WriteBatch getWriteBatch() {
+ return getWriteBatch(nativeHandle_);
+ }
+
+ abstract int count0(final long handle);
+
+ abstract void put(final long handle, final byte[] key, final int keyLen,
+ final byte[] value, final int valueLen) throws RocksDBException;
+
+ abstract void put(final long handle, final byte[] key, final int keyLen,
+ final byte[] value, final int valueLen, final long cfHandle)
+ throws RocksDBException;
+
+ abstract void putDirect(final long handle, final ByteBuffer key, final int keyOffset,
+ final int keyLength, final ByteBuffer value, final int valueOffset, final int valueLength,
+ final long cfHandle) throws RocksDBException;
+
+ abstract void merge(final long handle, final byte[] key, final int keyLen,
+ final byte[] value, final int valueLen) throws RocksDBException;
+
+ abstract void merge(final long handle, final byte[] key, final int keyLen,
+ final byte[] value, final int valueLen, final long cfHandle)
+ throws RocksDBException;
+
+ abstract void delete(final long handle, final byte[] key,
+ final int keyLen) throws RocksDBException;
+
+ abstract void delete(final long handle, final byte[] key,
+ final int keyLen, final long cfHandle) throws RocksDBException;
+
+ abstract void singleDelete(final long handle, final byte[] key,
+ final int keyLen) throws RocksDBException;
+
+ abstract void singleDelete(final long handle, final byte[] key,
+ final int keyLen, final long cfHandle) throws RocksDBException;
+
+ abstract void removeDirect(final long handle, final ByteBuffer key, final int keyOffset,
+ final int keyLength, final long cfHandle) throws RocksDBException;
+
+ abstract void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
+ final byte[] endKey, final int endKeyLen) throws RocksDBException;
+
+ abstract void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
+ final byte[] endKey, final int endKeyLen, final long cfHandle) throws RocksDBException;
+
+ abstract void putLogData(final long handle, final byte[] blob,
+ final int blobLen) throws RocksDBException;
+
+ abstract void clear0(final long handle);
+
+ abstract void setSavePoint0(final long handle);
+
+ abstract void rollbackToSavePoint0(final long handle);
+
+ abstract void popSavePoint(final long handle) throws RocksDBException;
+
+ abstract void setMaxBytes(final long handle, long maxBytes);
+
+ abstract WriteBatch getWriteBatch(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java b/src/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java
new file mode 100644
index 000000000..877c4ab39
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java
@@ -0,0 +1,53 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * File access pattern once a compaction has started
+ */
+public enum AccessHint {
+ NONE((byte)0x0),
+ NORMAL((byte)0x1),
+ SEQUENTIAL((byte)0x2),
+ WILLNEED((byte)0x3);
+
+ private final byte value;
+
+ AccessHint(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * <p>Returns the byte value of the enumerations value.</p>
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * <p>Get the AccessHint enumeration value by
+ * passing the byte identifier to this method.</p>
+ *
+ * @param byteIdentifier of AccessHint.
+ *
+ * @return AccessHint instance.
+ *
+ * @throws IllegalArgumentException if the access hint for the byteIdentifier
+ * cannot be found
+ */
+ public static AccessHint getAccessHint(final byte byteIdentifier) {
+ for (final AccessHint accessHint : AccessHint.values()) {
+ if (accessHint.getValue() == byteIdentifier) {
+ return accessHint;
+ }
+ }
+
+ throw new IllegalArgumentException(
+ "Illegal value provided for AccessHint.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
new file mode 100644
index 000000000..772a5900b
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
@@ -0,0 +1,465 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * Advanced Column Family Options which are not
+ * mutable (i.e. not present in
+ * {@link AdvancedMutableColumnFamilyOptionsInterface})
+ *
+ * Taken from include/rocksdb/advanced_options.h
+ */
+public interface AdvancedColumnFamilyOptionsInterface<
+ T extends AdvancedColumnFamilyOptionsInterface<T>> {
+ /**
+ * The minimum number of write buffers that will be merged together
+ * before writing to storage. If set to 1, then
+ * all write buffers are flushed to L0 as individual files and this increases
+ * read amplification because a get request has to check in all of these
+ * files. Also, an in-memory merge may result in writing less
+ * data to storage if there are duplicate records in each of these
+ * individual write buffers. Default: 1
+ *
+ * @param minWriteBufferNumberToMerge the minimum number of write buffers
+ * that will be merged together.
+ * @return the reference to the current options.
+ */
+ T setMinWriteBufferNumberToMerge(
+ int minWriteBufferNumberToMerge);
+
+ /**
+ * The minimum number of write buffers that will be merged together
+ * before writing to storage. If set to 1, then
+ * all write buffers are flushed to L0 as individual files and this increases
+ * read amplification because a get request has to check in all of these
+ * files. Also, an in-memory merge may result in writing less
+ * data to storage if there are duplicate records in each of these
+ * individual write buffers. Default: 1
+ *
+ * @return the minimum number of write buffers that will be merged together.
+ */
+ int minWriteBufferNumberToMerge();
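+
+ // e.g. (a sketch) requiring two immutable write buffers to be merged
+ // before each flush to L0:
+ //
+ //   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+ //            .setMaxWriteBufferNumber(4)
+ //            .setMinWriteBufferNumberToMerge(2)) {
+ //     // ...
+ //   }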
+
+ /**
+ * The total maximum number of write buffers to maintain in memory including
+ * copies of buffers that have already been flushed. Unlike
+ * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()},
+ * this parameter does not affect flushing.
+ * This controls the minimum amount of write history that will be available
+ * in memory for conflict checking when Transactions are used.
+ *
+ * When using an OptimisticTransactionDB:
+ * If this value is too low, some transactions may fail at commit time due
+ * to not being able to determine whether there were any write conflicts.
+ *
+ * When using a TransactionDB:
+ * If Transaction::SetSnapshot is used, TransactionDB will read either
+ * in-memory write buffers or SST files to do write-conflict checking.
+ * Increasing this value can reduce the number of reads to SST files
+ * done for conflict detection.
+ *
+ * Setting this value to 0 will cause write buffers to be freed immediately
+ * after they are flushed.
+ * If this value is set to -1,
+ * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}
+ * will be used.
+ *
+ * Default:
+ * If using a TransactionDB/OptimisticTransactionDB, the default value will
+ * be set to the value of
+ * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}
+ * if it is not explicitly set by the user. Otherwise, the default is 0.
+ *
+ * @param maxWriteBufferNumberToMaintain The maximum number of write
+ * buffers to maintain
+ *
+ * @return the reference to the current options.
+ */
+ T setMaxWriteBufferNumberToMaintain(
+ int maxWriteBufferNumberToMaintain);
+
+ /**
+ * The total maximum number of write buffers to maintain in memory including
+ * copies of buffers that have already been flushed.
+ *
+ * @return maxWriteBufferNumberToMaintain The maximum number of write buffers
+ * to maintain
+ */
+ int maxWriteBufferNumberToMaintain();
+
+ /**
+ * Allows thread-safe inplace updates.
+ * If inplace_callback function is not set,
+ * Put(key, new_value) will update inplace the existing_value iff
+ * * key exists in current memtable
+ * * new sizeof(new_value) &le; sizeof(existing_value)
+ * * existing_value for that key is a put i.e. kTypeValue
+ * If inplace_callback function is set, check doc for inplace_callback.
+ * Default: false.
+ *
+ * @param inplaceUpdateSupport true if thread-safe inplace updates
+ * are allowed.
+ * @return the reference to the current options.
+ */
+ T setInplaceUpdateSupport(
+ boolean inplaceUpdateSupport);
+
+ /**
+ * Allows thread-safe inplace updates.
+ * If inplace_callback function is not set,
+ * Put(key, new_value) will update inplace the existing_value iff
+ * * key exists in current memtable
+ * * new sizeof(new_value) &le; sizeof(existing_value)
+ * * existing_value for that key is a put i.e. kTypeValue
+ * If inplace_callback function is set, check doc for inplace_callback.
+ * Default: false.
+ *
+ * @return true if thread-safe inplace updates are allowed.
+ */
+ boolean inplaceUpdateSupport();
+
+ /**
+ * Control locality of bloom filter probes to improve cache miss rate.
+ * This option only applies to memtable prefix bloom and plaintable
+ * prefix bloom. It essentially limits the max number of cache lines each
+ * bloom filter check can touch.
+ * This optimization is turned off when set to 0. The number should never
+ * be greater than the number of probes. This option can boost performance
+ * for in-memory workloads but should be used with care since it can cause
+ * a higher false positive rate.
+ * Default: 0
+ *
+ * @param bloomLocality the level of locality of bloom-filter probes.
+ * @return the reference to the current options.
+ */
+ T setBloomLocality(int bloomLocality);
+
+ /**
+ * Control locality of bloom filter probes to improve cache miss rate.
+ * This option only applies to memtable prefix bloom and plaintable
+ * prefix bloom. It essentially limits the max number of cache lines each
+ * bloom filter check can touch.
+ * This optimization is turned off when set to 0. The number should never
+ * be greater than the number of probes. This option can boost performance
+ * for in-memory workloads but should be used with care since it can cause
+ * a higher false positive rate.
+ * Default: 0
+ *
+ * @return the level of locality of bloom-filter probes.
+ * @see #setBloomLocality(int)
+ */
+ int bloomLocality();
+
+ /**
+   * <p>Different levels can have different compression
+   * policies. There are cases where the lower levels
+   * should use quick compression algorithms while
+   * the higher levels (which have more data) use
+   * compression algorithms that compress better
+   * but could be slower. This array, if non-empty, should
+   * have an entry for each level of the database;
+   * these override the value specified in the
+   * 'compression' field.</p>
+ *
+ * <strong>NOTICE</strong>
+ * <p>If {@code level_compaction_dynamic_level_bytes=true},
+ * {@code compression_per_level[0]} still determines {@code L0},
+ * but other elements of the array are based on base level
+ * (the level {@code L0} files are merged to), and may not
+ * match the level users see from info log for metadata.
+ * </p>
+   * <p>If {@code L0} files are merged to {@code level-n},
+   * then, for {@code i&gt;0}, {@code compression_per_level[i]}
+   * determines the compression type for level {@code n+i-1}.</p>
+ *
+ * <strong>Example</strong>
+   * <p>For example, if we have 5 levels, and we decide to
+   * merge {@code L0} data to {@code L4} (which means {@code L1..L3}
+   * will be empty), then the new files that go to {@code L4} use
+   * compression type {@code compression_per_level[1]}.</p>
+   *
+   * <p>If {@code L0} is later merged to {@code L2}, data going to
+   * {@code L2} will be compressed according to
+   * {@code compression_per_level[1]}, {@code L3} using
+   * {@code compression_per_level[2]} and {@code L4} using
+   * {@code compression_per_level[3]}. The compression type for each
+   * level can change as data grows.</p>
+ *
+ * <p><strong>Default:</strong> empty</p>
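+   *
+   * <p>A minimal sketch of configuring it (assuming a five-level setup;
+   * the chosen types are illustrative, not recommendations):</p>
+   * <pre>{@code
+   * final List<CompressionType> compressionPerLevel = Arrays.asList(
+   *     CompressionType.NO_COMPRESSION,       // L0
+   *     CompressionType.SNAPPY_COMPRESSION,
+   *     CompressionType.SNAPPY_COMPRESSION,
+   *     CompressionType.ZSTD_COMPRESSION,
+   *     CompressionType.ZSTD_COMPRESSION);
+   * try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+   *          .setCompressionPerLevel(compressionPerLevel)) {
+   *   // pass cfOpts when opening the database
+   * }
+   * }</pre>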
+ *
+ * @param compressionLevels list of
+ * {@link org.rocksdb.CompressionType} instances.
+ *
+ * @return the reference to the current options.
+ */
+ T setCompressionPerLevel(
+ List<CompressionType> compressionLevels);
+
+ /**
+   * <p>Return the currently set list of {@link org.rocksdb.CompressionType}
+   * instances, one per level.</p>
+ *
+ * <p>See: {@link #setCompressionPerLevel(java.util.List)}</p>
+ *
+ * @return list of {@link org.rocksdb.CompressionType}
+ * instances.
+ */
+ List<CompressionType> compressionPerLevel();
+
+ /**
+   * Set the number of levels for this database.
+ * If level-styled compaction is used, then this number determines
+ * the total number of levels.
+ *
+ * @param numLevels the number of levels.
+ * @return the reference to the current options.
+ */
+ T setNumLevels(int numLevels);
+
+ /**
+ * If level-styled compaction is used, then this number determines
+ * the total number of levels.
+ *
+ * @return the number of levels.
+ */
+ int numLevels();
+
+ /**
+ * <p>If {@code true}, RocksDB will pick target size of each level
+ * dynamically. We will pick a base level b &gt;= 1. L0 will be
+ * directly merged into level b, instead of always into level 1.
+   * Levels 1 to b-1 need to be empty. We try to pick b and its target
+ * size so that</p>
+ *
+ * <ol>
+ * <li>target size is in the range of
+ * (max_bytes_for_level_base / max_bytes_for_level_multiplier,
+ * max_bytes_for_level_base]</li>
+   * <li>target size of the last level (level num_levels-1) equals the extra
+   * size of the level.</li>
+ * </ol>
+ *
+ * <p>At the same time max_bytes_for_level_multiplier and
+ * max_bytes_for_level_multiplier_additional are still satisfied.</p>
+ *
+   * <p>With this option on, from an empty DB, we make the last level the base
+   * level, which means merging L0 data into the last level, until it exceeds
+   * max_bytes_for_level_base. Then we make the second last level the
+   * base level, and start merging L0 data into the second last level, with its
+   * target size being {@code 1/max_bytes_for_level_multiplier} of the last
+   * level's extra size. As the data accumulates further, we move the base
+   * level to the third last one, and so on.</p>
+ *
+ * <p><b>Example</b></p>
+ *
+ * <p>For example, assume {@code max_bytes_for_level_multiplier=10},
+ * {@code num_levels=6}, and {@code max_bytes_for_level_base=10MB}.</p>
+ *
+   * <p>Target sizes of levels 1 to 5 start with:</p>
+   * {@code [- - - - 10MB]}
+   * <p>with the base level being level 5. Target sizes of levels 1 to 4 are
+   * not applicable because they will not be used.
+   * When the size of level 5 grows to more than 10MB, say 11MB, we make
+   * level 4 the base level and the targets now look like:</p>
+   * {@code [- - - 1.1MB 11MB]}
+   * <p>As data accumulates, size targets are tuned based on the actual data
+   * in level 5. When level 5 has 50MB of data, the targets look like:</p>
+   * {@code [- - - 5MB 50MB]}
+   * <p>When level 5's actual size exceeds 100MB, say 101MB, and if we
+   * kept level 4 as the base level, its target size would need to be 10.1MB,
+   * which doesn't satisfy the target size range. So now we make level 3
+   * the base level and the target sizes of the levels look like:</p>
+   * {@code [- - 1.01MB 10.1MB 101MB]}
+   * <p>In the same way, while level 5 grows further, all levels' targets grow,
+   * like</p>
+   * {@code [- - 5MB 50MB 500MB]}
+   * <p>When level 5 exceeds 1000MB and becomes 1001MB, we make level 2 the
+   * base level and make the levels' target sizes like this:</p>
+   * {@code [- 1.001MB 10.01MB 100.1MB 1001MB]}
+   * <p>and so on...</p>
+ *
+   * <p>By doing this, we give {@code max_bytes_for_level_multiplier} priority
+   * over {@code max_bytes_for_level_base}, for a more predictable LSM tree
+   * shape. It is useful to limit worst case space amplification.</p>
+ *
+ * <p>{@code max_bytes_for_level_multiplier_additional} is ignored with
+ * this flag on.</p>
+ *
+ * <p>Turning this feature on or off for an existing DB can cause unexpected
+ * LSM tree structure so it's not recommended.</p>
+ *
+ * <p><strong>Caution</strong>: this option is experimental</p>
+ *
+ * <p>Default: false</p>
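+   *
+   * <p>A sketch of enabling it on a fresh DB (the sizes mirror the example
+   * above and are illustrative):</p>
+   * <pre>{@code
+   * try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+   *          .setLevelCompactionDynamicLevelBytes(true)
+   *          .setNumLevels(6)
+   *          .setMaxBytesForLevelBase(10 * 1024 * 1024)  // 10MB
+   *          .setMaxBytesForLevelMultiplier(10)) {
+   *   // pass cfOpts when opening the database
+   * }
+   * }</pre>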
+ *
+ * @param enableLevelCompactionDynamicLevelBytes boolean value indicating
+ * if {@code LevelCompactionDynamicLevelBytes} shall be enabled.
+ * @return the reference to the current options.
+ */
+  @Experimental("Turning this feature on or off for an existing DB can cause " +
+ "unexpected LSM tree structure so it's not recommended")
+ T setLevelCompactionDynamicLevelBytes(
+ boolean enableLevelCompactionDynamicLevelBytes);
+
+ /**
+ * <p>Return if {@code LevelCompactionDynamicLevelBytes} is enabled.
+ * </p>
+ *
+ * <p>For further information see
+ * {@link #setLevelCompactionDynamicLevelBytes(boolean)}</p>
+ *
+ * @return boolean value indicating if
+ * {@code levelCompactionDynamicLevelBytes} is enabled.
+ */
+ @Experimental("Caution: this option is experimental")
+ boolean levelCompactionDynamicLevelBytes();
+
+ /**
+   * Maximum size of each compaction (not guaranteed)
+ *
+ * @param maxCompactionBytes the compaction size limit
+ * @return the reference to the current options.
+ */
+ T setMaxCompactionBytes(
+ long maxCompactionBytes);
+
+ /**
+ * Control maximum size of each compaction (not guaranteed)
+ *
+ * @return compaction size threshold
+ */
+ long maxCompactionBytes();
+
+ /**
+ * Set compaction style for DB.
+ *
+ * Default: LEVEL.
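+   *
+   * <p>A sketch of selecting a non-default style (the universal size ratio
+   * is illustrative):</p>
+   * <pre>{@code
+   * try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+   *   cfOpts.setCompactionStyle(CompactionStyle.UNIVERSAL)
+   *       .setCompactionOptionsUniversal(
+   *           new CompactionOptionsUniversal().setSizeRatio(1));
+   * }
+   * }</pre>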
+ *
+ * @param compactionStyle Compaction style.
+ * @return the reference to the current options.
+ */
+ ColumnFamilyOptionsInterface setCompactionStyle(
+ CompactionStyle compactionStyle);
+
+ /**
+ * Compaction style for DB.
+ *
+ * @return Compaction style.
+ */
+ CompactionStyle compactionStyle();
+
+ /**
+   * If {@link #compactionStyle()} == {@link CompactionStyle#LEVEL},
+   * determines, for each level, which files are prioritized to be
+   * picked for compaction.
+ *
+ * Default: {@link CompactionPriority#ByCompensatedSize}
+ *
+ * @param compactionPriority The compaction priority
+ *
+ * @return the reference to the current options.
+ */
+ T setCompactionPriority(
+ CompactionPriority compactionPriority);
+
+ /**
+ * Get the Compaction priority if level compaction
+ * is used for all levels
+ *
+ * @return The compaction priority
+ */
+ CompactionPriority compactionPriority();
+
+ /**
+ * Set the options needed to support Universal Style compactions
+ *
+ * @param compactionOptionsUniversal The Universal Style compaction options
+ *
+ * @return the reference to the current options.
+ */
+ T setCompactionOptionsUniversal(
+ CompactionOptionsUniversal compactionOptionsUniversal);
+
+ /**
+ * The options needed to support Universal Style compactions
+ *
+ * @return The Universal Style compaction options
+ */
+ CompactionOptionsUniversal compactionOptionsUniversal();
+
+ /**
+ * The options for FIFO compaction style
+ *
+ * @param compactionOptionsFIFO The FIFO compaction options
+ *
+ * @return the reference to the current options.
+ */
+ T setCompactionOptionsFIFO(
+ CompactionOptionsFIFO compactionOptionsFIFO);
+
+ /**
+ * The options for FIFO compaction style
+ *
+ * @return The FIFO compaction options
+ */
+ CompactionOptionsFIFO compactionOptionsFIFO();
+
+ /**
+ * <p>This flag specifies that the implementation should optimize the filters
+   * mainly for cases where keys are found rather than also optimizing for
+   * keys that are missed. This would be used in cases where the application
+   * knows that there are very few misses, or the performance in the case
+   * of misses is not important.</p>
+ *
+   * <p>For now, this flag allows us to not store filters for the last level,
+   * i.e. the largest level which contains data of the LSM store. For keys which
+ * are hits, the filters in this level are not useful because we will search
+ * for the data anyway.</p>
+ *
+ * <p><strong>NOTE</strong>: the filters in other levels are still useful
+   * even for key hits because they tell us whether to look in that level or go
+ * to the higher level.</p>
+ *
+   * <p>Default: false</p>
+ *
+ * @param optimizeFiltersForHits boolean value indicating if this flag is set.
+ * @return the reference to the current options.
+ */
+ T setOptimizeFiltersForHits(
+ boolean optimizeFiltersForHits);
+
+ /**
+ * <p>Returns the current state of the {@code optimize_filters_for_hits}
+ * setting.</p>
+ *
+ * @return boolean value indicating if the flag
+ * {@code optimize_filters_for_hits} was set.
+ */
+ boolean optimizeFiltersForHits();
+
+ /**
+   * In debug mode, RocksDB runs consistency checks on the LSM every time the
+   * LSM changes (Flush, Compaction, AddFile). These checks are disabled in
+   * release mode; use this option to enable them in release mode as well.
+ *
+ * Default: false
+ *
+ * @param forceConsistencyChecks true to force consistency checks
+ *
+ * @return the reference to the current options.
+ */
+ T setForceConsistencyChecks(
+ boolean forceConsistencyChecks);
+
+ /**
+   * In debug mode, RocksDB runs consistency checks on the LSM every time the
+   * LSM changes (Flush, Compaction, AddFile). These checks are disabled in
+   * release mode.
+ *
+ * @return true if consistency checks are enforced
+ */
+ boolean forceConsistencyChecks();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
new file mode 100644
index 000000000..5b581ca1d
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
@@ -0,0 +1,464 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Advanced Column Family Options which are mutable
+ *
+ * Taken from include/rocksdb/advanced_options.h
+ * and MutableCFOptions in util/cf_options.h
+ */
+public interface AdvancedMutableColumnFamilyOptionsInterface<
+ T extends AdvancedMutableColumnFamilyOptionsInterface<T>> {
+ /**
+ * The maximum number of write buffers that are built up in memory.
+ * The default is 2, so that when 1 write buffer is being flushed to
+ * storage, new writes can continue to the other write buffer.
+ * Default: 2
+ *
+ * @param maxWriteBufferNumber maximum number of write buffers.
+ * @return the instance of the current options.
+ */
+ T setMaxWriteBufferNumber(
+ int maxWriteBufferNumber);
+
+ /**
+ * Returns maximum number of write buffers.
+ *
+ * @return maximum number of write buffers.
+ * @see #setMaxWriteBufferNumber(int)
+ */
+ int maxWriteBufferNumber();
+
+ /**
+ * Number of locks used for inplace update
+ * Default: 10000, if inplace_update_support = true, else 0.
+ *
+ * @param inplaceUpdateNumLocks the number of locks used for
+ * inplace updates.
+ * @return the reference to the current options.
+ * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
+ * while overflowing the underlying platform specific value.
+ */
+ T setInplaceUpdateNumLocks(
+ long inplaceUpdateNumLocks);
+
+ /**
+ * Number of locks used for inplace update
+ * Default: 10000, if inplace_update_support = true, else 0.
+ *
+ * @return the number of locks used for inplace update.
+ */
+ long inplaceUpdateNumLocks();
+
+ /**
+   * If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
+   * create a prefix bloom for the memtable with the size of
+   * write_buffer_size * memtable_prefix_bloom_size_ratio.
+   * If it is larger than 0.25, it is sanitized to 0.25.
+ *
+ * Default: 0 (disable)
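+   *
+   * <p>A sketch of enabling a memtable prefix bloom (the prefix length and
+   * ratio are illustrative):</p>
+   * <pre>{@code
+   * try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+   *   cfOpts.useFixedLengthPrefixExtractor(8)
+   *       .setMemtablePrefixBloomSizeRatio(0.1);
+   * }
+   * }</pre>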
+ *
+ * @param memtablePrefixBloomSizeRatio The ratio
+ * @return the reference to the current options.
+ */
+ T setMemtablePrefixBloomSizeRatio(
+ double memtablePrefixBloomSizeRatio);
+
+ /**
+   * If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
+   * create a prefix bloom for the memtable with the size of
+   * write_buffer_size * memtable_prefix_bloom_size_ratio.
+   * If it is larger than 0.25, it is sanitized to 0.25.
+ *
+ * Default: 0 (disable)
+ *
+ * @return the ratio
+ */
+ double memtablePrefixBloomSizeRatio();
+
+ /**
+   * Page size for huge page TLB for bloom in memtable. If &le; 0, don't
+   * allocate from huge page TLB but from malloc.
+   * Huge pages need to be reserved for it to be allocated. For example:
+   * sysctl -w vm.nr_hugepages=20
+   * See the Linux doc Documentation/vm/hugetlbpage.txt
+ *
+ * @param memtableHugePageSize The page size of the huge
+ * page tlb
+ * @return the reference to the current options.
+ */
+ T setMemtableHugePageSize(
+ long memtableHugePageSize);
+
+ /**
+   * Page size for huge page TLB for bloom in memtable. If &le; 0, don't
+   * allocate from huge page TLB but from malloc.
+   * Huge pages need to be reserved for it to be allocated. For example:
+   * sysctl -w vm.nr_hugepages=20
+   * See the Linux doc Documentation/vm/hugetlbpage.txt
+ *
+ * @return The page size of the huge page tlb
+ */
+ long memtableHugePageSize();
+
+ /**
+ * The size of one block in arena memory allocation.
+   * If &le; 0, a proper value is automatically calculated (usually 1/10 of
+   * write_buffer_size).
+   *
+   * There are two additional restrictions on the specified size:
+   * (1) the size should be in the range of [4096, 2 &lt;&lt; 30] and
+   * (2) it should be a multiple of the CPU word size (which helps with
+   * memory alignment).
+ *
+ * We'll automatically check and adjust the size number to make sure it
+ * conforms to the restrictions.
+ * Default: 0
+ *
+ * @param arenaBlockSize the size of an arena block
+ * @return the reference to the current options.
+ * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
+ * while overflowing the underlying platform specific value.
+ */
+ T setArenaBlockSize(long arenaBlockSize);
+
+ /**
+ * The size of one block in arena memory allocation.
+   * If &le; 0, a proper value is automatically calculated (usually 1/10 of
+   * write_buffer_size).
+   *
+   * There are two additional restrictions on the specified size:
+   * (1) the size should be in the range of [4096, 2 &lt;&lt; 30] and
+   * (2) it should be a multiple of the CPU word size (which helps with
+   * memory alignment).
+ *
+ * We'll automatically check and adjust the size number to make sure it
+ * conforms to the restrictions.
+ * Default: 0
+ *
+ * @return the size of an arena block
+ */
+ long arenaBlockSize();
+
+ /**
+   * Soft limit on the number of level-0 files. We start slowing down writes
+   * at this point. A value &lt; 0 means that no write slowdown will be
+   * triggered by the number of files in level-0.
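+   *
+   * <p>A sketch of the usual trio of level-0 triggers (values are
+   * illustrative; {@code setLevel0FileNumCompactionTrigger} comes from the
+   * base mutable options):</p>
+   * <pre>{@code
+   * try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+   *   cfOpts.setLevel0FileNumCompactionTrigger(4)
+   *       .setLevel0SlowdownWritesTrigger(20)
+   *       .setLevel0StopWritesTrigger(36);
+   * }
+   * }</pre>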
+ *
+ * @param level0SlowdownWritesTrigger The soft limit on the number of
+ * level-0 files
+ * @return the reference to the current options.
+ */
+ T setLevel0SlowdownWritesTrigger(
+ int level0SlowdownWritesTrigger);
+
+ /**
+   * Soft limit on the number of level-0 files. We start slowing down writes
+   * at this point. A value &lt; 0 means that no write slowdown will be
+   * triggered by the number of files in level-0.
+ *
+ * @return The soft limit on the number of
+ * level-0 files
+ */
+ int level0SlowdownWritesTrigger();
+
+ /**
+ * Maximum number of level-0 files. We stop writes at this point.
+ *
+ * @param level0StopWritesTrigger The maximum number of level-0 files
+ * @return the reference to the current options.
+ */
+ T setLevel0StopWritesTrigger(
+ int level0StopWritesTrigger);
+
+ /**
+ * Maximum number of level-0 files. We stop writes at this point.
+ *
+ * @return The maximum number of level-0 files
+ */
+ int level0StopWritesTrigger();
+
+ /**
+ * The target file size for compaction.
+ * This targetFileSizeBase determines a level-1 file size.
+ * Target file size for level L can be calculated by
+ * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
+ * For example, if targetFileSizeBase is 2MB and
+   * targetFileSizeMultiplier is 10, then each file on level-1 will
+   * be 2MB, and each file on level-2 will be 20MB,
+   * and each file on level-3 will be 200MB.
+   * By default targetFileSizeBase is 64MB.
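+   *
+   * <p>A sketch of the 2MB example above (sizes are illustrative):</p>
+   * <pre>{@code
+   * try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+   *   cfOpts.setTargetFileSizeBase(2 * 1024 * 1024)  // level-1 files ~2MB
+   *       .setTargetFileSizeMultiplier(10);  // level-2 ~20MB, level-3 ~200MB
+   * }
+   * }</pre>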
+ *
+   * @param targetFileSizeBase the target size of a level-1 file.
+ * @return the reference to the current options.
+ *
+ * @see #setTargetFileSizeMultiplier(int)
+ */
+ T setTargetFileSizeBase(
+ long targetFileSizeBase);
+
+ /**
+ * The target file size for compaction.
+ * This targetFileSizeBase determines a level-1 file size.
+ * Target file size for level L can be calculated by
+ * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
+ * For example, if targetFileSizeBase is 2MB and
+   * targetFileSizeMultiplier is 10, then each file on level-1 will
+   * be 2MB, and each file on level-2 will be 20MB,
+   * and each file on level-3 will be 200MB.
+   * By default targetFileSizeBase is 64MB.
+ *
+   * @return the target size of a level-1 file.
+ *
+ * @see #targetFileSizeMultiplier()
+ */
+ long targetFileSizeBase();
+
+ /**
+ * targetFileSizeMultiplier defines the size ratio between a
+ * level-L file and level-(L+1) file.
+ * By default target_file_size_multiplier is 1, meaning
+ * files in different levels have the same target.
+ *
+ * @param multiplier the size ratio between a level-(L+1) file
+ * and level-L file.
+ * @return the reference to the current options.
+ */
+ T setTargetFileSizeMultiplier(
+ int multiplier);
+
+ /**
+ * targetFileSizeMultiplier defines the size ratio between a
+ * level-(L+1) file and level-L file.
+ * By default targetFileSizeMultiplier is 1, meaning
+ * files in different levels have the same target.
+ *
+ * @return the size ratio between a level-(L+1) file and level-L file.
+ */
+ int targetFileSizeMultiplier();
+
+ /**
+ * The ratio between the total size of level-(L+1) files and the total
+ * size of level-L files for all L.
+ * DEFAULT: 10
+ *
+ * @param multiplier the ratio between the total size of level-(L+1)
+ * files and the total size of level-L files for all L.
+ * @return the reference to the current options.
+ *
+ * See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)}
+ */
+ T setMaxBytesForLevelMultiplier(double multiplier);
+
+ /**
+ * The ratio between the total size of level-(L+1) files and the total
+ * size of level-L files for all L.
+ * DEFAULT: 10
+ *
+ * @return the ratio between the total size of level-(L+1) files and
+ * the total size of level-L files for all L.
+ *
+ * See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()}
+ */
+ double maxBytesForLevelMultiplier();
+
+ /**
+ * Different max-size multipliers for different levels.
+ * These are multiplied by max_bytes_for_level_multiplier to arrive
+ * at the max-size of each level.
+ *
+ * Default: 1
+ *
+ * @param maxBytesForLevelMultiplierAdditional The max-size multipliers
+ * for each level
+ * @return the reference to the current options.
+ */
+ T setMaxBytesForLevelMultiplierAdditional(
+ int[] maxBytesForLevelMultiplierAdditional);
+
+ /**
+ * Different max-size multipliers for different levels.
+ * These are multiplied by max_bytes_for_level_multiplier to arrive
+ * at the max-size of each level.
+ *
+ * Default: 1
+ *
+ * @return The max-size multipliers for each level
+ */
+ int[] maxBytesForLevelMultiplierAdditional();
+
+ /**
+   * All writes will be slowed down to at least delayed_write_rate if the
+   * estimated bytes needed for compaction exceed this threshold.
+ *
+ * Default: 64GB
+ *
+ * @param softPendingCompactionBytesLimit The soft limit to impose on
+ * compaction
+ * @return the reference to the current options.
+ */
+ T setSoftPendingCompactionBytesLimit(
+ long softPendingCompactionBytesLimit);
+
+ /**
+   * All writes will be slowed down to at least delayed_write_rate if the
+   * estimated bytes needed for compaction exceed this threshold.
+ *
+ * Default: 64GB
+ *
+ * @return The soft limit to impose on compaction
+ */
+ long softPendingCompactionBytesLimit();
+
+ /**
+   * All writes are stopped if the estimated bytes needed for compaction
+   * exceed this threshold.
+ *
+ * Default: 256GB
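+   *
+   * <p>A sketch of setting both pending-compaction limits to the documented
+   * defaults (values are illustrative):</p>
+   * <pre>{@code
+   * try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+   *   cfOpts.setSoftPendingCompactionBytesLimit(64L * 1024 * 1024 * 1024)
+   *       .setHardPendingCompactionBytesLimit(256L * 1024 * 1024 * 1024);
+   * }
+   * }</pre>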
+ *
+ * @param hardPendingCompactionBytesLimit The hard limit to impose on
+ * compaction
+ * @return the reference to the current options.
+ */
+ T setHardPendingCompactionBytesLimit(
+ long hardPendingCompactionBytesLimit);
+
+ /**
+   * All writes are stopped if the estimated bytes needed for compaction
+   * exceed this threshold.
+ *
+ * Default: 256GB
+ *
+ * @return The hard limit to impose on compaction
+ */
+ long hardPendingCompactionBytesLimit();
+
+ /**
+ * An iteration-&gt;Next() sequentially skips over keys with the same
+ * user-key unless this option is set. This number specifies the number
+ * of keys (with the same userkey) that will be sequentially
+ * skipped before a reseek is issued.
+ * Default: 8
+ *
+   * @param maxSequentialSkipInIterations the number of keys that could
+   *     be skipped in an iteration.
+ * @return the reference to the current options.
+ */
+ T setMaxSequentialSkipInIterations(
+ long maxSequentialSkipInIterations);
+
+ /**
+ * An iteration-&gt;Next() sequentially skips over keys with the same
+ * user-key unless this option is set. This number specifies the number
+ * of keys (with the same userkey) that will be sequentially
+ * skipped before a reseek is issued.
+ * Default: 8
+ *
+   * @return the number of keys that could be skipped in an iteration.
+ */
+ long maxSequentialSkipInIterations();
+
+ /**
+ * Maximum number of successive merge operations on a key in the memtable.
+ *
+ * When a merge operation is added to the memtable and the maximum number of
+ * successive merges is reached, the value of the key will be calculated and
+ * inserted into the memtable instead of the merge operation. This will
+ * ensure that there are never more than max_successive_merges merge
+ * operations in the memtable.
+ *
+ * Default: 0 (disabled)
+ *
+ * @param maxSuccessiveMerges the maximum number of successive merges.
+ * @return the reference to the current options.
+ * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
+ * while overflowing the underlying platform specific value.
+ */
+ T setMaxSuccessiveMerges(
+ long maxSuccessiveMerges);
+
+ /**
+ * Maximum number of successive merge operations on a key in the memtable.
+ *
+ * When a merge operation is added to the memtable and the maximum number of
+ * successive merges is reached, the value of the key will be calculated and
+ * inserted into the memtable instead of the merge operation. This will
+ * ensure that there are never more than max_successive_merges merge
+ * operations in the memtable.
+ *
+ * Default: 0 (disabled)
+ *
+ * @return the maximum number of successive merges.
+ */
+ long maxSuccessiveMerges();
+
+ /**
+ * After writing every SST file, reopen it and read all the keys.
+ *
+ * Default: false
+ *
+ * @param paranoidFileChecks true to enable paranoid file checks
+ * @return the reference to the current options.
+ */
+ T setParanoidFileChecks(
+ boolean paranoidFileChecks);
+
+ /**
+ * After writing every SST file, reopen it and read all the keys.
+ *
+ * Default: false
+ *
+ * @return true if paranoid file checks are enabled
+ */
+ boolean paranoidFileChecks();
+
+ /**
+ * Measure IO stats in compactions and flushes, if true.
+ *
+ * Default: false
+ *
+ * @param reportBgIoStats true to enable reporting
+ * @return the reference to the current options.
+ */
+ T setReportBgIoStats(
+ boolean reportBgIoStats);
+
+ /**
+ * Determine whether IO stats in compactions and flushes are being measured
+ *
+ * @return true if reporting is enabled
+ */
+ boolean reportBgIoStats();
+
+ /**
+ * Non-bottom-level files older than TTL will go through the compaction
+ * process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be
+ * set to -1.
+ *
+ * Enabled only for level compaction for now.
+ *
+ * Default: 0 (disabled)
+ *
+ * Dynamically changeable through
+ * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
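+   *
+   * <p>A sketch of changing it at runtime (assuming an open {@code db} and a
+   * column family handle {@code cfh}; the 30-day value is illustrative):</p>
+   * <pre>{@code
+   * db.setOptions(cfh, MutableColumnFamilyOptions.builder()
+   *     .setTtl(30L * 24 * 60 * 60)  // in seconds
+   *     .build());
+   * }</pre>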
+ *
+ * @param ttl the time-to-live.
+ *
+ * @return the reference to the current options.
+ */
+ T setTtl(final long ttl);
+
+ /**
+ * Get the TTL for Non-bottom-level files that will go through the compaction
+ * process.
+ *
+ * See {@link #setTtl(long)}.
+ *
+ * @return the time-to-live.
+ */
+ long ttl();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java b/src/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java
new file mode 100644
index 000000000..a028edea0
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java
@@ -0,0 +1,259 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * BackupEngine allows you to backup
+ * and restore the database.
+ *
+ * Be aware that opening a BackupEngine takes time proportional to the number
+ * of backups. So if you have a slow filesystem for backups (like HDFS)
+ * and you have a lot of backups, then opening can take some time.
+ * That's why we recommend limiting the number of backups.
+ * We also recommend keeping the BackupEngine alive and not recreating it
+ * every time you need to do a backup.
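+ *
+ * <p>A minimal usage sketch (the paths and the open {@code db} are
+ * illustrative; error handling is omitted):</p>
+ * <pre>{@code
+ * try (final BackupableDBOptions bopt =
+ *          new BackupableDBOptions("/path/to/backups");
+ *      final BackupEngine be = BackupEngine.open(Env.getDefault(), bopt)) {
+ *   be.createNewBackup(db, true);  // flush memtables before copying
+ *   be.purgeOldBackups(5);         // keep only the five latest backups
+ * }
+ * }</pre>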
+ */
+public class BackupEngine extends RocksObject implements AutoCloseable {
+
+ protected BackupEngine(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Opens a new Backup Engine
+ *
+ * @param env The environment that the backup engine should operate within
+ * @param options Any options for the backup engine
+ *
+ * @return A new BackupEngine instance
+ * @throws RocksDBException thrown if the backup engine could not be opened
+ */
+ public static BackupEngine open(final Env env,
+ final BackupableDBOptions options) throws RocksDBException {
+ return new BackupEngine(open(env.nativeHandle_, options.nativeHandle_));
+ }
+
+ /**
+ * Captures the state of the database in the latest backup
+ *
+ * Just a convenience for {@link #createNewBackup(RocksDB, boolean)} with
+ * the flushBeforeBackup parameter set to false
+ *
+ * @param db The database to backup
+ *
+ * Note - This method is not thread safe
+ *
+ * @throws RocksDBException thrown if a new backup could not be created
+ */
+ public void createNewBackup(final RocksDB db) throws RocksDBException {
+ createNewBackup(db, false);
+ }
+
+ /**
+ * Captures the state of the database in the latest backup
+ *
+ * @param db The database to backup
+ * @param flushBeforeBackup When true, the Backup Engine will first issue a
+ * memtable flush and only then copy the DB files to
+ * the backup directory. Doing so will prevent log
+ * files from being copied to the backup directory
+ * (since flush will delete them).
+ * When false, the Backup Engine will not issue a
+ * flush before starting the backup. In that case,
+ * the backup will also include log files
+ * corresponding to live memtables. If writes have
+ * been performed with the write ahead log disabled,
+ * set flushBeforeBackup to true to prevent those
+ * writes from being lost. Otherwise, the backup will
+ * always be consistent with the current state of the
+ * database regardless of the flushBeforeBackup
+ * parameter.
+ *
+ * Note - This method is not thread safe
+ *
+ * @throws RocksDBException thrown if a new backup could not be created
+ */
+ public void createNewBackup(
+ final RocksDB db, final boolean flushBeforeBackup)
+ throws RocksDBException {
+ assert (isOwningHandle());
+ createNewBackup(nativeHandle_, db.nativeHandle_, flushBeforeBackup);
+ }
+
+ /**
+ * Captures the state of the database in the latest backup along with
+ * application specific metadata.
+ *
+ * @param db The database to backup
+ * @param metadata Application metadata
+ * @param flushBeforeBackup When true, the Backup Engine will first issue a
+ * memtable flush and only then copy the DB files to
+ * the backup directory. Doing so will prevent log
+ * files from being copied to the backup directory
+ * (since flush will delete them).
+ * When false, the Backup Engine will not issue a
+ * flush before starting the backup. In that case,
+ * the backup will also include log files
+ * corresponding to live memtables. If writes have
+ * been performed with the write ahead log disabled,
+ * set flushBeforeBackup to true to prevent those
+ * writes from being lost. Otherwise, the backup will
+ * always be consistent with the current state of the
+ * database regardless of the flushBeforeBackup
+ * parameter.
+ *
+ * Note - This method is not thread safe
+ *
+ * @throws RocksDBException thrown if a new backup could not be created
+ */
+ public void createNewBackupWithMetadata(final RocksDB db, final String metadata,
+ final boolean flushBeforeBackup) throws RocksDBException {
+ assert (isOwningHandle());
+ createNewBackupWithMetadata(nativeHandle_, db.nativeHandle_, metadata, flushBeforeBackup);
+ }
+
+ /**
+ * Gets information about the available
+ * backups
+ *
+ * @return A list of information about each available backup
+ */
+ public List<BackupInfo> getBackupInfo() {
+ assert (isOwningHandle());
+ return getBackupInfo(nativeHandle_);
+ }
+
+ /**
+ * <p>Returns a list of corrupted backup ids. If there
+ * is no corrupted backup the method will return an
+ * empty list.</p>
+ *
+ * @return array of backup ids as int ids.
+ */
+ public int[] getCorruptedBackups() {
+ assert(isOwningHandle());
+ return getCorruptedBackups(nativeHandle_);
+ }
+
+ /**
+ * <p>Will delete all the files we don't need anymore. It will
+   * do a full scan of the files/ directory and delete all the
+ * files that are not referenced.</p>
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void garbageCollect() throws RocksDBException {
+ assert(isOwningHandle());
+ garbageCollect(nativeHandle_);
+ }
+
+ /**
+ * Deletes old backups, keeping just the latest numBackupsToKeep
+ *
+ * @param numBackupsToKeep The latest n backups to keep
+ *
+ * @throws RocksDBException thrown if the old backups could not be deleted
+ */
+ public void purgeOldBackups(
+ final int numBackupsToKeep) throws RocksDBException {
+ assert (isOwningHandle());
+ purgeOldBackups(nativeHandle_, numBackupsToKeep);
+ }
+
+ /**
+ * Deletes a backup
+ *
+ * @param backupId The id of the backup to delete
+ *
+ * @throws RocksDBException thrown if the backup could not be deleted
+ */
+ public void deleteBackup(final int backupId) throws RocksDBException {
+ assert (isOwningHandle());
+ deleteBackup(nativeHandle_, backupId);
+ }
+
+ /**
+ * Restore the database from a backup
+ *
+ * IMPORTANT: if options.share_table_files == true and you restore the DB
+ * from some backup that is not the latest, and you start creating new
+ * backups from the new DB, they will probably fail!
+ *
+ * Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3.
+ * If you add new data to the DB and try creating a new backup now, the
+ * database will diverge from backups 4 and 5 and the new backup will fail.
+   * If you want to create a new backup, you will first have to delete
+   * backups 4 and 5.
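+   *
+   * <p>A sketch of restoring backup 3 (paths are illustrative; the target DB
+   * should not be open while restoring):</p>
+   * <pre>{@code
+   * try (final RestoreOptions restoreOptions = new RestoreOptions(false)) {
+   *   backupEngine.restoreDbFromBackup(3, "/path/to/db", "/path/to/db",
+   *       restoreOptions);
+   * }
+   * }</pre>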
+ *
+ * @param backupId The id of the backup to restore
+ * @param dbDir The directory to restore the backup to, i.e. where your
+ * database is
+ * @param walDir The location of the log files for your database,
+ * often the same as dbDir
+ * @param restoreOptions Options for controlling the restore
+ *
+ * @throws RocksDBException thrown if the database could not be restored
+ */
+ public void restoreDbFromBackup(
+ final int backupId, final String dbDir, final String walDir,
+ final RestoreOptions restoreOptions) throws RocksDBException {
+ assert (isOwningHandle());
+ restoreDbFromBackup(nativeHandle_, backupId, dbDir, walDir,
+ restoreOptions.nativeHandle_);
+ }
+
+ /**
+ * Restore the database from the latest backup
+ *
+ * @param dbDir The directory to restore the backup to, i.e. where your
+ * database is
+ * @param walDir The location of the log files for your database, often the
+ * same as dbDir
+ * @param restoreOptions Options for controlling the restore
+ *
+ * @throws RocksDBException thrown if the database could not be restored
+ */
+ public void restoreDbFromLatestBackup(
+ final String dbDir, final String walDir,
+ final RestoreOptions restoreOptions) throws RocksDBException {
+ assert (isOwningHandle());
+ restoreDbFromLatestBackup(nativeHandle_, dbDir, walDir,
+ restoreOptions.nativeHandle_);
+ }
+
+ private native static long open(final long env,
+ final long backupableDbOptions) throws RocksDBException;
+
+ private native void createNewBackup(final long handle, final long dbHandle,
+ final boolean flushBeforeBackup) throws RocksDBException;
+
+ private native void createNewBackupWithMetadata(final long handle, final long dbHandle,
+ final String metadata, final boolean flushBeforeBackup) throws RocksDBException;
+
+ private native List<BackupInfo> getBackupInfo(final long handle);
+
+ private native int[] getCorruptedBackups(final long handle);
+
+ private native void garbageCollect(final long handle) throws RocksDBException;
+
+ private native void purgeOldBackups(final long handle,
+ final int numBackupsToKeep) throws RocksDBException;
+
+ private native void deleteBackup(final long handle, final int backupId)
+ throws RocksDBException;
+
+ private native void restoreDbFromBackup(final long handle, final int backupId,
+ final String dbDir, final String walDir, final long restoreOptionsHandle)
+ throws RocksDBException;
+
+ private native void restoreDbFromLatestBackup(final long handle,
+ final String dbDir, final String walDir, final long restoreOptionsHandle)
+ throws RocksDBException;
+
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java
new file mode 100644
index 000000000..9244e4eb1
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java
@@ -0,0 +1,76 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * Instances of this class describe a Backup made by
+ * {@link org.rocksdb.BackupEngine}.
+ */
+public class BackupInfo {
+
+ /**
+ * Package private constructor used to create instances
+ * of BackupInfo by {@link org.rocksdb.BackupEngine}
+ *
+ * @param backupId id of backup
+ * @param timestamp timestamp of backup
+ * @param size size of backup
+   * @param numberFiles number of files related to this backup.
+   * @param app_metadata application metadata associated with this backup.
+ */
+ BackupInfo(final int backupId, final long timestamp, final long size, final int numberFiles,
+ final String app_metadata) {
+ backupId_ = backupId;
+ timestamp_ = timestamp;
+ size_ = size;
+ numberFiles_ = numberFiles;
+ app_metadata_ = app_metadata;
+ }
+
+ /**
+ *
+ * @return the backup id.
+ */
+ public int backupId() {
+ return backupId_;
+ }
+
+ /**
+ *
+ * @return the timestamp of the backup.
+ */
+ public long timestamp() {
+ return timestamp_;
+ }
+
+ /**
+ *
+ * @return the size of the backup
+ */
+ public long size() {
+ return size_;
+ }
+
+ /**
+ *
+ * @return the number of files of this backup.
+ */
+ public int numberFiles() {
+ return numberFiles_;
+ }
+
+ /**
+ *
+ * @return the associated application metadata, or null
+ */
+ public String appMetadata() {
+ return app_metadata_;
+ }
+
+ private int backupId_;
+ private long timestamp_;
+ private long size_;
+ private int numberFiles_;
+ private String app_metadata_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BackupableDBOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/BackupableDBOptions.java
new file mode 100644
index 000000000..8bb41433f
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/BackupableDBOptions.java
@@ -0,0 +1,465 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.io.File;
+
+/**
+ * <p>BackupableDBOptions to control the behavior of a backupable database.
+ * It will be used during the creation of a {@link org.rocksdb.BackupEngine}.
+ * </p>
+ * <p>Note that dispose() must be called before an Options instance
+ * becomes out-of-scope to release the allocated memory in C++.</p>
+ *
+ * @see org.rocksdb.BackupEngine
+ */
+public class BackupableDBOptions extends RocksObject {
+
+ private Env backupEnv = null;
+ private Logger infoLog = null;
+ private RateLimiter backupRateLimiter = null;
+ private RateLimiter restoreRateLimiter = null;
+
+ /**
+ * <p>BackupableDBOptions constructor.</p>
+ *
+   * @param path Where to keep the backup files. Has to be different from the
+   *     db name. Best to set this to {@code dbname + "/backups"}
+ * @throws java.lang.IllegalArgumentException if illegal path is used.
+ */
+ public BackupableDBOptions(final String path) {
+ super(newBackupableDBOptions(ensureWritableFile(path)));
+ }
+
+ private static String ensureWritableFile(final String path) {
+ final File backupPath = path == null ? null : new File(path);
+ if (backupPath == null || !backupPath.isDirectory() ||
+ !backupPath.canWrite()) {
+ throw new IllegalArgumentException("Illegal path provided.");
+ } else {
+ return path;
+ }
+ }
+
+ /**
+ * <p>Returns the path to the BackupableDB directory.</p>
+ *
+ * @return the path to the BackupableDB directory.
+ */
+ public String backupDir() {
+ assert(isOwningHandle());
+ return backupDir(nativeHandle_);
+ }
+
+ /**
+ * Backup Env object. It will be used for backup file I/O. If it's
+   * null, backups will be written out using the DB's Env. Otherwise
+   * the backup's I/O will be performed using this object.
+ *
+ * If you want to have backups on HDFS, use HDFS Env here!
+ *
+ * Default: null
+ *
+ * @param env The environment to use
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setBackupEnv(final Env env) {
+ assert(isOwningHandle());
+ setBackupEnv(nativeHandle_, env.nativeHandle_);
+ this.backupEnv = env;
+ return this;
+ }
+
+ /**
+ * Backup Env object. It will be used for backup file I/O. If it's
+   * null, backups will be written out using the DB's Env. Otherwise
+   * the backup's I/O will be performed using this object.
+ *
+ * If you want to have backups on HDFS, use HDFS Env here!
+ *
+ * Default: null
+ *
+ * @return The environment in use
+ */
+ public Env backupEnv() {
+ return this.backupEnv;
+ }
+
+ /**
+ * <p>Share table files between backups.</p>
+ *
+ * @param shareTableFiles If {@code share_table_files == true}, backup will
+ * assume that table files with same name have the same contents. This
+ * enables incremental backups and avoids unnecessary data copies. If
+ * {@code share_table_files == false}, each backup will be on its own and
+ * will not share any data with other backups.
+ *
+ * <p>Default: true</p>
+ *
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setShareTableFiles(final boolean shareTableFiles) {
+ assert(isOwningHandle());
+ setShareTableFiles(nativeHandle_, shareTableFiles);
+ return this;
+ }
+
+ /**
+ * <p>Share table files between backups.</p>
+ *
+ * @return boolean value indicating if SST files will be shared between
+ * backups.
+ */
+ public boolean shareTableFiles() {
+ assert(isOwningHandle());
+ return shareTableFiles(nativeHandle_);
+ }
+
+ /**
+ * Set the logger to use for Backup info and error messages
+ *
+ * @param logger The logger to use for the backup
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setInfoLog(final Logger logger) {
+ assert(isOwningHandle());
+ setInfoLog(nativeHandle_, logger.nativeHandle_);
+ this.infoLog = logger;
+ return this;
+ }
+
+ /**
+ * Set the logger to use for Backup info and error messages
+ *
+ * Default: null
+ *
+ * @return The logger in use for the backup
+ */
+ public Logger infoLog() {
+ return this.infoLog;
+ }
+
+ /**
+ * <p>Set synchronous backups.</p>
+ *
+ * @param sync If {@code sync == true}, we can guarantee you'll get consistent
+ * backup even on a machine crash/reboot. Backup process is slower with sync
+ * enabled. If {@code sync == false}, we don't guarantee anything on machine
+ * reboot. However, chances are some of the backups are consistent.
+ *
+ * <p>Default: true</p>
+ *
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setSync(final boolean sync) {
+ assert(isOwningHandle());
+ setSync(nativeHandle_, sync);
+ return this;
+ }
+
+ /**
+   * <p>Whether synchronous backups are activated.</p>
+ *
+ * @return boolean value if synchronous backups are configured.
+ */
+ public boolean sync() {
+ assert(isOwningHandle());
+ return sync(nativeHandle_);
+ }
+
+ /**
+ * <p>Set if old data will be destroyed.</p>
+ *
+ * @param destroyOldData If true, it will delete whatever backups there are
+ * already.
+ *
+ * <p>Default: false</p>
+ *
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setDestroyOldData(final boolean destroyOldData) {
+ assert(isOwningHandle());
+ setDestroyOldData(nativeHandle_, destroyOldData);
+ return this;
+ }
+
+ /**
+   * <p>Returns if old data will be destroyed while performing new backups.</p>
+ *
+ * @return boolean value indicating if old data will be destroyed.
+ */
+ public boolean destroyOldData() {
+ assert(isOwningHandle());
+ return destroyOldData(nativeHandle_);
+ }
+
+ /**
+ * <p>Set if log files shall be persisted.</p>
+ *
+ * @param backupLogFiles If false, we won't backup log files. This option can
+   * be useful for backing up in-memory databases where log files are
+ * persisted, but table files are in memory.
+ *
+ * <p>Default: true</p>
+ *
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setBackupLogFiles(final boolean backupLogFiles) {
+ assert(isOwningHandle());
+ setBackupLogFiles(nativeHandle_, backupLogFiles);
+ return this;
+ }
+
+ /**
+   * <p>Return whether log files shall be persisted.</p>
+ *
+ * @return boolean value indicating if log files will be persisted.
+ */
+ public boolean backupLogFiles() {
+ assert(isOwningHandle());
+ return backupLogFiles(nativeHandle_);
+ }
+
+ /**
+ * <p>Set backup rate limit.</p>
+ *
+ * @param backupRateLimit Max bytes that can be transferred in a second during
+ * backup. If 0 or negative, then go as fast as you can.
+ *
+ * <p>Default: 0</p>
+ *
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setBackupRateLimit(long backupRateLimit) {
+ assert(isOwningHandle());
+ backupRateLimit = (backupRateLimit <= 0) ? 0 : backupRateLimit;
+ setBackupRateLimit(nativeHandle_, backupRateLimit);
+ return this;
+ }
+
+ /**
+   * <p>Return the backup rate limit which describes the max bytes that can be
+ * transferred in a second during backup.</p>
+ *
+ * @return numerical value describing the backup transfer limit in bytes per
+ * second.
+ */
+ public long backupRateLimit() {
+ assert(isOwningHandle());
+ return backupRateLimit(nativeHandle_);
+ }
+
+ /**
+ * Backup rate limiter. Used to control transfer speed for backup. If this is
+ * not null, {@link #backupRateLimit()} is ignored.
+ *
+ * Default: null
+ *
+ * @param backupRateLimiter The rate limiter to use for the backup
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setBackupRateLimiter(final RateLimiter backupRateLimiter) {
+ assert(isOwningHandle());
+ setBackupRateLimiter(nativeHandle_, backupRateLimiter.nativeHandle_);
+ this.backupRateLimiter = backupRateLimiter;
+ return this;
+ }
+
+ /**
+ * Backup rate limiter. Used to control transfer speed for backup. If this is
+ * not null, {@link #backupRateLimit()} is ignored.
+ *
+ * Default: null
+ *
+ * @return The rate limiter in use for the backup
+ */
+ public RateLimiter backupRateLimiter() {
+ assert(isOwningHandle());
+ return this.backupRateLimiter;
+ }
+
+ /**
+ * <p>Set restore rate limit.</p>
+ *
+ * @param restoreRateLimit Max bytes that can be transferred in a second
+ * during restore. If 0 or negative, then go as fast as you can.
+ *
+ * <p>Default: 0</p>
+ *
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setRestoreRateLimit(long restoreRateLimit) {
+ assert(isOwningHandle());
+ restoreRateLimit = (restoreRateLimit <= 0) ? 0 : restoreRateLimit;
+ setRestoreRateLimit(nativeHandle_, restoreRateLimit);
+ return this;
+ }
+
+ /**
+   * <p>Return the restore rate limit which describes the max bytes that can be
+ * transferred in a second during restore.</p>
+ *
+ * @return numerical value describing the restore transfer limit in bytes per
+ * second.
+ */
+ public long restoreRateLimit() {
+ assert(isOwningHandle());
+ return restoreRateLimit(nativeHandle_);
+ }
+
+ /**
+ * Restore rate limiter. Used to control transfer speed during restore. If
+ * this is not null, {@link #restoreRateLimit()} is ignored.
+ *
+ * Default: null
+ *
+ * @param restoreRateLimiter The rate limiter to use during restore
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setRestoreRateLimiter(final RateLimiter restoreRateLimiter) {
+ assert(isOwningHandle());
+ setRestoreRateLimiter(nativeHandle_, restoreRateLimiter.nativeHandle_);
+ this.restoreRateLimiter = restoreRateLimiter;
+ return this;
+ }
+
+ /**
+ * Restore rate limiter. Used to control transfer speed during restore. If
+ * this is not null, {@link #restoreRateLimit()} is ignored.
+ *
+ * Default: null
+ *
+ * @return The rate limiter in use during restore
+ */
+ public RateLimiter restoreRateLimiter() {
+ assert(isOwningHandle());
+ return this.restoreRateLimiter;
+ }
+
+ /**
+ * <p>Only used if share_table_files is set to true. If true, will consider
+   * that backups can come from different databases, hence an SST is not uniquely
+ * identified by its name, but by the triple (file name, crc32, file length)
+ * </p>
+ *
+ * @param shareFilesWithChecksum boolean value indicating if SST files are
+ * stored using the triple (file name, crc32, file length) and not its name.
+ *
+   * <p>Note: this is an experimental option, and you'll need to set it manually;
+   * turn it on only if you know what you're doing.</p>
+ *
+ * <p>Default: false</p>
+ *
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setShareFilesWithChecksum(
+ final boolean shareFilesWithChecksum) {
+ assert(isOwningHandle());
+ setShareFilesWithChecksum(nativeHandle_, shareFilesWithChecksum);
+ return this;
+ }
+
+ /**
+   * <p>Return whether share files with checksum is active.</p>
+ *
+ * @return boolean value indicating if share files with checksum
+ * is active.
+ */
+ public boolean shareFilesWithChecksum() {
+ assert(isOwningHandle());
+ return shareFilesWithChecksum(nativeHandle_);
+ }
+
+ /**
+ * Up to this many background threads will copy files for
+ * {@link BackupEngine#createNewBackup(RocksDB, boolean)} and
+ * {@link BackupEngine#restoreDbFromBackup(int, String, String, RestoreOptions)}
+ *
+ * Default: 1
+ *
+ * @param maxBackgroundOperations The maximum number of background threads
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setMaxBackgroundOperations(
+ final int maxBackgroundOperations) {
+ assert(isOwningHandle());
+ setMaxBackgroundOperations(nativeHandle_, maxBackgroundOperations);
+ return this;
+ }
+
+ /**
+ * Up to this many background threads will copy files for
+ * {@link BackupEngine#createNewBackup(RocksDB, boolean)} and
+ * {@link BackupEngine#restoreDbFromBackup(int, String, String, RestoreOptions)}
+ *
+ * Default: 1
+ *
+ * @return The maximum number of background threads
+ */
+ public int maxBackgroundOperations() {
+ assert(isOwningHandle());
+ return maxBackgroundOperations(nativeHandle_);
+ }
+
+ /**
+   * During a backup, the user can get a callback every time the next
+   * {@link #callbackTriggerIntervalSize()} bytes are copied.
+ *
+ * Default: 4194304
+ *
+ * @param callbackTriggerIntervalSize The interval size for the
+ * callback trigger
+ * @return instance of current BackupableDBOptions.
+ */
+ public BackupableDBOptions setCallbackTriggerIntervalSize(
+ final long callbackTriggerIntervalSize) {
+ assert(isOwningHandle());
+ setCallbackTriggerIntervalSize(nativeHandle_, callbackTriggerIntervalSize);
+ return this;
+ }
+
+ /**
+   * During a backup, the user can get a callback every time the next
+   * {@link #callbackTriggerIntervalSize()} bytes are copied.
+ *
+ * Default: 4194304
+ *
+ * @return The interval size for the callback trigger
+ */
+ public long callbackTriggerIntervalSize() {
+ assert(isOwningHandle());
+ return callbackTriggerIntervalSize(nativeHandle_);
+ }
+
+ private native static long newBackupableDBOptions(final String path);
+ private native String backupDir(long handle);
+ private native void setBackupEnv(final long handle, final long envHandle);
+ private native void setShareTableFiles(long handle, boolean flag);
+ private native boolean shareTableFiles(long handle);
+ private native void setInfoLog(final long handle, final long infoLogHandle);
+ private native void setSync(long handle, boolean flag);
+ private native boolean sync(long handle);
+ private native void setDestroyOldData(long handle, boolean flag);
+ private native boolean destroyOldData(long handle);
+ private native void setBackupLogFiles(long handle, boolean flag);
+ private native boolean backupLogFiles(long handle);
+ private native void setBackupRateLimit(long handle, long rateLimit);
+ private native long backupRateLimit(long handle);
+ private native void setBackupRateLimiter(long handle, long rateLimiterHandle);
+ private native void setRestoreRateLimit(long handle, long rateLimit);
+ private native long restoreRateLimit(long handle);
+ private native void setRestoreRateLimiter(final long handle,
+ final long rateLimiterHandle);
+ private native void setShareFilesWithChecksum(long handle, boolean flag);
+ private native boolean shareFilesWithChecksum(long handle);
+ private native void setMaxBackgroundOperations(final long handle,
+ final int maxBackgroundOperations);
+ private native int maxBackgroundOperations(final long handle);
+ private native void setCallbackTriggerIntervalSize(final long handle,
+ long callbackTriggerIntervalSize);
+ private native long callbackTriggerIntervalSize(final long handle);
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
new file mode 100644
index 000000000..5bc694af5
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
@@ -0,0 +1,987 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * The config for block based table sst format.
+ *
+ * BlockBasedTable is RocksDB's default SST file format.
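+ *
+ * <p>A minimal configuration sketch (the block size and filter settings are
+ * illustrative; {@code options} stands for an existing {@code Options}
+ * instance):</p>
+ * <pre>{@code
+ * final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
+ *     .setBlockSize(16 * 1024)
+ *     .setCacheIndexAndFilterBlocks(true)
+ *     .setFilterPolicy(new BloomFilter(10, false));
+ * options.setTableFormatConfig(tableConfig);
+ * }</pre>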
+ */
+//TODO(AR) should be renamed BlockBasedTableOptions
+public class BlockBasedTableConfig extends TableFormatConfig {
+
+ public BlockBasedTableConfig() {
+ //TODO(AR) flushBlockPolicyFactory
+ cacheIndexAndFilterBlocks = false;
+ cacheIndexAndFilterBlocksWithHighPriority = false;
+ pinL0FilterAndIndexBlocksInCache = false;
+ pinTopLevelIndexAndFilter = true;
+ indexType = IndexType.kBinarySearch;
+ dataBlockIndexType = DataBlockIndexType.kDataBlockBinarySearch;
+ dataBlockHashTableUtilRatio = 0.75;
+ checksumType = ChecksumType.kCRC32c;
+ noBlockCache = false;
+ blockCache = null;
+ persistentCache = null;
+ blockCacheCompressed = null;
+ blockSize = 4 * 1024;
+ blockSizeDeviation = 10;
+ blockRestartInterval = 16;
+ indexBlockRestartInterval = 1;
+ metadataBlockSize = 4096;
+ partitionFilters = false;
+ useDeltaEncoding = true;
+ filterPolicy = null;
+ wholeKeyFiltering = true;
+ verifyCompression = true;
+ readAmpBytesPerBit = 0;
+ formatVersion = 2;
+ enableIndexCompression = true;
+ blockAlign = false;
+
+ // NOTE: ONLY used if blockCache == null
+ blockCacheSize = 8 * 1024 * 1024;
+ blockCacheNumShardBits = 0;
+
+ // NOTE: ONLY used if blockCacheCompressed == null
+ blockCacheCompressedSize = 0;
+ blockCacheCompressedNumShardBits = 0;
+ }
+
+ /**
+   * Indicates if index/filter blocks will be put into the block cache.
+   * If not specified, each "table reader" object will pre-load the
+   * index/filter blocks during table initialization.
+ *
+ * @return if index and filter blocks should be put in block cache.
+ */
+ public boolean cacheIndexAndFilterBlocks() {
+ return cacheIndexAndFilterBlocks;
+ }
+
+ /**
+   * Indicates if index/filter blocks will be put into the block cache.
+   * If not specified, each "table reader" object will pre-load the
+   * index/filter blocks during table initialization.
+   *
+   * @param cacheIndexAndFilterBlocks true if index and filter blocks should
+   *     be put in the block cache.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setCacheIndexAndFilterBlocks(
+ final boolean cacheIndexAndFilterBlocks) {
+ this.cacheIndexAndFilterBlocks = cacheIndexAndFilterBlocks;
+ return this;
+ }
+
+ /**
+ * Indicates if index and filter blocks will be treated as high-priority in the block cache.
+ * See note below about applicability. If not specified, defaults to false.
+ *
+ * @return if index and filter blocks will be treated as high-priority.
+ */
+ public boolean cacheIndexAndFilterBlocksWithHighPriority() {
+ return cacheIndexAndFilterBlocksWithHighPriority;
+ }
+
+ /**
+ * If true, cache index and filter blocks with high priority. If set to true,
+ * depending on implementation of block cache, index and filter blocks may be
+ * less likely to be evicted than data blocks.
+ *
+ * @param cacheIndexAndFilterBlocksWithHighPriority if index and filter blocks
+ * will be treated as high-priority.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setCacheIndexAndFilterBlocksWithHighPriority(
+ final boolean cacheIndexAndFilterBlocksWithHighPriority) {
+ this.cacheIndexAndFilterBlocksWithHighPriority = cacheIndexAndFilterBlocksWithHighPriority;
+ return this;
+ }
+
+ /**
+ * Indicating if we'd like to pin L0 index/filter blocks to the block cache.
+   * If not specified, defaults to false.
+ *
+ * @return if L0 index and filter blocks should be pinned to the block cache.
+ */
+ public boolean pinL0FilterAndIndexBlocksInCache() {
+ return pinL0FilterAndIndexBlocksInCache;
+ }
+
+ /**
+ * Indicating if we'd like to pin L0 index/filter blocks to the block cache.
+   * If not specified, defaults to false.
+ *
+ * @param pinL0FilterAndIndexBlocksInCache pin blocks in block cache
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setPinL0FilterAndIndexBlocksInCache(
+ final boolean pinL0FilterAndIndexBlocksInCache) {
+ this.pinL0FilterAndIndexBlocksInCache = pinL0FilterAndIndexBlocksInCache;
+ return this;
+ }
+
+ /**
+ * Indicates if top-level index and filter blocks should be pinned.
+ *
+ * @return if top-level index and filter blocks should be pinned.
+ */
+ public boolean pinTopLevelIndexAndFilter() {
+ return pinTopLevelIndexAndFilter;
+ }
+
+ /**
+ * If cacheIndexAndFilterBlocks is true and this option is also true, then
+ * the top-level index of partitioned filter and index blocks is stored in
+ * the cache, but a reference is held in the "table reader" object so the
+ * blocks are pinned and only evicted from the cache when the table reader
+ * is freed. This is not limited to L0 of the LSM tree.
+ *
+ * @param pinTopLevelIndexAndFilter if top-level index and filter blocks should be pinned.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setPinTopLevelIndexAndFilter(final boolean pinTopLevelIndexAndFilter) {
+ this.pinTopLevelIndexAndFilter = pinTopLevelIndexAndFilter;
+ return this;
+ }
+
+ /**
+ * Get the index type.
+ *
+ * @return the currently set index type
+ */
+ public IndexType indexType() {
+ return indexType;
+ }
+
+ /**
+ * Sets the index type to be used with this table.
+ *
+ * @param indexType {@link org.rocksdb.IndexType} value
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setIndexType(
+ final IndexType indexType) {
+ this.indexType = indexType;
+ return this;
+ }
+
+ /**
+ * Get the data block index type.
+ *
+ * @return the currently set data block index type
+ */
+ public DataBlockIndexType dataBlockIndexType() {
+ return dataBlockIndexType;
+ }
+
+ /**
+ * Sets the data block index type to be used with this table.
+ *
+ * @param dataBlockIndexType {@link org.rocksdb.DataBlockIndexType} value
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setDataBlockIndexType(
+ final DataBlockIndexType dataBlockIndexType) {
+ this.dataBlockIndexType = dataBlockIndexType;
+ return this;
+ }
+
+ /**
+ * Get the #entries/#buckets. It is valid only when {@link #dataBlockIndexType()} is
+ * {@link DataBlockIndexType#kDataBlockBinaryAndHash}.
+ *
+ * @return the #entries/#buckets.
+ */
+ public double dataBlockHashTableUtilRatio() {
+ return dataBlockHashTableUtilRatio;
+ }
+
+ /**
+ * Set the #entries/#buckets. It is valid only when {@link #dataBlockIndexType()} is
+ * {@link DataBlockIndexType#kDataBlockBinaryAndHash}.
+ *
+ * @param dataBlockHashTableUtilRatio #entries/#buckets
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setDataBlockHashTableUtilRatio(
+ final double dataBlockHashTableUtilRatio) {
+ this.dataBlockHashTableUtilRatio = dataBlockHashTableUtilRatio;
+ return this;
+ }
+
+ /**
+ * Get the checksum type to be used with this table.
+ *
+ * @return the currently set checksum type
+ */
+ public ChecksumType checksumType() {
+ return checksumType;
+ }
+
+ /**
+ * Sets the checksum type to be used with this table.
+ *
+ * @param checksumType {@link org.rocksdb.ChecksumType} value.
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setChecksumType(
+ final ChecksumType checksumType) {
+ this.checksumType = checksumType;
+ return this;
+ }
+
+ /**
+ * Determine if the block cache is disabled.
+ *
+ * @return if block cache is disabled
+ */
+ public boolean noBlockCache() {
+ return noBlockCache;
+ }
+
+ /**
+ * Disable block cache. If this is set to true,
+ * then no block cache will be used, and {@link #setBlockCache(Cache)}
+ * should be set to {@code null}.
+ *
+ * Default: false
+ *
+ * @param noBlockCache true to disable the block cache
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setNoBlockCache(final boolean noBlockCache) {
+ this.noBlockCache = noBlockCache;
+ return this;
+ }
+
+ /**
+ * Use the specified cache for blocks.
+ * When not null, this takes precedence even if the user sets a block cache size.
+ *
+ * The {@link org.rocksdb.Cache} should not be disposed before options instances
+ * using this cache are disposed.
+ *
+ * A {@link org.rocksdb.Cache} instance can be re-used in multiple options
+ * instances.
+ *
+ * @param blockCache {@link org.rocksdb.Cache} Cache java instance
+ * (e.g. LRUCache).
+ *
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setBlockCache(final Cache blockCache) {
+ this.blockCache = blockCache;
+ return this;
+ }
+
+ /**
+ * Use the specified persistent cache.
+ *
+ * If not {@code null}, use the specified cache for pages read from the device;
+ * otherwise no page cache is used.
+ *
+ * @param persistentCache the persistent cache
+ *
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setPersistentCache(
+ final PersistentCache persistentCache) {
+ this.persistentCache = persistentCache;
+ return this;
+ }
+
+ /**
+ * Use the specified cache for compressed blocks.
+ *
+ * If {@code null}, RocksDB will not use a compressed block cache.
+ *
+ * Note: though it looks similar to {@link #setBlockCache(Cache)}, RocksDB
+ * doesn't put the same type of object there.
+ *
+ * The {@link org.rocksdb.Cache} should not be disposed before options instances
+ * using this cache are disposed.
+ *
+ * A {@link org.rocksdb.Cache} instance can be re-used in multiple options
+ * instances.
+ *
+ * @param blockCacheCompressed {@link org.rocksdb.Cache} Cache java instance
+ * (e.g. LRUCache).
+ *
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setBlockCacheCompressed(
+ final Cache blockCacheCompressed) {
+ this.blockCacheCompressed = blockCacheCompressed;
+ return this;
+ }
+
+ /**
+ * Get the approximate size of user data packed per block.
+ *
+ * @return block size in bytes
+ */
+ public long blockSize() {
+ return blockSize;
+ }
+
+ /**
+ * Approximate size of user data packed per block. Note that the
+ * block size specified here corresponds to uncompressed data. The
+ * actual size of the unit read from disk may be smaller if
+ * compression is enabled. This parameter can be changed dynamically.
+ * Default: 4K
+ *
+ * @param blockSize block size in bytes
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setBlockSize(final long blockSize) {
+ this.blockSize = blockSize;
+ return this;
+ }
+
+ /**
+ * @return the block size deviation.
+ */
+ public int blockSizeDeviation() {
+ return blockSizeDeviation;
+ }
+
+ /**
+ * This is used to close a block before it reaches the configured
+ * {@link #blockSize()}. If the percentage of free space in the current block
+ * is less than this specified number and adding a new record to the block
+ * will exceed the configured block size, then this block will be closed and
+ * the new record will be written to the next block.
+ *
+ * Default is 10.
+ *
+ * @param blockSizeDeviation the deviation to block size allowed
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setBlockSizeDeviation(
+ final int blockSizeDeviation) {
+ this.blockSizeDeviation = blockSizeDeviation;
+ return this;
+ }
+
+ /**
+ * Get the block restart interval.
+ *
+ * @return block restart interval
+ */
+ public int blockRestartInterval() {
+ return blockRestartInterval;
+ }
+
+ /**
+ * Set the block restart interval.
+ *
+ * @param restartInterval block restart interval.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setBlockRestartInterval(
+ final int restartInterval) {
+ blockRestartInterval = restartInterval;
+ return this;
+ }
+
+ /**
+ * Get the index block restart interval.
+ *
+ * @return index block restart interval
+ */
+ public int indexBlockRestartInterval() {
+ return indexBlockRestartInterval;
+ }
+
+ /**
+ * Set the index block restart interval
+ *
+ * @param restartInterval index block restart interval.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setIndexBlockRestartInterval(
+ final int restartInterval) {
+ indexBlockRestartInterval = restartInterval;
+ return this;
+ }
+
+ /**
+ * Get the block size for partitioned metadata.
+ *
+ * @return block size for partitioned metadata.
+ */
+ public long metadataBlockSize() {
+ return metadataBlockSize;
+ }
+
+ /**
+ * Set block size for partitioned metadata.
+ *
+ * @param metadataBlockSize Partitioned metadata block size.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setMetadataBlockSize(
+ final long metadataBlockSize) {
+ this.metadataBlockSize = metadataBlockSize;
+ return this;
+ }
+
+ /**
+ * Indicates if we're using partitioned filters.
+ *
+ * @return true if we are using partitioned filters.
+ */
+ public boolean partitionFilters() {
+ return partitionFilters;
+ }
+
+ /**
+ * Use partitioned full filters for each SST file. This option is incompatible
+ * with block-based filters.
+ *
+ * Defaults to false.
+ *
+ * @param partitionFilters use partition filters.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setPartitionFilters(final boolean partitionFilters) {
+ this.partitionFilters = partitionFilters;
+ return this;
+ }
+
+ /**
+ * Determine if delta encoding is being used to compress block keys.
+ *
+ * @return true if delta encoding is enabled, false otherwise.
+ */
+ public boolean useDeltaEncoding() {
+ return useDeltaEncoding;
+ }
+
+ /**
+ * Use delta encoding to compress keys in blocks.
+ *
+ * NOTE: {@link ReadOptions#pinData()} requires this option to be disabled.
+ *
+ * Default: true
+ *
+ * @param useDeltaEncoding true to enable delta encoding
+ *
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setUseDeltaEncoding(
+ final boolean useDeltaEncoding) {
+ this.useDeltaEncoding = useDeltaEncoding;
+ return this;
+ }
+
+ /**
+ * Get the filter policy.
+ *
+ * @return the current filter policy.
+ */
+ public Filter filterPolicy() {
+ return filterPolicy;
+ }
+
+ /**
+ * Use the specified filter policy to reduce disk reads.
+ *
+ * The {@link org.rocksdb.Filter} should not be disposed before options instances
+ * using this filter are disposed. If the {@link Filter#dispose()} function is not
+ * called, the filter object will be GC'd automatically.
+ *
+ * {@link org.rocksdb.Filter} instance can be re-used in multiple options
+ * instances.
+ *
+ * @param filterPolicy {@link org.rocksdb.Filter} Filter Policy java instance.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setFilterPolicy(
+ final Filter filterPolicy) {
+ this.filterPolicy = filterPolicy;
+ return this;
+ }
+
+ /**
+ * Set the filter.
+ *
+ * @param filter the filter
+ * @return the reference to the current config.
+ *
+ * @deprecated Use {@link #setFilterPolicy(Filter)}
+ */
+ @Deprecated
+ public BlockBasedTableConfig setFilter(
+ final Filter filter) {
+ return setFilterPolicy(filter);
+ }
+
+ /**
+ * Determine if whole keys as opposed to prefixes are placed in the filter.
+ *
+ * @return if whole key filtering is enabled
+ */
+ public boolean wholeKeyFiltering() {
+ return wholeKeyFiltering;
+ }
+
+ /**
+ * If true, place whole keys in the filter (not just prefixes).
+ * This must generally be true for gets to be efficient.
+ * Default: true
+ *
+ * @param wholeKeyFiltering true to enable whole key filtering
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setWholeKeyFiltering(
+ final boolean wholeKeyFiltering) {
+ this.wholeKeyFiltering = wholeKeyFiltering;
+ return this;
+ }
+
+ /**
+ * Returns true when compression verification is enabled.
+ *
+ * See {@link #setVerifyCompression(boolean)}.
+ *
+ * @return true if compression verification is enabled.
+ */
+ public boolean verifyCompression() {
+ return verifyCompression;
+ }
+
+ /**
+ * Verify that decompressing the compressed block gives back the input. This
+ * is a verification mode that we use to detect bugs in compression
+ * algorithms.
+ *
+ * @param verifyCompression true to enable compression verification.
+ *
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setVerifyCompression(
+ final boolean verifyCompression) {
+ this.verifyCompression = verifyCompression;
+ return this;
+ }
+
+ /**
+ * Get the Read amplification bytes per-bit.
+ *
+ * See {@link #setReadAmpBytesPerBit(int)}.
+ *
+ * @return the bytes per-bit.
+ */
+ public int readAmpBytesPerBit() {
+ return readAmpBytesPerBit;
+ }
+
+ /**
+ * Set the Read amplification bytes per-bit.
+ *
+ * If used, for every data block we load into memory, we will create a bitmap
+ * of size ((block_size / `read_amp_bytes_per_bit`) / 8) bytes. This bitmap
+ * will be used to figure out the percentage we actually read of the blocks.
+ *
+ * When this feature is used Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES and
+ * Tickers::READ_AMP_TOTAL_READ_BYTES can be used to calculate the
+ * read amplification using this formula
+ * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
+ *
+ * value =&gt; memory usage (percentage of loaded blocks memory)
+ * 1 =&gt; 12.50 %
+ * 2 =&gt; 06.25 %
+ * 4 =&gt; 03.12 %
+ * 8 =&gt; 01.56 %
+ * 16 =&gt; 00.78 %
+ *
+ * Note: This number must be a power of 2; if not, it will be sanitized
+ * to the next lowest power of 2. For example, a value of 7 will be
+ * treated as 4, and a value of 19 will be treated as 16.
+ *
+ * Default: 0 (disabled)
+ *
+ * @param readAmpBytesPerBit the bytes per-bit
+ *
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setReadAmpBytesPerBit(final int readAmpBytesPerBit) {
+ this.readAmpBytesPerBit = readAmpBytesPerBit;
+ return this;
+ }
+
+ /**
+ * Get the format version.
+ * See {@link #setFormatVersion(int)}.
+ *
+ * @return the currently configured format version.
+ */
+ public int formatVersion() {
+ return formatVersion;
+ }
+
+ /**
+ * <p>We currently have five versions:</p>
+ *
+ * <ul>
+ * <li><strong>0</strong> - This version is currently written
+ * out by all RocksDB versions by default. Can be read by very old
+ * RocksDB versions. Doesn't support changing the checksum (default is CRC32).</li>
+ * <li><strong>1</strong> - Can be read by RocksDB versions since 3.0.
+ * Supports non-default checksum, like xxHash. It is written by RocksDB when
+ * BlockBasedTableOptions::checksum is something other than kCRC32c. (version
+ * 0 is silently upconverted)</li>
+ * <li><strong>2</strong> - Can be read by RocksDB versions since 3.10.
+ * Changes the way we encode compressed blocks with LZ4, BZip2 and Zlib
+ * compression. If you don't plan to run RocksDB before version 3.10,
+ * you should probably use this.</li>
+ * <li><strong>3</strong> - Can be read by RocksDB versions since 5.15. Changes the way we
+ * encode the keys in index blocks. If you don't plan to run RocksDB before
+ * version 5.15, you should probably use this.
+ * This option only affects newly written tables. When reading existing
+ * tables, the information about version is read from the footer.</li>
+ * <li><strong>4</strong> - Can be read by RocksDB versions since 5.16. Changes the way we
+ * encode the values in index blocks. If you don't plan to run RocksDB before
+ * version 5.16 and you are using index_block_restart_interval &gt; 1, you should
+ * probably use this as it would reduce the index size.</li>
+ * </ul>
+ * <p> This option only affects newly written tables. When reading existing
+ * tables, the information about version is read from the footer.</p>
+ *
+ * @param formatVersion integer representing the version to be used.
+ *
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setFormatVersion(
+ final int formatVersion) {
+ assert(formatVersion >= 0 && formatVersion <= 4);
+ this.formatVersion = formatVersion;
+ return this;
+ }
+
+ /**
+ * Determine if index compression is enabled.
+ *
+ * See {@link #setEnableIndexCompression(boolean)}.
+ *
+ * @return true if index compression is enabled, false otherwise
+ */
+ public boolean enableIndexCompression() {
+ return enableIndexCompression;
+ }
+
+ /**
+ * Store index blocks on disk in compressed format.
+ *
+ * Changing this option to false will avoid the overhead of decompression
+ * if index blocks are evicted and read back.
+ *
+ * @param enableIndexCompression true to enable index compression,
+ * false to disable
+ *
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setEnableIndexCompression(
+ final boolean enableIndexCompression) {
+ this.enableIndexCompression = enableIndexCompression;
+ return this;
+ }
+
+ /**
+ * Determines whether data blocks are aligned on the lesser of page size
+ * and block size.
+ *
+ * @return true if data blocks are aligned on the lesser of page size
+ * and block size.
+ */
+ public boolean blockAlign() {
+ return blockAlign;
+ }
+
+ /**
+ * Set whether data blocks should be aligned on the lesser of page size
+ * and block size.
+ *
+ * @param blockAlign true to align data blocks on the lesser of page size
+ * and block size.
+ *
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setBlockAlign(final boolean blockAlign) {
+ this.blockAlign = blockAlign;
+ return this;
+ }
+
+ /**
+ * Get the size of the cache in bytes that will be used by RocksDB.
+ *
+ * @return block cache size in bytes
+ */
+ @Deprecated
+ public long blockCacheSize() {
+ return blockCacheSize;
+ }
+
+ /**
+ * Set the size of the cache in bytes that will be used by RocksDB.
+ * If blockCacheSize is negative, the block cache will not be used.
+ * DEFAULT: 8M
+ *
+ * @param blockCacheSize block cache size in bytes
+ * @return the reference to the current config.
+ *
+ * @deprecated Use {@link #setBlockCache(Cache)}.
+ */
+ @Deprecated
+ public BlockBasedTableConfig setBlockCacheSize(final long blockCacheSize) {
+ this.blockCacheSize = blockCacheSize;
+ return this;
+ }
+
+ /**
+ * Returns the number of shard bits used in the block cache.
+ * The resulting number of shards would be 2 ^ (returned value).
+ * Any negative number means use default settings.
+ *
+ * @return the number of shard bits used in the block cache.
+ */
+ @Deprecated
+ public int cacheNumShardBits() {
+ return blockCacheNumShardBits;
+ }
+
+ /**
+ * Controls the number of shards for the block cache.
+ * This is applied only if blockCacheSize is set to a non-negative value.
+ *
+ * @param blockCacheNumShardBits the number of shard bits. The resulting
+ * number of shards would be 2 ^ numShardBits. Any negative
+ * number means use default settings."
+ * @return the reference to the current option.
+ *
+ * @deprecated Use {@link #setBlockCache(Cache)}.
+ */
+ @Deprecated
+ public BlockBasedTableConfig setCacheNumShardBits(
+ final int blockCacheNumShardBits) {
+ this.blockCacheNumShardBits = blockCacheNumShardBits;
+ return this;
+ }
+
+ /**
+ * Size of compressed block cache. If 0, then block_cache_compressed is set
+ * to null.
+ *
+ * @return size of compressed block cache.
+ */
+ @Deprecated
+ public long blockCacheCompressedSize() {
+ return blockCacheCompressedSize;
+ }
+
+ /**
+ * Size of compressed block cache. If 0, then block_cache_compressed is set
+ * to null.
+ *
+ * @param blockCacheCompressedSize of compressed block cache.
+ * @return the reference to the current config.
+ *
+ * @deprecated Use {@link #setBlockCacheCompressed(Cache)}.
+ */
+ @Deprecated
+ public BlockBasedTableConfig setBlockCacheCompressedSize(
+ final long blockCacheCompressedSize) {
+ this.blockCacheCompressedSize = blockCacheCompressedSize;
+ return this;
+ }
+
+ /**
+ * Controls the number of shards for the block compressed cache.
+ * This is applied only if blockCacheCompressedSize is set to a non-negative value.
+ *
+ * @return the number of shard bits. The resulting
+ * number of shards would be 2 ^ numShardBits. Any negative
+ * number means use default settings.
+ */
+ @Deprecated
+ public int blockCacheCompressedNumShardBits() {
+ return blockCacheCompressedNumShardBits;
+ }
+
+ /**
+ * Controls the number of shards for the block compressed cache.
+ * This is applied only if blockCacheCompressedSize is set to a non-negative value.
+ *
+ * @param blockCacheCompressedNumShardBits the number of shard bits. The resulting
+ * number of shards would be 2 ^ numShardBits. Any negative
+ * number means use default settings."
+ * @return the reference to the current option.
+ *
+ * @deprecated Use {@link #setBlockCacheCompressed(Cache)}.
+ */
+ @Deprecated
+ public BlockBasedTableConfig setBlockCacheCompressedNumShardBits(
+ final int blockCacheCompressedNumShardBits) {
+ this.blockCacheCompressedNumShardBits = blockCacheCompressedNumShardBits;
+ return this;
+ }
+
+ /**
+ * Influence the behavior when kHashSearch is used.
+ * If false, stores a precise prefix-to-block-range mapping.
+ * If true, does not store the prefix and allows prefix hash collisions
+ * (less memory consumption).
+ *
+ * @return if hash collisions should be allowed.
+ *
+ * @deprecated This option is now deprecated. No matter what value it
+ * is set to, it will behave as
+ * if {@link #hashIndexAllowCollision()} == true.
+ */
+ @Deprecated
+ public boolean hashIndexAllowCollision() {
+ return true;
+ }
+
+ /**
+ * Influence the behavior when kHashSearch is used.
+ * If false, stores a precise prefix-to-block-range mapping.
+ * If true, does not store the prefix and allows prefix hash collisions
+ * (less memory consumption).
+ *
+ * @param hashIndexAllowCollision points out if hash collisions should be allowed.
+ *
+ * @return the reference to the current config.
+ *
+ * @deprecated This option is now deprecated. No matter what value it
+ * is set to, it will behave as
+ * if {@link #hashIndexAllowCollision()} == true.
+ */
+ @Deprecated
+ public BlockBasedTableConfig setHashIndexAllowCollision(
+ final boolean hashIndexAllowCollision) {
+ // no-op
+ return this;
+ }
+
+ @Override protected long newTableFactoryHandle() {
+ final long filterPolicyHandle;
+ if (filterPolicy != null) {
+ filterPolicyHandle = filterPolicy.nativeHandle_;
+ } else {
+ filterPolicyHandle = 0;
+ }
+
+ final long blockCacheHandle;
+ if (blockCache != null) {
+ blockCacheHandle = blockCache.nativeHandle_;
+ } else {
+ blockCacheHandle = 0;
+ }
+
+ final long persistentCacheHandle;
+ if (persistentCache != null) {
+ persistentCacheHandle = persistentCache.nativeHandle_;
+ } else {
+ persistentCacheHandle = 0;
+ }
+
+ final long blockCacheCompressedHandle;
+ if (blockCacheCompressed != null) {
+ blockCacheCompressedHandle = blockCacheCompressed.nativeHandle_;
+ } else {
+ blockCacheCompressedHandle = 0;
+ }
+
+ return newTableFactoryHandle(cacheIndexAndFilterBlocks,
+ cacheIndexAndFilterBlocksWithHighPriority,
+ pinL0FilterAndIndexBlocksInCache, pinTopLevelIndexAndFilter,
+ indexType.getValue(), dataBlockIndexType.getValue(),
+ dataBlockHashTableUtilRatio, checksumType.getValue(), noBlockCache,
+ blockCacheHandle, persistentCacheHandle, blockCacheCompressedHandle,
+ blockSize, blockSizeDeviation, blockRestartInterval,
+ indexBlockRestartInterval, metadataBlockSize, partitionFilters,
+ useDeltaEncoding, filterPolicyHandle, wholeKeyFiltering,
+ verifyCompression, readAmpBytesPerBit, formatVersion,
+ enableIndexCompression, blockAlign,
+ blockCacheSize, blockCacheNumShardBits,
+ blockCacheCompressedSize, blockCacheCompressedNumShardBits);
+ }
+
+ private native long newTableFactoryHandle(
+ final boolean cacheIndexAndFilterBlocks,
+ final boolean cacheIndexAndFilterBlocksWithHighPriority,
+ final boolean pinL0FilterAndIndexBlocksInCache,
+ final boolean pinTopLevelIndexAndFilter,
+ final byte indexTypeValue,
+ final byte dataBlockIndexTypeValue,
+ final double dataBlockHashTableUtilRatio,
+ final byte checksumTypeValue,
+ final boolean noBlockCache,
+ final long blockCacheHandle,
+ final long persistentCacheHandle,
+ final long blockCacheCompressedHandle,
+ final long blockSize,
+ final int blockSizeDeviation,
+ final int blockRestartInterval,
+ final int indexBlockRestartInterval,
+ final long metadataBlockSize,
+ final boolean partitionFilters,
+ final boolean useDeltaEncoding,
+ final long filterPolicyHandle,
+ final boolean wholeKeyFiltering,
+ final boolean verifyCompression,
+ final int readAmpBytesPerBit,
+ final int formatVersion,
+ final boolean enableIndexCompression,
+ final boolean blockAlign,
+
+ @Deprecated final long blockCacheSize,
+ @Deprecated final int blockCacheNumShardBits,
+
+ @Deprecated final long blockCacheCompressedSize,
+ @Deprecated final int blockCacheCompressedNumShardBits
+ );
+
+ //TODO(AR) flushBlockPolicyFactory
+ private boolean cacheIndexAndFilterBlocks;
+ private boolean cacheIndexAndFilterBlocksWithHighPriority;
+ private boolean pinL0FilterAndIndexBlocksInCache;
+ private boolean pinTopLevelIndexAndFilter;
+ private IndexType indexType;
+ private DataBlockIndexType dataBlockIndexType;
+ private double dataBlockHashTableUtilRatio;
+ private ChecksumType checksumType;
+ private boolean noBlockCache;
+ private Cache blockCache;
+ private PersistentCache persistentCache;
+ private Cache blockCacheCompressed;
+ private long blockSize;
+ private int blockSizeDeviation;
+ private int blockRestartInterval;
+ private int indexBlockRestartInterval;
+ private long metadataBlockSize;
+ private boolean partitionFilters;
+ private boolean useDeltaEncoding;
+ private Filter filterPolicy;
+ private boolean wholeKeyFiltering;
+ private boolean verifyCompression;
+ private int readAmpBytesPerBit;
+ private int formatVersion;
+ private boolean enableIndexCompression;
+ private boolean blockAlign;
+
+ // NOTE: ONLY used if blockCache == null
+ @Deprecated private long blockCacheSize;
+ @Deprecated private int blockCacheNumShardBits;
+
+ // NOTE: ONLY used if blockCacheCompressed == null
+ @Deprecated private long blockCacheCompressedSize;
+ @Deprecated private int blockCacheCompressedNumShardBits;
+}
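A minimal sketch (not part of the patch) of wiring this config into an options instance, assuming the org.rocksdb classes referenced in this patch plus Options#setTableFormatConfig and the LRUCache(long) constructor; sizes are illustrative only:

    // Block-based table with an explicit shared block cache and a full
    // Bloom filter; prefer setBlockCache(Cache) over the deprecated
    // setBlockCacheSize(long).
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
        .setBlockSize(16 * 1024)                        // 16 KiB data blocks
        .setBlockCache(new LRUCache(64 * 1024 * 1024))  // 64 MiB cache
        .setCacheIndexAndFilterBlocks(true)
        .setPinL0FilterAndIndexBlocksInCache(true)
        .setFilterPolicy(new BloomFilter(10, false))    // full filter mode
        .setFormatVersion(4);

    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setTableFormatConfig(tableConfig)) {
      // pass options to RocksDB.open(...)
    }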
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java
new file mode 100644
index 000000000..0a119878a
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java
@@ -0,0 +1,79 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Bloom filter policy that uses a bloom filter with approximately
+ * the specified number of bits per key.
+ *
+ * <p>
+ * Note: if you are using a custom comparator that ignores some parts
+ * of the keys being compared, you must not use this {@code BloomFilter}
+ * and must provide your own FilterPolicy that also ignores the
+ * corresponding parts of the keys. For example, if the comparator
+ * ignores trailing spaces, it would be incorrect to use a
+ * FilterPolicy (like {@code BloomFilter}) that does not ignore
+ * trailing spaces in keys.</p>
+ */
+public class BloomFilter extends Filter {
+
+ private static final double DEFAULT_BITS_PER_KEY = 10.0;
+ private static final boolean DEFAULT_MODE = true;
+
+ /**
+ * BloomFilter constructor
+ *
+ * <p>
+ * Callers must delete the result after any database that is using the
+ * result has been closed.</p>
+ */
+ public BloomFilter() {
+ this(DEFAULT_BITS_PER_KEY, DEFAULT_MODE);
+ }
+
+ /**
+ * BloomFilter constructor
+ *
+ * <p>
+ * bits_per_key: bits per key in bloom filter. A good value for bits_per_key
+ * is 9.9, which yields a filter with ~ 1% false positive rate.
+ * </p>
+ * <p>
+ * Callers must delete the result after any database that is using the
+ * result has been closed.</p>
+ *
+ * @param bitsPerKey number of bits to use
+ */
+ public BloomFilter(final double bitsPerKey) {
+ this(bitsPerKey, DEFAULT_MODE);
+ }
+
+ /**
+ * BloomFilter constructor
+ *
+ * <p>
+ * bits_per_key: bits per key in bloom filter. A good value for bits_per_key
+ * is 10, which yields a filter with ~ 1% false positive rate.</p>
+ * <p><strong>default bits_per_key</strong>: 10</p>
+ *
+ * <p>use_block_based_builder: use block-based filter rather than full filter.
+ * If you want to build a full filter, it needs to be set to false.
+ * </p>
+ * <p><strong>default mode: block based filter</strong></p>
+ * <p>
+ * Callers must delete the result after any database that is using the
+ * result has been closed.</p>
+ *
+ * @param bitsPerKey number of bits to use
+ * @param useBlockBasedMode use block based mode or full filter mode
+ */
+ public BloomFilter(final double bitsPerKey, final boolean useBlockBasedMode) {
+ super(createNewBloomFilter(bitsPerKey, useBlockBasedMode));
+ }
+
+ private native static long createNewBloomFilter(final double bitsPerKey,
+ final boolean useBlockBasedMode);
+}
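A short sketch of the bits-per-key trade-off described in the javadoc above; the false-positive figures are the approximations quoted there, not guarantees:

    // Assumes org.rocksdb.* imports.
    final BloomFilter defaultFilter = new BloomFilter();        // ~10 bits/key, ~1% FPR
    final BloomFilter tightFilter = new BloomFilter(16, false); // more bits, lower FPR,
                                                                // full (non-block-based) mode
    final BlockBasedTableConfig cfg =
        new BlockBasedTableConfig().setFilterPolicy(tightFilter);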
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java b/src/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java
new file mode 100644
index 000000000..2c89bf218
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java
@@ -0,0 +1,20 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Builtin RocksDB comparators
+ *
+ * <ol>
+ * <li>BYTEWISE_COMPARATOR - Sorts all keys in ascending bytewise
+ * order.</li>
+ * <li>REVERSE_BYTEWISE_COMPARATOR - Sorts all keys in descending bytewise
+ * order</li>
+ * </ol>
+ */
+public enum BuiltinComparator {
+ BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR
+}
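A sketch of selecting a builtin comparator, using the ColumnFamilyOptions#setComparator(BuiltinComparator) overload that appears later in this patch:

    // Sort keys of this column family in descending bytewise order.
    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
             .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR)) {
      // wrap cfOpts in a ColumnFamilyDescriptor when opening the database
    }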
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Cache.java b/src/rocksdb/java/src/main/java/org/rocksdb/Cache.java
new file mode 100644
index 000000000..3952e1d10
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Cache.java
@@ -0,0 +1,13 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+
+public abstract class Cache extends RocksObject {
+ protected Cache(final long nativeHandle) {
+ super(nativeHandle);
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
new file mode 100644
index 000000000..6c87cc188
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
@@ -0,0 +1,19 @@
+// Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Just a Java wrapper around CassandraCompactionFilter implemented in C++
+ */
+public class CassandraCompactionFilter
+ extends AbstractCompactionFilter<Slice> {
+ public CassandraCompactionFilter(boolean purgeTtlOnExpiration, int gcGracePeriodInSeconds) {
+ super(createNewCassandraCompactionFilter0(purgeTtlOnExpiration, gcGracePeriodInSeconds));
+ }
+
+ private native static long createNewCassandraCompactionFilter0(
+ boolean purgeTtlOnExpiration, int gcGracePeriodInSeconds);
+}
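A usage sketch, installing the filter through the ColumnFamilyOptions#setCompactionFilter setter that appears later in this patch; the grace-period value is illustrative:

    // Purge expired Cassandra TTL data during compaction, with a
    // 10-day (864000 s) gc grace period.
    final CassandraCompactionFilter filter =
        new CassandraCompactionFilter(true, 864000);
    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      cfOpts.setCompactionFilter(filter);
    }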
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/src/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java
new file mode 100644
index 000000000..4b0c71ba5
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java
@@ -0,0 +1,25 @@
+// Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * CassandraValueMergeOperator is a merge operator that merges two Cassandra
+ * wide-column values.
+ */
+public class CassandraValueMergeOperator extends MergeOperator {
+ public CassandraValueMergeOperator(int gcGracePeriodInSeconds) {
+ super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, 0));
+ }
+
+ public CassandraValueMergeOperator(int gcGracePeriodInSeconds, int operandsLimit) {
+ super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit));
+ }
+
+ private native static long newSharedCassandraValueMergeOperator(
+ int gcGracePeriodInSeconds, int limit);
+
+ @Override protected final native void disposeInternal(final long handle);
+}
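A matching sketch for the merge operator, via the ColumnFamilyOptions#setMergeOperator setter from later in this patch; both values are illustrative:

    // Merge Cassandra wide-column values; cap stored merge operands at 32.
    final CassandraValueMergeOperator mergeOp =
        new CassandraValueMergeOperator(864000, 32);
    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      cfOpts.setMergeOperator(mergeOp);
    }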
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java b/src/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java
new file mode 100644
index 000000000..000969932
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java
@@ -0,0 +1,66 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Provides Checkpoint functionality. Checkpoints
+ * provide persistent snapshots of RocksDB databases.
+ */
+public class Checkpoint extends RocksObject {
+
+ /**
+ * Creates a Checkpoint object to be used for creating open-able
+ * snapshots.
+ *
+ * @param db {@link RocksDB} instance.
+ * @return a Checkpoint instance.
+ *
+ * @throws java.lang.IllegalArgumentException if {@link RocksDB}
+ * instance is null.
+ * @throws java.lang.IllegalStateException if {@link RocksDB}
+ * instance is not initialized.
+ */
+ public static Checkpoint create(final RocksDB db) {
+ if (db == null) {
+ throw new IllegalArgumentException(
+ "RocksDB instance shall not be null.");
+ } else if (!db.isOwningHandle()) {
+ throw new IllegalStateException(
+ "RocksDB instance must be initialized.");
+ }
+ Checkpoint checkpoint = new Checkpoint(db);
+ return checkpoint;
+ }
+
+ /**
+ * <p>Builds an open-able snapshot of RocksDB on the same disk, which
+ * accepts an output directory on the same disk, and under the directory
+ * (1) hard-linked SST files pointing to existing live SST files
+ * (2) copied manifest and other files</p>
+ *
+ * @param checkpointPath path to the folder where the snapshot is going
+ * to be stored.
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void createCheckpoint(final String checkpointPath)
+ throws RocksDBException {
+ createCheckpoint(nativeHandle_, checkpointPath);
+ }
+
+ private Checkpoint(final RocksDB db) {
+ super(newCheckpoint(db.nativeHandle_));
+ this.db_ = db;
+ }
+
+ private final RocksDB db_;
+
+ private static native long newCheckpoint(long dbHandle);
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native void createCheckpoint(long handle, String checkpointPath)
+ throws RocksDBException;
+}
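A minimal sketch of taking a checkpoint, assuming "db" is an already-open RocksDB instance and the target directory does not yet exist (RocksDB creates it); createCheckpoint throws RocksDBException on failure:

    try (final Checkpoint checkpoint = Checkpoint.create(db)) {
      // Hard-links live SST files and copies the manifest into the directory.
      checkpoint.createCheckpoint("/path/to/snapshot-dir");
    }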
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java b/src/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java
new file mode 100644
index 000000000..def9f2e9f
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java
@@ -0,0 +1,39 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Checksum types used in conjunction with BlockBasedTable.
+ */
+public enum ChecksumType {
+ /**
+ * Not implemented yet.
+ */
+ kNoChecksum((byte) 0),
+ /**
+ * CRC32 Checksum
+ */
+ kCRC32c((byte) 1),
+ /**
+ * XX Hash
+ */
+ kxxHash((byte) 2);
+
+ /**
+ * Returns the byte value of the enumerations value
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value_;
+ }
+
+ private ChecksumType(byte value) {
+ value_ = value;
+ }
+
+ private final byte value_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java b/src/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java
new file mode 100644
index 000000000..a66dc0e8a
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java
@@ -0,0 +1,59 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Similar to {@link LRUCache}, but based on the CLOCK algorithm with
+ * better concurrent performance in some cases.
+ */
+public class ClockCache extends Cache {
+
+ /**
+ * Create a new cache with a fixed size capacity.
+ *
+ * @param capacity The fixed size capacity of the cache
+ */
+ public ClockCache(final long capacity) {
+ super(newClockCache(capacity, -1, false));
+ }
+
+ /**
+ * Create a new cache with a fixed size capacity. The cache is sharded
+ * to 2^numShardBits shards, by hash of the key. The total capacity
+ * is divided and evenly assigned to each shard.
+ * numShardBits = -1 means it is automatically determined: every shard
+ * will be at least 512KB and the number of shard bits will not exceed 6.
+ *
+ * @param capacity The fixed size capacity of the cache
+ * @param numShardBits The cache is sharded to 2^numShardBits shards,
+ * by hash of the key
+ */
+ public ClockCache(final long capacity, final int numShardBits) {
+ super(newClockCache(capacity, numShardBits, false));
+ }
+
+ /**
+ * Create a new cache with a fixed size capacity. The cache is sharded
+ * to 2^numShardBits shards, by hash of the key. The total capacity
+ * is divided and evenly assigned to each shard. If strictCapacityLimit
+ * is set, insert to the cache will fail when cache is full.
+ * numShardBits = -1 means it is automatically determined: every shard
+ * will be at least 512KB and the number of shard bits will not exceed 6.
+ *
+ * @param capacity The fixed size capacity of the cache
+ * @param numShardBits The cache is sharded to 2^numShardBits shards,
+ * by hash of the key
+ * @param strictCapacityLimit if true, inserts will fail when the cache is full
+ */
+ public ClockCache(final long capacity, final int numShardBits,
+ final boolean strictCapacityLimit) {
+ super(newClockCache(capacity, numShardBits, strictCapacityLimit));
+ }
+
+ private native static long newClockCache(final long capacity,
+ final int numShardBits, final boolean strictCapacityLimit);
+ @Override protected final native void disposeInternal(final long handle);
+}
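A sketch of sharing a CLOCK cache across table configs; capacity and shard bits are illustrative:

    // 128 MiB cache split into 2^6 = 64 shards, failing inserts when full.
    final ClockCache cache = new ClockCache(128 * 1024 * 1024, 6, true);
    final BlockBasedTableConfig cfg =
        new BlockBasedTableConfig().setBlockCache(cache);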
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java
new file mode 100644
index 000000000..8bb570e5d
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java
@@ -0,0 +1,109 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+
+/**
+ * <p>Describes a column family with a
+ * name and respective Options.</p>
+ */
+public class ColumnFamilyDescriptor {
+
+ /**
+ * <p>Creates a new Column Family using a name and default
+ * options.</p>
+ *
+ * @param columnFamilyName name of column family.
+ * @since 3.10.0
+ */
+ public ColumnFamilyDescriptor(final byte[] columnFamilyName) {
+ this(columnFamilyName, new ColumnFamilyOptions());
+ }
+
+ /**
+ * <p>Creates a new Column Family using a name and custom
+ * options.</p>
+ *
+ * @param columnFamilyName name of column family.
+ * @param columnFamilyOptions options to be used with
+ * column family.
+ * @since 3.10.0
+ */
+ public ColumnFamilyDescriptor(final byte[] columnFamilyName,
+ final ColumnFamilyOptions columnFamilyOptions) {
+ columnFamilyName_ = columnFamilyName;
+ columnFamilyOptions_ = columnFamilyOptions;
+ }
+
+ /**
+ * Retrieve name of column family.
+ *
+ * @return column family name.
+ * @since 3.10.0
+ */
+ public byte[] getName() {
+ return columnFamilyName_;
+ }
+
+ /**
+ * Retrieve name of column family.
+ *
+ * @return column family name.
+ * @since 3.10.0
+ *
+ * @deprecated Use {@link #getName()} instead.
+ */
+ @Deprecated
+ public byte[] columnFamilyName() {
+ return getName();
+ }
+
+ /**
+ * Retrieve assigned options instance.
+ *
+ * @return Options instance assigned to this instance.
+ */
+ public ColumnFamilyOptions getOptions() {
+ return columnFamilyOptions_;
+ }
+
+ /**
+ * Retrieve assigned options instance.
+ *
+ * @return Options instance assigned to this instance.
+ *
+ * @deprecated Use {@link #getOptions()} instead.
+ */
+ @Deprecated
+ public ColumnFamilyOptions columnFamilyOptions() {
+ return getOptions();
+ }
+
+ @Override
+ public boolean equals(final Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ final ColumnFamilyDescriptor that = (ColumnFamilyDescriptor) o;
+ return Arrays.equals(columnFamilyName_, that.columnFamilyName_)
+ && columnFamilyOptions_.nativeHandle_ == that.columnFamilyOptions_.nativeHandle_;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = (int) (columnFamilyOptions_.nativeHandle_ ^ (columnFamilyOptions_.nativeHandle_ >>> 32));
+ result = 31 * result + Arrays.hashCode(columnFamilyName_);
+ return result;
+ }
+
+ private final byte[] columnFamilyName_;
+ private final ColumnFamilyOptions columnFamilyOptions_;
+}
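A sketch of building descriptors for RocksDB.open; the default column family must use the RocksDB.DEFAULT_COLUMN_FAMILY name constant:

    // Assumes java.util.* and java.nio.charset.StandardCharsets imports.
    final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor("users".getBytes(StandardCharsets.UTF_8),
            new ColumnFamilyOptions()));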
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
new file mode 100644
index 000000000..9cda136b7
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
@@ -0,0 +1,115 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.Objects;
+
+/**
+ * ColumnFamilyHandle class to hold handles to underlying RocksDB
+ * ColumnFamily pointers.
+ */
+public class ColumnFamilyHandle extends RocksObject {
+ ColumnFamilyHandle(final RocksDB rocksDB,
+ final long nativeHandle) {
+ super(nativeHandle);
+ // rocksDB must point to a valid RocksDB instance;
+ assert(rocksDB != null);
+ // ColumnFamilyHandle must hold a reference to the related RocksDB instance
+ // to guarantee that while a GC cycle starts ColumnFamilyHandle instances
+ // are freed prior to RocksDB instances.
+ this.rocksDB_ = rocksDB;
+ }
+
+ /**
+ * Gets the name of the Column Family.
+ *
+ * @return The name of the Column Family.
+ *
+ * @throws RocksDBException if an error occurs whilst retrieving the name.
+ */
+ public byte[] getName() throws RocksDBException {
+ return getName(nativeHandle_);
+ }
+
+ /**
+ * Gets the ID of the Column Family.
+ *
+ * @return the ID of the Column Family.
+ */
+ public int getID() {
+ return getID(nativeHandle_);
+ }
+
+ /**
+ * Gets the up-to-date descriptor of the column family
+ * associated with this handle. Since it fills the returned descriptor with
+ * up-to-date information, this call might internally lock and release the
+ * DB mutex to access the up-to-date CF options. In addition, all the
+ * pointer-typed options cannot be referenced any longer than the original options exist.
+ *
+ * Note that this function is not supported in RocksDBLite.
+ *
+ * @return the up-to-date descriptor.
+ *
+ * @throws RocksDBException if an error occurs whilst retrieving the
+ * descriptor.
+ */
+ public ColumnFamilyDescriptor getDescriptor() throws RocksDBException {
+ assert(isOwningHandle());
+ return getDescriptor(nativeHandle_);
+ }
+
+ @Override
+ public boolean equals(final Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ final ColumnFamilyHandle that = (ColumnFamilyHandle) o;
+ try {
+ return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
+ getID() == that.getID() &&
+ Arrays.equals(getName(), that.getName());
+ } catch (RocksDBException e) {
+ throw new RuntimeException("Cannot compare column family handles", e);
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ try {
+ return Objects.hash(getName(), getID(), rocksDB_.nativeHandle_);
+ } catch (RocksDBException e) {
+ throw new RuntimeException("Cannot calculate hash code of column family handle", e);
+ }
+ }
+
+ /**
+ * <p>Deletes the underlying C++ column family handle pointer.</p>
+ *
+ * <p>Note: the underlying handle can only be safely deleted if the RocksDB
+ * instance related to a certain ColumnFamilyHandle is still valid and
+ * initialized. Therefore {@code disposeInternal()} checks if the RocksDB is
+ * initialized before freeing the native handle.</p>
+ */
+ @Override
+ protected void disposeInternal() {
+ if(rocksDB_.isOwningHandle()) {
+ disposeInternal(nativeHandle_);
+ }
+ }
+
+ private native byte[] getName(final long handle) throws RocksDBException;
+ private native int getID(final long handle);
+ private native ColumnFamilyDescriptor getDescriptor(final long handle) throws RocksDBException;
+ @Override protected final native void disposeInternal(final long handle);
+
+ private final RocksDB rocksDB_;
+}
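A sketch of obtaining handles when opening with explicit column families (continuing the descriptor list above); RocksDB.open fills the handle list in descriptor order and throws RocksDBException on failure:

    final List<ColumnFamilyHandle> handles = new ArrayList<>();
    try (final DBOptions dbOpts = new DBOptions()
             .setCreateIfMissing(true)
             .setCreateMissingColumnFamilies(true);
         final RocksDB db = RocksDB.open(dbOpts, "/path/to/db",
             descriptors, handles)) {
      db.put(handles.get(1), "key".getBytes(), "value".getBytes());
      // Close handles before the database that owns them.
      for (final ColumnFamilyHandle handle : handles) {
        handle.close();
      }
    }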
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java
new file mode 100644
index 000000000..191904017
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java
@@ -0,0 +1,70 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * The metadata that describes a column family.
+ */
+public class ColumnFamilyMetaData {
+ private final long size;
+ private final long fileCount;
+ private final byte[] name;
+ private final LevelMetaData[] levels;
+
+ /**
+ * Called from JNI C++
+ */
+ private ColumnFamilyMetaData(
+ final long size,
+ final long fileCount,
+ final byte[] name,
+ final LevelMetaData[] levels) {
+ this.size = size;
+ this.fileCount = fileCount;
+ this.name = name;
+ this.levels = levels;
+ }
+
+ /**
+ * The size of this column family in bytes, which is equal to the sum of
+ * the file size of its {@link #levels()}.
+ *
+ * @return the size of this column family
+ */
+ public long size() {
+ return size;
+ }
+
+ /**
+ * The number of files in this column family.
+ *
+ * @return the number of files
+ */
+ public long fileCount() {
+ return fileCount;
+ }
+
+ /**
+ * The name of the column family.
+ *
+ * @return the name
+ */
+ public byte[] name() {
+ return name;
+ }
+
+ /**
+ * The metadata of all levels in this column family.
+ *
+ * @return the levels metadata
+ */
+ public List<LevelMetaData> levels() {
+ return Arrays.asList(levels);
+ }
+}
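A sketch of reading this metadata, assuming the RocksDB#getColumnFamilyMetaData(ColumnFamilyHandle) accessor and the LevelMetaData level()/files() getters provided elsewhere in this patch series:

    final ColumnFamilyMetaData meta = db.getColumnFamilyMetaData(handle);
    System.out.printf("cf=%s size=%d files=%d%n",
        new String(meta.name()), meta.size(), meta.fileCount());
    for (final LevelMetaData level : meta.levels()) {
      System.out.printf("  level %d: %d files%n",
          level.level(), level.files().size());
    }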
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
new file mode 100644
index 000000000..07f07b3ec
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
@@ -0,0 +1,1001 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * ColumnFamilyOptions to control the behavior of a database. It will be used
+ * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
+ *
+ * If {@link #dispose()} function is not called, then it will be GC'd
+ * automatically and native resources will be released as part of the process.
+ */
+public class ColumnFamilyOptions extends RocksObject
+ implements ColumnFamilyOptionsInterface<ColumnFamilyOptions>,
+ MutableColumnFamilyOptionsInterface<ColumnFamilyOptions> {
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ /**
+ * Construct ColumnFamilyOptions.
+ *
+ * This constructor will create (by allocating a block of memory)
+ * an {@code rocksdb::ColumnFamilyOptions} in the c++ side.
+ */
+ public ColumnFamilyOptions() {
+ super(newColumnFamilyOptions());
+ }
+
+ /**
+ * Copy constructor for ColumnFamilyOptions.
+ *
+ * NOTE: This does a shallow copy, which means comparator, merge_operator, compaction_filter,
+ * compaction_filter_factory and other pointers will be cloned!
+ *
+ * @param other The ColumnFamilyOptions to copy.
+ */
+ public ColumnFamilyOptions(ColumnFamilyOptions other) {
+ super(copyColumnFamilyOptions(other.nativeHandle_));
+ this.memTableConfig_ = other.memTableConfig_;
+ this.tableFormatConfig_ = other.tableFormatConfig_;
+ this.comparator_ = other.comparator_;
+ this.compactionFilter_ = other.compactionFilter_;
+ this.compactionFilterFactory_ = other.compactionFilterFactory_;
+ this.compactionOptionsUniversal_ = other.compactionOptionsUniversal_;
+ this.compactionOptionsFIFO_ = other.compactionOptionsFIFO_;
+ this.bottommostCompressionOptions_ = other.bottommostCompressionOptions_;
+ this.compressionOptions_ = other.compressionOptions_;
+ }
+
+ /**
+ * Constructor from Options
+ *
+ * @param options The options.
+ */
+ public ColumnFamilyOptions(final Options options) {
+ super(newColumnFamilyOptionsFromOptions(options.nativeHandle_));
+ }
+
+ /**
+ * <p>Constructor to be used by
+ * {@link #getColumnFamilyOptionsFromProps(java.util.Properties)},
+ * {@link ColumnFamilyDescriptor#getOptions()}
+ * and also called via JNI.</p>
+ *
+ * @param handle native handle to ColumnFamilyOptions instance.
+ */
+ ColumnFamilyOptions(final long handle) {
+ super(handle);
+ }
+
+ /**
+ * <p>Method to get a options instance by using pre-configured
+ * property values. If one or many values are undefined in
+ * the context of RocksDB, the method will return a null
+ * value.</p>
+ *
+ * <p><strong>Note</strong>: Property keys can be derived from
+ * getter methods within the options class. Example: the method
+ * {@code writeBufferSize()} has a property key:
+ * {@code write_buffer_size}.</p>
+ *
+ * @param properties {@link java.util.Properties} instance.
+ *
+ * @return {@link org.rocksdb.ColumnFamilyOptions} instance
+ * or null.
+ *
+ * @throws java.lang.IllegalArgumentException if null or empty
+ * {@link Properties} instance is passed to the method call.
+ */
+ public static ColumnFamilyOptions getColumnFamilyOptionsFromProps(
+ final Properties properties) {
+ if (properties == null || properties.size() == 0) {
+ throw new IllegalArgumentException(
+ "Properties value must contain at least one value.");
+ }
+ ColumnFamilyOptions columnFamilyOptions = null;
+ StringBuilder stringBuilder = new StringBuilder();
+ for (final String name : properties.stringPropertyNames()){
+ stringBuilder.append(name);
+ stringBuilder.append("=");
+ stringBuilder.append(properties.getProperty(name));
+ stringBuilder.append(";");
+ }
+ long handle = getColumnFamilyOptionsFromProps(
+ stringBuilder.toString());
+ if (handle != 0){
+ columnFamilyOptions = new ColumnFamilyOptions(handle);
+ }
+ return columnFamilyOptions;
+ }
+
+ @Override
+ public ColumnFamilyOptions optimizeForSmallDb() {
+ optimizeForSmallDb(nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions optimizeForPointLookup(
+ final long blockCacheSizeMb) {
+ optimizeForPointLookup(nativeHandle_,
+ blockCacheSizeMb);
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions optimizeLevelStyleCompaction() {
+ optimizeLevelStyleCompaction(nativeHandle_,
+ DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET);
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions optimizeLevelStyleCompaction(
+ final long memtableMemoryBudget) {
+ optimizeLevelStyleCompaction(nativeHandle_,
+ memtableMemoryBudget);
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions optimizeUniversalStyleCompaction() {
+ optimizeUniversalStyleCompaction(nativeHandle_,
+ DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET);
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions optimizeUniversalStyleCompaction(
+ final long memtableMemoryBudget) {
+ optimizeUniversalStyleCompaction(nativeHandle_,
+ memtableMemoryBudget);
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions setComparator(
+ final BuiltinComparator builtinComparator) {
+ assert(isOwningHandle());
+ setComparatorHandle(nativeHandle_, builtinComparator.ordinal());
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions setComparator(
+ final AbstractComparator comparator) {
+ assert (isOwningHandle());
+ setComparatorHandle(nativeHandle_, comparator.nativeHandle_,
+ comparator.getComparatorType().getValue());
+ comparator_ = comparator;
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions setMergeOperatorName(final String name) {
+ assert (isOwningHandle());
+ if (name == null) {
+ throw new IllegalArgumentException(
+ "Merge operator name must not be null.");
+ }
+ setMergeOperatorName(nativeHandle_, name);
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions setMergeOperator(
+ final MergeOperator mergeOperator) {
+ setMergeOperator(nativeHandle_, mergeOperator.nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions setCompactionFilter(
+ final AbstractCompactionFilter<? extends AbstractSlice<?>>
+ compactionFilter) {
+ setCompactionFilterHandle(nativeHandle_, compactionFilter.nativeHandle_);
+ compactionFilter_ = compactionFilter;
+ return this;
+ }
+
+ @Override
+ public AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter() {
+ assert (isOwningHandle());
+ return compactionFilter_;
+ }
+
+ @Override
+ public ColumnFamilyOptions setCompactionFilterFactory(final AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>> compactionFilterFactory) {
+ assert (isOwningHandle());
+ setCompactionFilterFactoryHandle(nativeHandle_, compactionFilterFactory.nativeHandle_);
+ compactionFilterFactory_ = compactionFilterFactory;
+ return this;
+ }
+
+ @Override
+ public AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>> compactionFilterFactory() {
+ assert (isOwningHandle());
+ return compactionFilterFactory_;
+ }
+
+ @Override
+ public ColumnFamilyOptions setWriteBufferSize(final long writeBufferSize) {
+ assert(isOwningHandle());
+ setWriteBufferSize(nativeHandle_, writeBufferSize);
+ return this;
+ }
+
+ @Override
+ public long writeBufferSize() {
+ assert(isOwningHandle());
+ return writeBufferSize(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMaxWriteBufferNumber(
+ final int maxWriteBufferNumber) {
+ assert(isOwningHandle());
+ setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber);
+ return this;
+ }
+
+ @Override
+ public int maxWriteBufferNumber() {
+ assert(isOwningHandle());
+ return maxWriteBufferNumber(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMinWriteBufferNumberToMerge(
+ final int minWriteBufferNumberToMerge) {
+ setMinWriteBufferNumberToMerge(nativeHandle_, minWriteBufferNumberToMerge);
+ return this;
+ }
+
+ @Override
+ public int minWriteBufferNumberToMerge() {
+ return minWriteBufferNumberToMerge(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions useFixedLengthPrefixExtractor(final int n) {
+ assert(isOwningHandle());
+ useFixedLengthPrefixExtractor(nativeHandle_, n);
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions useCappedPrefixExtractor(final int n) {
+ assert(isOwningHandle());
+ useCappedPrefixExtractor(nativeHandle_, n);
+ return this;
+ }
+
+ @Override
+ public ColumnFamilyOptions setCompressionType(
+ final CompressionType compressionType) {
+ setCompressionType(nativeHandle_, compressionType.getValue());
+ return this;
+ }
+
+ @Override
+ public CompressionType compressionType() {
+ return CompressionType.getCompressionType(compressionType(nativeHandle_));
+ }
+
+ @Override
+ public ColumnFamilyOptions setCompressionPerLevel(
+ final List<CompressionType> compressionLevels) {
+ final byte[] byteCompressionTypes = new byte[compressionLevels.size()];
+ for (int i = 0; i < compressionLevels.size(); i++) {
+ byteCompressionTypes[i] = compressionLevels.get(i).getValue();
+ }
+ setCompressionPerLevel(nativeHandle_, byteCompressionTypes);
+ return this;
+ }
+
+ @Override
+ public List<CompressionType> compressionPerLevel() {
+ final byte[] byteCompressionTypes =
+ compressionPerLevel(nativeHandle_);
+ final List<CompressionType> compressionLevels = new ArrayList<>();
+ for (final byte byteCompressionType : byteCompressionTypes) {
+ compressionLevels.add(CompressionType.getCompressionType(
+ byteCompressionType));
+ }
+ return compressionLevels;
+ }
+
+ @Override
+ public ColumnFamilyOptions setBottommostCompressionType(
+ final CompressionType bottommostCompressionType) {
+ setBottommostCompressionType(nativeHandle_,
+ bottommostCompressionType.getValue());
+ return this;
+ }
+
+ @Override
+ public CompressionType bottommostCompressionType() {
+ return CompressionType.getCompressionType(
+ bottommostCompressionType(nativeHandle_));
+ }
+
+ @Override
+ public ColumnFamilyOptions setBottommostCompressionOptions(
+ final CompressionOptions bottommostCompressionOptions) {
+ setBottommostCompressionOptions(nativeHandle_,
+ bottommostCompressionOptions.nativeHandle_);
+ this.bottommostCompressionOptions_ = bottommostCompressionOptions;
+ return this;
+ }
+
+ @Override
+ public CompressionOptions bottommostCompressionOptions() {
+ return this.bottommostCompressionOptions_;
+ }
+
+ @Override
+ public ColumnFamilyOptions setCompressionOptions(
+ final CompressionOptions compressionOptions) {
+ setCompressionOptions(nativeHandle_, compressionOptions.nativeHandle_);
+ this.compressionOptions_ = compressionOptions;
+ return this;
+ }
+
+ @Override
+ public CompressionOptions compressionOptions() {
+ return this.compressionOptions_;
+ }
+
+ @Override
+ public ColumnFamilyOptions setNumLevels(final int numLevels) {
+ setNumLevels(nativeHandle_, numLevels);
+ return this;
+ }
+
+ @Override
+ public int numLevels() {
+ return numLevels(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setLevelZeroFileNumCompactionTrigger(
+ final int numFiles) {
+ setLevelZeroFileNumCompactionTrigger(
+ nativeHandle_, numFiles);
+ return this;
+ }
+
+ @Override
+ public int levelZeroFileNumCompactionTrigger() {
+ return levelZeroFileNumCompactionTrigger(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setLevelZeroSlowdownWritesTrigger(
+ final int numFiles) {
+ setLevelZeroSlowdownWritesTrigger(nativeHandle_, numFiles);
+ return this;
+ }
+
+ @Override
+ public int levelZeroSlowdownWritesTrigger() {
+ return levelZeroSlowdownWritesTrigger(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setLevelZeroStopWritesTrigger(final int numFiles) {
+ setLevelZeroStopWritesTrigger(nativeHandle_, numFiles);
+ return this;
+ }
+
+ @Override
+ public int levelZeroStopWritesTrigger() {
+ return levelZeroStopWritesTrigger(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setTargetFileSizeBase(
+ final long targetFileSizeBase) {
+ setTargetFileSizeBase(nativeHandle_, targetFileSizeBase);
+ return this;
+ }
+
+ @Override
+ public long targetFileSizeBase() {
+ return targetFileSizeBase(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setTargetFileSizeMultiplier(
+ final int multiplier) {
+ setTargetFileSizeMultiplier(nativeHandle_, multiplier);
+ return this;
+ }
+
+ @Override
+ public int targetFileSizeMultiplier() {
+ return targetFileSizeMultiplier(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMaxBytesForLevelBase(
+ final long maxBytesForLevelBase) {
+ setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase);
+ return this;
+ }
+
+ @Override
+ public long maxBytesForLevelBase() {
+ return maxBytesForLevelBase(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setLevelCompactionDynamicLevelBytes(
+ final boolean enableLevelCompactionDynamicLevelBytes) {
+ setLevelCompactionDynamicLevelBytes(nativeHandle_,
+ enableLevelCompactionDynamicLevelBytes);
+ return this;
+ }
+
+ @Override
+ public boolean levelCompactionDynamicLevelBytes() {
+ return levelCompactionDynamicLevelBytes(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMaxBytesForLevelMultiplier(final double multiplier) {
+ setMaxBytesForLevelMultiplier(nativeHandle_, multiplier);
+ return this;
+ }
+
+ @Override
+ public double maxBytesForLevelMultiplier() {
+ return maxBytesForLevelMultiplier(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMaxCompactionBytes(final long maxCompactionBytes) {
+ setMaxCompactionBytes(nativeHandle_, maxCompactionBytes);
+ return this;
+ }
+
+ @Override
+ public long maxCompactionBytes() {
+ return maxCompactionBytes(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setArenaBlockSize(
+ final long arenaBlockSize) {
+ setArenaBlockSize(nativeHandle_, arenaBlockSize);
+ return this;
+ }
+
+ @Override
+ public long arenaBlockSize() {
+ return arenaBlockSize(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setDisableAutoCompactions(
+ final boolean disableAutoCompactions) {
+ setDisableAutoCompactions(nativeHandle_, disableAutoCompactions);
+ return this;
+ }
+
+ @Override
+ public boolean disableAutoCompactions() {
+ return disableAutoCompactions(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setCompactionStyle(
+ final CompactionStyle compactionStyle) {
+ setCompactionStyle(nativeHandle_, compactionStyle.getValue());
+ return this;
+ }
+
+ @Override
+ public CompactionStyle compactionStyle() {
+ return CompactionStyle.fromValue(compactionStyle(nativeHandle_));
+ }
+
+ @Override
+ public ColumnFamilyOptions setMaxTableFilesSizeFIFO(
+ final long maxTableFilesSize) {
+ assert(maxTableFilesSize > 0); // unsigned native type
+ assert(isOwningHandle());
+ setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize);
+ return this;
+ }
+
+ @Override
+ public long maxTableFilesSizeFIFO() {
+ return maxTableFilesSizeFIFO(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMaxSequentialSkipInIterations(
+ final long maxSequentialSkipInIterations) {
+ setMaxSequentialSkipInIterations(nativeHandle_,
+ maxSequentialSkipInIterations);
+ return this;
+ }
+
+ @Override
+ public long maxSequentialSkipInIterations() {
+ return maxSequentialSkipInIterations(nativeHandle_);
+ }
+
+ @Override
+ public MemTableConfig memTableConfig() {
+ return this.memTableConfig_;
+ }
+
+ @Override
+ public ColumnFamilyOptions setMemTableConfig(
+ final MemTableConfig memTableConfig) {
+ setMemTableFactory(
+ nativeHandle_, memTableConfig.newMemTableFactoryHandle());
+ this.memTableConfig_ = memTableConfig;
+ return this;
+ }
+
+ @Override
+ public String memTableFactoryName() {
+ assert(isOwningHandle());
+ return memTableFactoryName(nativeHandle_);
+ }
+
+ @Override
+ public TableFormatConfig tableFormatConfig() {
+ return this.tableFormatConfig_;
+ }
+
+ @Override
+ public ColumnFamilyOptions setTableFormatConfig(
+ final TableFormatConfig tableFormatConfig) {
+ setTableFactory(nativeHandle_, tableFormatConfig.newTableFactoryHandle());
+ this.tableFormatConfig_ = tableFormatConfig;
+ return this;
+ }
+
+ @Override
+ public String tableFactoryName() {
+ assert(isOwningHandle());
+ return tableFactoryName(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setInplaceUpdateSupport(
+ final boolean inplaceUpdateSupport) {
+ setInplaceUpdateSupport(nativeHandle_, inplaceUpdateSupport);
+ return this;
+ }
+
+ @Override
+ public boolean inplaceUpdateSupport() {
+ return inplaceUpdateSupport(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setInplaceUpdateNumLocks(
+ final long inplaceUpdateNumLocks) {
+ setInplaceUpdateNumLocks(nativeHandle_, inplaceUpdateNumLocks);
+ return this;
+ }
+
+ @Override
+ public long inplaceUpdateNumLocks() {
+ return inplaceUpdateNumLocks(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMemtablePrefixBloomSizeRatio(
+ final double memtablePrefixBloomSizeRatio) {
+ setMemtablePrefixBloomSizeRatio(nativeHandle_, memtablePrefixBloomSizeRatio);
+ return this;
+ }
+
+ @Override
+ public double memtablePrefixBloomSizeRatio() {
+ return memtablePrefixBloomSizeRatio(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setBloomLocality(int bloomLocality) {
+ setBloomLocality(nativeHandle_, bloomLocality);
+ return this;
+ }
+
+ @Override
+ public int bloomLocality() {
+ return bloomLocality(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMaxSuccessiveMerges(
+ final long maxSuccessiveMerges) {
+ setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges);
+ return this;
+ }
+
+ @Override
+ public long maxSuccessiveMerges() {
+ return maxSuccessiveMerges(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setOptimizeFiltersForHits(
+ final boolean optimizeFiltersForHits) {
+ setOptimizeFiltersForHits(nativeHandle_, optimizeFiltersForHits);
+ return this;
+ }
+
+ @Override
+ public boolean optimizeFiltersForHits() {
+ return optimizeFiltersForHits(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMemtableHugePageSize(
+ final long memtableHugePageSize) {
+ setMemtableHugePageSize(nativeHandle_, memtableHugePageSize);
+ return this;
+ }
+
+ @Override
+ public long memtableHugePageSize() {
+ return memtableHugePageSize(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) {
+ setSoftPendingCompactionBytesLimit(nativeHandle_,
+ softPendingCompactionBytesLimit);
+ return this;
+ }
+
+ @Override
+ public long softPendingCompactionBytesLimit() {
+ return softPendingCompactionBytesLimit(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) {
+ setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit);
+ return this;
+ }
+
+ @Override
+ public long hardPendingCompactionBytesLimit() {
+ return hardPendingCompactionBytesLimit(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) {
+ setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger);
+ return this;
+ }
+
+ @Override
+ public int level0FileNumCompactionTrigger() {
+ return level0FileNumCompactionTrigger(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) {
+ setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger);
+ return this;
+ }
+
+ @Override
+ public int level0SlowdownWritesTrigger() {
+ return level0SlowdownWritesTrigger(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setLevel0StopWritesTrigger(int level0StopWritesTrigger) {
+ setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger);
+ return this;
+ }
+
+ @Override
+ public int level0StopWritesTrigger() {
+ return level0StopWritesTrigger(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) {
+ setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional);
+ return this;
+ }
+
+ @Override
+ public int[] maxBytesForLevelMultiplierAdditional() {
+ return maxBytesForLevelMultiplierAdditional(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setParanoidFileChecks(boolean paranoidFileChecks) {
+ setParanoidFileChecks(nativeHandle_, paranoidFileChecks);
+ return this;
+ }
+
+ @Override
+ public boolean paranoidFileChecks() {
+ return paranoidFileChecks(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setMaxWriteBufferNumberToMaintain(
+ final int maxWriteBufferNumberToMaintain) {
+ setMaxWriteBufferNumberToMaintain(
+ nativeHandle_, maxWriteBufferNumberToMaintain);
+ return this;
+ }
+
+ @Override
+ public int maxWriteBufferNumberToMaintain() {
+ return maxWriteBufferNumberToMaintain(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setCompactionPriority(
+ final CompactionPriority compactionPriority) {
+ setCompactionPriority(nativeHandle_, compactionPriority.getValue());
+ return this;
+ }
+
+ @Override
+ public CompactionPriority compactionPriority() {
+ return CompactionPriority.getCompactionPriority(
+ compactionPriority(nativeHandle_));
+ }
+
+ @Override
+ public ColumnFamilyOptions setReportBgIoStats(final boolean reportBgIoStats) {
+ setReportBgIoStats(nativeHandle_, reportBgIoStats);
+ return this;
+ }
+
+ @Override
+ public boolean reportBgIoStats() {
+ return reportBgIoStats(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setTtl(final long ttl) {
+ setTtl(nativeHandle_, ttl);
+ return this;
+ }
+
+ @Override
+ public long ttl() {
+ return ttl(nativeHandle_);
+ }
+
+ @Override
+ public ColumnFamilyOptions setCompactionOptionsUniversal(
+ final CompactionOptionsUniversal compactionOptionsUniversal) {
+ setCompactionOptionsUniversal(nativeHandle_,
+ compactionOptionsUniversal.nativeHandle_);
+ this.compactionOptionsUniversal_ = compactionOptionsUniversal;
+ return this;
+ }
+
+ @Override
+ public CompactionOptionsUniversal compactionOptionsUniversal() {
+ return this.compactionOptionsUniversal_;
+ }
+
+ @Override
+ public ColumnFamilyOptions setCompactionOptionsFIFO(final CompactionOptionsFIFO compactionOptionsFIFO) {
+ setCompactionOptionsFIFO(nativeHandle_,
+ compactionOptionsFIFO.nativeHandle_);
+ this.compactionOptionsFIFO_ = compactionOptionsFIFO;
+ return this;
+ }
+
+ @Override
+ public CompactionOptionsFIFO compactionOptionsFIFO() {
+ return this.compactionOptionsFIFO_;
+ }
+
+ @Override
+ public ColumnFamilyOptions setForceConsistencyChecks(final boolean forceConsistencyChecks) {
+ setForceConsistencyChecks(nativeHandle_, forceConsistencyChecks);
+ return this;
+ }
+
+ @Override
+ public boolean forceConsistencyChecks() {
+ return forceConsistencyChecks(nativeHandle_);
+ }
+
+ private static native long getColumnFamilyOptionsFromProps(
+ String optString);
+
+ private static native long newColumnFamilyOptions();
+ private static native long copyColumnFamilyOptions(final long handle);
+ private static native long newColumnFamilyOptionsFromOptions(
+ final long optionsHandle);
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native void optimizeForSmallDb(final long handle);
+ private native void optimizeForPointLookup(long handle,
+ long blockCacheSizeMb);
+ private native void optimizeLevelStyleCompaction(long handle,
+ long memtableMemoryBudget);
+ private native void optimizeUniversalStyleCompaction(long handle,
+ long memtableMemoryBudget);
+ private native void setComparatorHandle(long handle, int builtinComparator);
+ private native void setComparatorHandle(long optHandle,
+ long comparatorHandle, byte comparatorType);
+ private native void setMergeOperatorName(long handle, String name);
+ private native void setMergeOperator(long handle, long mergeOperatorHandle);
+ private native void setCompactionFilterHandle(long handle,
+ long compactionFilterHandle);
+ private native void setCompactionFilterFactoryHandle(long handle,
+ long compactionFilterFactoryHandle);
+ private native void setWriteBufferSize(long handle, long writeBufferSize)
+ throws IllegalArgumentException;
+ private native long writeBufferSize(long handle);
+ private native void setMaxWriteBufferNumber(
+ long handle, int maxWriteBufferNumber);
+ private native int maxWriteBufferNumber(long handle);
+ private native void setMinWriteBufferNumberToMerge(
+ long handle, int minWriteBufferNumberToMerge);
+ private native int minWriteBufferNumberToMerge(long handle);
+ private native void setCompressionType(long handle, byte compressionType);
+ private native byte compressionType(long handle);
+ private native void setCompressionPerLevel(long handle,
+ byte[] compressionLevels);
+ private native byte[] compressionPerLevel(long handle);
+ private native void setBottommostCompressionType(long handle,
+ byte bottommostCompressionType);
+ private native byte bottommostCompressionType(long handle);
+ private native void setBottommostCompressionOptions(final long handle,
+ final long bottommostCompressionOptionsHandle);
+ private native void setCompressionOptions(long handle,
+ long compressionOptionsHandle);
+ private native void useFixedLengthPrefixExtractor(
+ long handle, int prefixLength);
+ private native void useCappedPrefixExtractor(
+ long handle, int prefixLength);
+ private native void setNumLevels(
+ long handle, int numLevels);
+ private native int numLevels(long handle);
+ private native void setLevelZeroFileNumCompactionTrigger(
+ long handle, int numFiles);
+ private native int levelZeroFileNumCompactionTrigger(long handle);
+ private native void setLevelZeroSlowdownWritesTrigger(
+ long handle, int numFiles);
+ private native int levelZeroSlowdownWritesTrigger(long handle);
+ private native void setLevelZeroStopWritesTrigger(
+ long handle, int numFiles);
+ private native int levelZeroStopWritesTrigger(long handle);
+ private native void setTargetFileSizeBase(
+ long handle, long targetFileSizeBase);
+ private native long targetFileSizeBase(long handle);
+ private native void setTargetFileSizeMultiplier(
+ long handle, int multiplier);
+ private native int targetFileSizeMultiplier(long handle);
+ private native void setMaxBytesForLevelBase(
+ long handle, long maxBytesForLevelBase);
+ private native long maxBytesForLevelBase(long handle);
+ private native void setLevelCompactionDynamicLevelBytes(
+ long handle, boolean enableLevelCompactionDynamicLevelBytes);
+ private native boolean levelCompactionDynamicLevelBytes(
+ long handle);
+ private native void setMaxBytesForLevelMultiplier(long handle, double multiplier);
+ private native double maxBytesForLevelMultiplier(long handle);
+ private native void setMaxCompactionBytes(long handle, long maxCompactionBytes);
+ private native long maxCompactionBytes(long handle);
+ private native void setArenaBlockSize(
+ long handle, long arenaBlockSize)
+ throws IllegalArgumentException;
+ private native long arenaBlockSize(long handle);
+ private native void setDisableAutoCompactions(
+ long handle, boolean disableAutoCompactions);
+ private native boolean disableAutoCompactions(long handle);
+ private native void setCompactionStyle(long handle, byte compactionStyle);
+ private native byte compactionStyle(long handle);
+ private native void setMaxTableFilesSizeFIFO(
+ long handle, long max_table_files_size);
+ private native long maxTableFilesSizeFIFO(long handle);
+ private native void setMaxSequentialSkipInIterations(
+ long handle, long maxSequentialSkipInIterations);
+ private native long maxSequentialSkipInIterations(long handle);
+ private native void setMemTableFactory(long handle, long factoryHandle);
+ private native String memTableFactoryName(long handle);
+ private native void setTableFactory(long handle, long factoryHandle);
+ private native String tableFactoryName(long handle);
+ private native void setInplaceUpdateSupport(
+ long handle, boolean inplaceUpdateSupport);
+ private native boolean inplaceUpdateSupport(long handle);
+ private native void setInplaceUpdateNumLocks(
+ long handle, long inplaceUpdateNumLocks)
+ throws IllegalArgumentException;
+ private native long inplaceUpdateNumLocks(long handle);
+ private native void setMemtablePrefixBloomSizeRatio(
+ long handle, double memtablePrefixBloomSizeRatio);
+ private native double memtablePrefixBloomSizeRatio(long handle);
+ private native void setBloomLocality(
+ long handle, int bloomLocality);
+ private native int bloomLocality(long handle);
+ private native void setMaxSuccessiveMerges(
+ long handle, long maxSuccessiveMerges)
+ throws IllegalArgumentException;
+ private native long maxSuccessiveMerges(long handle);
+ private native void setOptimizeFiltersForHits(long handle,
+ boolean optimizeFiltersForHits);
+ private native boolean optimizeFiltersForHits(long handle);
+ private native void setMemtableHugePageSize(long handle,
+ long memtableHugePageSize);
+ private native long memtableHugePageSize(long handle);
+ private native void setSoftPendingCompactionBytesLimit(long handle,
+ long softPendingCompactionBytesLimit);
+ private native long softPendingCompactionBytesLimit(long handle);
+ private native void setHardPendingCompactionBytesLimit(long handle,
+ long hardPendingCompactionBytesLimit);
+ private native long hardPendingCompactionBytesLimit(long handle);
+ private native void setLevel0FileNumCompactionTrigger(long handle,
+ int level0FileNumCompactionTrigger);
+ private native int level0FileNumCompactionTrigger(long handle);
+ private native void setLevel0SlowdownWritesTrigger(long handle,
+ int level0SlowdownWritesTrigger);
+ private native int level0SlowdownWritesTrigger(long handle);
+ private native void setLevel0StopWritesTrigger(long handle,
+ int level0StopWritesTrigger);
+ private native int level0StopWritesTrigger(long handle);
+ private native void setMaxBytesForLevelMultiplierAdditional(long handle,
+ int[] maxBytesForLevelMultiplierAdditional);
+ private native int[] maxBytesForLevelMultiplierAdditional(long handle);
+ private native void setParanoidFileChecks(long handle,
+ boolean paranoidFileChecks);
+ private native boolean paranoidFileChecks(long handle);
+ private native void setMaxWriteBufferNumberToMaintain(final long handle,
+ final int maxWriteBufferNumberToMaintain);
+ private native int maxWriteBufferNumberToMaintain(final long handle);
+ private native void setCompactionPriority(final long handle,
+ final byte compactionPriority);
+ private native byte compactionPriority(final long handle);
+ private native void setReportBgIoStats(final long handle,
+ final boolean reportBgIoStats);
+ private native boolean reportBgIoStats(final long handle);
+ private native void setTtl(final long handle, final long ttl);
+ private native long ttl(final long handle);
+ private native void setCompactionOptionsUniversal(final long handle,
+ final long compactionOptionsUniversalHandle);
+ private native void setCompactionOptionsFIFO(final long handle,
+ final long compactionOptionsFIFOHandle);
+ private native void setForceConsistencyChecks(final long handle,
+ final boolean forceConsistencyChecks);
+ private native boolean forceConsistencyChecks(final long handle);
+
+ // instance variables
+ // NOTE: If you add new member variables, please update the copy constructor above!
+ private MemTableConfig memTableConfig_;
+ private TableFormatConfig tableFormatConfig_;
+ private AbstractComparator comparator_;
+ private AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter_;
+ private AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>>
+ compactionFilterFactory_;
+ private CompactionOptionsUniversal compactionOptionsUniversal_;
+ private CompactionOptionsFIFO compactionOptionsFIFO_;
+ private CompressionOptions bottommostCompressionOptions_;
+ private CompressionOptions compressionOptions_;
+
+}
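As a usage sketch of the fluent API above: each setter returns the options object itself, so a column family is typically configured as a single chain. The values and the ColumnFamilyOptionsExample class below are illustrative only, not tuning advice.

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    import org.rocksdb.ColumnFamilyDescriptor;
    import org.rocksdb.ColumnFamilyOptions;
    import org.rocksdb.CompactionStyle;
    import org.rocksdb.CompressionType;

    public class ColumnFamilyOptionsExample {
      public static ColumnFamilyDescriptor buildDescriptor() {
        // The options object wraps a native handle; dispose it (close())
        // once the column family no longer needs it.
        final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
            .setWriteBufferSize(64L * 1024 * 1024) // 64 MiB memtables
            .setMaxWriteBufferNumber(3)
            .setCompactionStyle(CompactionStyle.LEVEL)
            // Per-level compression: none for L0/L1, LZ4 further down.
            .setCompressionPerLevel(Arrays.asList(
                CompressionType.NO_COMPRESSION,
                CompressionType.NO_COMPRESSION,
                CompressionType.LZ4_COMPRESSION,
                CompressionType.LZ4_COMPRESSION));
        return new ColumnFamilyDescriptor(
            "usage".getBytes(StandardCharsets.UTF_8), cfOpts);
      }
    }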
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
new file mode 100644
index 000000000..b02c6c236
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
@@ -0,0 +1,449 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInterface<T>>
+ extends AdvancedColumnFamilyOptionsInterface<T> {
+ /**
+ * Use this if your DB is very small (like under 1GB) and you don't want to
+ * spend lots of memory for memtables.
+ *
+ * @return the instance of the current object.
+ */
+ T optimizeForSmallDb();
+
+ /**
+ * Use this if you don't need to keep the data sorted, i.e. you'll never use
+ * an iterator, only Put() and Get() API calls.
+ *
+ * @param blockCacheSizeMb Block cache size in MB
+ * @return the instance of the current object.
+ */
+ T optimizeForPointLookup(long blockCacheSizeMb);
+
+ /**
+ * <p>Default values for some parameters in ColumnFamilyOptions are not
+ * optimized for heavy workloads and big datasets, which means you might
+ * observe write stalls under some conditions. As a starting point for tuning
+ * RocksDB options, use the following for level style compaction.</p>
+ *
+ * <p>Make sure to also call IncreaseParallelism(), which will provide the
+ * biggest performance gains.</p>
+ * <p>Note: we might use more memory than memtable_memory_budget during
+ * periods of high write rate.</p>
+ *
+ * @return the instance of the current object.
+ */
+ T optimizeLevelStyleCompaction();
+
+ /**
+ * <p>Default values for some parameters in ColumnFamilyOptions are not
+ * optimized for heavy workloads and big datasets, which means you might
+ * observe write stalls under some conditions. As a starting point for tuning
+ * RocksDB options, use the following for level style compaction.</p>
+ *
+ * <p>Make sure to also call IncreaseParallelism(), which will provide the
+ * biggest performance gains.</p>
+ * <p>Note: we might use more memory than memtable_memory_budget during
+ * periods of high write rate.</p>
+ *
+ * @param memtableMemoryBudget memory budget in bytes
+ * @return the instance of the current object.
+ */
+ T optimizeLevelStyleCompaction(
+ long memtableMemoryBudget);
+
+ /**
+ * <p>Default values for some parameters in ColumnFamilyOptions are not
+ * optimized for heavy workloads and big datasets, which means you might
+ * observe write stalls under some conditions. As a starting point for tuning
+ * RocksDB options, use the following for universal style compaction.</p>
+ *
+ * <p>Universal style compaction is focused on reducing Write Amplification
+ * Factor for big data sets, but increases Space Amplification.</p>
+ *
+ * <p>Make sure to also call IncreaseParallelism(), which will provide the
+ * biggest performance gains.</p>
+ *
+ * <p>Note: we might use more memory than memtable_memory_budget during
+ * periods of high write rate.</p>
+ *
+ * @return the instance of the current object.
+ */
+ T optimizeUniversalStyleCompaction();
+
+ /**
+ * <p>Default values for some parameters in ColumnFamilyOptions are not
+ * optimized for heavy workloads and big datasets, which means you might
+ * observe write stalls under some conditions. As a starting point for tuning
+ * RocksDB options, use the following for universal style compaction.</p>
+ *
+ * <p>Universal style compaction is focused on reducing Write Amplification
+ * Factor for big data sets, but increases Space Amplification.</p>
+ *
+ * <p>Make sure to also call IncreaseParallelism(), which will provide the
+ * biggest performance gains.</p>
+ *
+ * <p>Note: we might use more memory than memtable_memory_budget during
+ * periods of high write rate.</p>
+ *
+ * @param memtableMemoryBudget memory budget in bytes
+ * @return the instance of the current object.
+ */
+ T optimizeUniversalStyleCompaction(
+ long memtableMemoryBudget);
+
+ /**
+ * Set {@link BuiltinComparator} to be used with RocksDB.
+ *
+ * Note: the comparator can only be set once, upon database creation.
+ *
+ * Default: BytewiseComparator.
+ * @param builtinComparator a {@link BuiltinComparator} type.
+ * @return the instance of the current object.
+ */
+ T setComparator(
+ BuiltinComparator builtinComparator);
+
+ /**
+ * Use the specified comparator for key ordering.
+ *
+ * The comparator should not be disposed before the options instances that use
+ * it are disposed. If dispose() is never called, the comparator object will be
+ * GC'd automatically.
+ *
+ * A comparator instance can be re-used across multiple options instances.
+ *
+ * @param comparator the Java comparator instance.
+ * @return the instance of the current object.
+ */
+ T setComparator(
+ AbstractComparator comparator);
+
+ /**
+ * <p>Set the merge operator to be used for merging two merge operands
+ * of the same key. The merge function is invoked during
+ * compaction and at lookup time, if multiple key/value pairs belonging
+ * to the same key are found in the database.</p>
+ *
+ * @param name the name of the merge function, as defined by
+ * the MergeOperators factory (see utilities/MergeOperators.h).
+ * The merge function is specified by name and must be one of the
+ * standard merge operators provided by RocksDB. The available
+ * operators are "put", "uint64add", "stringappend" and "stringappendtest".
+ * @return the instance of the current object.
+ */
+ T setMergeOperatorName(String name);
+
+ /**
+ * <p>Set the merge operator to be used for merging two different key/value
+ * pairs that share the same key. The merge function is invoked during
+ * compaction and at lookup time, if multiple key/value pairs belonging
+ * to the same key are found in the database.</p>
+ *
+ * @param mergeOperator {@link MergeOperator} instance.
+ * @return the instance of the current object.
+ */
+ T setMergeOperator(MergeOperator mergeOperator);
+
+ /**
+ * A single CompactionFilter instance to call into during compaction.
+ * Allows an application to modify/delete a key-value during background
+ * compaction.
+ *
+ * If the client requires a new compaction filter to be used for different
+ * compaction runs, it can instead call
+ * {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)}.
+ *
+ * The client should set only one of the two.
+ * {@link #setCompactionFilter(AbstractCompactionFilter)} takes precedence
+ * over {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)}
+ * if the client specifies both.
+ *
+ * If multithreaded compaction is being used, the supplied CompactionFilter
+ * instance may be used from different threads concurrently and so should be thread-safe.
+ *
+ * @param compactionFilter {@link AbstractCompactionFilter} instance.
+ * @return the instance of the current object.
+ */
+ T setCompactionFilter(
+ final AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter);
+
+ /**
+ * Accessor for the CompactionFilter instance in use.
+ *
+ * @return Reference to the CompactionFilter, or null if one hasn't been set.
+ */
+ AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter();
+
+ /**
+ * This is a factory that provides {@link AbstractCompactionFilter} objects
+ * which allow an application to modify/delete a key-value during background
+ * compaction.
+ *
+ * A new filter will be created on each compaction run. If multithreaded
+ * compaction is being used, each created CompactionFilter will only be used
+ * from a single thread and so does not need to be thread-safe.
+ *
+ * @param compactionFilterFactory {@link AbstractCompactionFilterFactory} instance.
+ * @return the instance of the current object.
+ */
+ T setCompactionFilterFactory(
+ final AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>>
+ compactionFilterFactory);
+
+ /**
+ * Accessor for the CompactionFilterFactory instance in use.
+ *
+ * @return Reference to the CompactionFilterFactory, or null if one hasn't been set.
+ */
+ AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>> compactionFilterFactory();
+
+ /**
+ * This prefix-extractor uses the first n bytes of a key as its prefix.
+ *
+ * In some hash-based memtable representations, such as HashLinkedList
+ * and HashSkipList, prefixes are used to partition the keys into
+ * several buckets. The prefix extractor specifies how to
+ * extract the prefix from a given key.
+ *
+ * @param n use the first n bytes of a key as its prefix.
+ * @return the reference to the current option.
+ */
+ T useFixedLengthPrefixExtractor(int n);
+
+ /**
+ * Same as fixed length prefix extractor, except that when slice is
+ * shorter than the fixed length, it will use the full key.
+ *
+ * @param n use the first n bytes of a key as its prefix.
+ * @return the reference to the current option.
+ */
+ T useCappedPrefixExtractor(int n);
+
+ /**
+ * Number of files to trigger level-0 compaction. A value &lt; 0 means that
+ * level-0 compaction will not be triggered by number of files at all.
+ * Default: 4
+ *
+ * @param numFiles the number of files in level-0 to trigger compaction.
+ * @return the reference to the current option.
+ */
+ T setLevelZeroFileNumCompactionTrigger(
+ int numFiles);
+
+ /**
+ * The number of files in level 0 to trigger compaction from level-0 to
+ * level-1. A value &lt; 0 means that level-0 compaction will not be
+ * triggered by number of files at all.
+ * Default: 4
+ *
+ * @return the number of files in level 0 to trigger compaction.
+ */
+ int levelZeroFileNumCompactionTrigger();
+
+ /**
+ * Soft limit on number of level-0 files. We start slowing down writes at this
+ * point. A value &lt; 0 means that no writing slow down will be triggered by
+ * number of files in level-0.
+ *
+ * @param numFiles soft limit on number of level-0 files.
+ * @return the reference to the current option.
+ */
+ T setLevelZeroSlowdownWritesTrigger(
+ int numFiles);
+
+ /**
+ * Soft limit on the number of level-0 files. We start slowing down writes
+ * at this point. A value &lt; 0 means that no writing slow down will be
+ * triggered by number of files in level-0.
+ *
+ * @return the soft limit on the number of level-0 files.
+ */
+ int levelZeroSlowdownWritesTrigger();
+
+ /**
+ * Maximum number of level-0 files. We stop writes at this point.
+ *
+ * @param numFiles the hard limit of the number of level-0 files.
+ * @return the reference to the current option.
+ */
+ T setLevelZeroStopWritesTrigger(int numFiles);
+
+ /**
+ * Maximum number of level-0 files. We stop writes at this point.
+ *
+ * @return the hard limit of the number of level-0 file.
+ */
+ int levelZeroStopWritesTrigger();
+
+ /**
+ * The ratio between the total size of level-(L+1) files and the total
+ * size of level-L files for all L.
+ * DEFAULT: 10
+ *
+ * @param multiplier the ratio between the total size of level-(L+1)
+ * files and the total size of level-L files for all L.
+ * @return the reference to the current option.
+ */
+ T setMaxBytesForLevelMultiplier(
+ double multiplier);
+
+ /**
+ * The ratio between the total size of level-(L+1) files and the total
+ * size of level-L files for all L.
+ * DEFAULT: 10
+ *
+ * @return the ratio between the total size of level-(L+1) files and
+ * the total size of level-L files for all L.
+ */
+ double maxBytesForLevelMultiplier();
+
+ /**
+ * FIFO compaction option.
+ * The oldest table file will be deleted
+ * once the total size of all table files reaches this size.
+ * The default value is 1GB (1 * 1024 * 1024 * 1024).
+ *
+ * @param maxTableFilesSize the size limit of the total sum of table files.
+ * @return the instance of the current object.
+ */
+ T setMaxTableFilesSizeFIFO(
+ long maxTableFilesSize);
+
+ /**
+ * FIFO compaction option.
+ * The oldest table file will be deleted
+ * once the total size of all table files reaches this size.
+ * The default value is 1GB (1 * 1024 * 1024 * 1024).
+ *
+ * @return the size limit of the total sum of table files.
+ */
+ long maxTableFilesSizeFIFO();
+
+ /**
+ * Get the config for mem-table.
+ *
+ * @return the mem-table config.
+ */
+ MemTableConfig memTableConfig();
+
+ /**
+ * Set the config for mem-table.
+ *
+ * @param memTableConfig the mem-table config.
+ * @return the instance of the current object.
+ * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms
+ * if the value overflows the underlying platform-specific type.
+ */
+ T setMemTableConfig(MemTableConfig memTableConfig);
+
+ /**
+ * Returns the name of the current memtable representation.
+ * The memtable format can be set using setMemTableConfig.
+ *
+ * @return the name of the currently-used memtable factory.
+ * @see #setMemTableConfig(org.rocksdb.MemTableConfig)
+ */
+ String memTableFactoryName();
+
+ /**
+ * Get the config for table format.
+ *
+ * @return the table format config.
+ */
+ TableFormatConfig tableFormatConfig();
+
+ /**
+ * Set the config for table format.
+ *
+ * @param config the table format config.
+ * @return the reference of the current options.
+ */
+ T setTableFormatConfig(TableFormatConfig config);
+
+ /**
+ * @return the name of the currently used table factory.
+ */
+ String tableFactoryName();
+
+ /**
+ * Compression algorithm that will be used for the bottommost level that
+ * contains files. If level-compaction is used, this option only affects
+ * the levels after the base level.
+ *
+ * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
+ *
+ * @param bottommostCompressionType The compression type to use for the
+ * bottommost level
+ *
+ * @return the reference of the current options.
+ */
+ T setBottommostCompressionType(
+ final CompressionType bottommostCompressionType);
+
+ /**
+ * Compression algorithm that will be used for the bottommost level that
+ * contains files. If level-compaction is used, this option only affects
+ * the levels after the base level.
+ *
+ * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
+ *
+ * @return The compression type used for the bottommost level
+ */
+ CompressionType bottommostCompressionType();
+
+ /**
+ * Set the options for the compression algorithm used by
+ * {@link #bottommostCompressionType()} if it is enabled.
+ *
+ * To enable it, see the definition of
+ * {@link CompressionOptions}.
+ *
+ * @param compressionOptions the bottom most compression options.
+ *
+ * @return the reference of the current options.
+ */
+ T setBottommostCompressionOptions(
+ final CompressionOptions compressionOptions);
+
+ /**
+ * Get the bottom most compression options.
+ *
+ * See {@link #setBottommostCompressionOptions(CompressionOptions)}.
+ *
+ * @return the bottom most compression options.
+ */
+ CompressionOptions bottommostCompressionOptions();
+
+ /**
+ * Set the different options for compression algorithms
+ *
+ * @param compressionOptions The compression options
+ *
+ * @return the reference of the current options.
+ */
+ T setCompressionOptions(
+ CompressionOptions compressionOptions);
+
+ /**
+ * Get the different options for compression algorithms
+ *
+ * @return The compression options
+ */
+ CompressionOptions compressionOptions();
+
+ /**
+ * Default memtable memory budget used with the following methods:
+ *
+ * <ol>
+ * <li>{@link #optimizeLevelStyleCompaction()}</li>
+ * <li>{@link #optimizeUniversalStyleCompaction()}</li>
+ * </ol>
+ */
+ long DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET = 512 * 1024 * 1024;
+}
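The optimize* presets declared above bundle several of the individual setters; a minimal sketch of how they are typically called (budget and cache sizes are illustrative, and the OptimizePresets class is hypothetical):

    import org.rocksdb.ColumnFamilyOptions;

    public class OptimizePresets {
      // Level-style preset for a write-heavy column family. The no-argument
      // overload uses DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET (512 MiB);
      // here an explicit 256 MiB budget is passed instead.
      public static ColumnFamilyOptions writeHeavy() {
        return new ColumnFamilyOptions()
            .optimizeLevelStyleCompaction(256L * 1024 * 1024);
      }

      // Point-lookup preset: only appropriate when iterators are never
      // used, as documented for optimizeForPointLookup().
      public static ColumnFamilyOptions pointLookup() {
        return new ColumnFamilyOptions()
            .optimizeForPointLookup(64 /* block cache size, in MB */);
      }
    }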
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactRangeOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactRangeOptions.java
new file mode 100644
index 000000000..c07bd96a5
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactRangeOptions.java
@@ -0,0 +1,237 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * CompactRangeOptions is used by the CompactRange() call. In the documentation of the methods, "the compaction" refers
+ * to any compaction that is using this CompactRangeOptions.
+ */
+public class CompactRangeOptions extends RocksObject {
+
+ private final static byte VALUE_kSkip = 0;
+ private final static byte VALUE_kIfHaveCompactionFilter = 1;
+ private final static byte VALUE_kForce = 2;
+
+ // For level-based compaction, we can configure whether we want to skip/force bottommost level compaction.
+ // The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in db/options.h
+ public enum BottommostLevelCompaction {
+ /**
+ * Skip bottommost level compaction
+ */
+ kSkip(VALUE_kSkip),
+ /**
+ * Only compact bottommost level if there is a compaction filter. This is the default option
+ */
+ kIfHaveCompactionFilter(VALUE_kIfHaveCompactionFilter),
+ /**
+ * Always compact bottommost level
+ */
+ kForce(VALUE_kForce);
+
+ private final byte value;
+
+ BottommostLevelCompaction(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * <p>Returns the byte value of the enumeration value.</p>
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * Returns the BottommostLevelCompaction for the given C++ RocksDB enum value.
+ * @param bottommostLevelCompaction The value of the BottommostLevelCompaction
+ * @return BottommostLevelCompaction instance, or null if none matches
+ */
+ public static BottommostLevelCompaction fromRocksId(final int bottommostLevelCompaction) {
+ switch (bottommostLevelCompaction) {
+ case VALUE_kSkip: return kSkip;
+ case VALUE_kIfHaveCompactionFilter: return kIfHaveCompactionFilter;
+ case VALUE_kForce: return kForce;
+ default: return null;
+ }
+ }
+ }
+
+ /**
+ * Construct CompactRangeOptions.
+ */
+ public CompactRangeOptions() {
+ super(newCompactRangeOptions());
+ }
+
+ /**
+ * Returns whether the compaction is exclusive, or whether other compactions may run concurrently.
+ *
+ * @return true if exclusive, false if concurrent
+ */
+ public boolean exclusiveManualCompaction() {
+ return exclusiveManualCompaction(nativeHandle_);
+ }
+
+ /**
+ * Sets whether the compaction is exclusive, or whether other compactions are allowed to run concurrently.
+ *
+ * @param exclusiveCompaction true if compaction should be exclusive
+ * @return This CompactRangeOptions
+ */
+ public CompactRangeOptions setExclusiveManualCompaction(final boolean exclusiveCompaction) {
+ setExclusiveManualCompaction(nativeHandle_, exclusiveCompaction);
+ return this;
+ }
+
+ /**
+ * Returns whether compacted files will be moved to the minimum level capable of holding the data, or to a given
+ * level (if a non-negative target_level was specified).
+ * @return true, if compacted files will be moved to the minimum level
+ */
+ public boolean changeLevel() {
+ return changeLevel(nativeHandle_);
+ }
+
+ /**
+ * Sets whether compacted files will be moved to the minimum level capable of holding the data, or to a given
+ * level (if a non-negative target_level is specified).
+ *
+ * @param changeLevel If true, compacted files will be moved to the minimum level
+ * @return This CompactRangeOptions
+ */
+ public CompactRangeOptions setChangeLevel(final boolean changeLevel) {
+ setChangeLevel(nativeHandle_, changeLevel);
+ return this;
+ }
+
+ /**
+ * If change_level is true and target_level has a non-negative value, compacted files will be moved to target_level.
+ * @return The target level for the compacted files
+ */
+ public int targetLevel() {
+ return targetLevel(nativeHandle_);
+ }
+
+
+ /**
+ * If change_level is true and target_level has a non-negative value, compacted files will be moved to target_level.
+ *
+ * @param targetLevel target level for the compacted files
+ * @return This CompactRangeOptions
+ */
+ public CompactRangeOptions setTargetLevel(final int targetLevel) {
+ setTargetLevel(nativeHandle_, targetLevel);
+ return this;
+ }
+
+ /**
+ * target_path_id for compaction output. Compaction outputs will be placed in options.db_paths[target_path_id].
+ *
+ * @return target_path_id
+ */
+ public int targetPathId() {
+ return targetPathId(nativeHandle_);
+ }
+
+ /**
+ * Compaction outputs will be placed in options.db_paths[target_path_id]. Behavior is undefined if target_path_id is
+ * out of range.
+ *
+ * @param targetPathId target path id
+ * @return This CompactRangeOptions
+ */
+ public CompactRangeOptions setTargetPathId(final int targetPathId) {
+ setTargetPathId(nativeHandle_, targetPathId);
+ return this;
+ }
+
+ /**
+ * Returns the policy for compacting the bottommost level
+ * @return The BottommostLevelCompaction policy
+ */
+ public BottommostLevelCompaction bottommostLevelCompaction() {
+ return BottommostLevelCompaction.fromRocksId(bottommostLevelCompaction(nativeHandle_));
+ }
+
+ /**
+ * Sets the policy for compacting the bottommost level
+ *
+ * @param bottommostLevelCompaction The policy for compacting the bottommost level
+ * @return This CompactRangeOptions
+ */
+ public CompactRangeOptions setBottommostLevelCompaction(final BottommostLevelCompaction bottommostLevelCompaction) {
+ setBottommostLevelCompaction(nativeHandle_, bottommostLevelCompaction.getValue());
+ return this;
+ }
+
+ /**
+ * If true, compaction will execute immediately even if doing so would cause the DB to
+ * enter write stall mode. Otherwise, it will sleep until the load is low enough.
+ * @return true if compaction will execute immediately
+ */
+ public boolean allowWriteStall() {
+ return allowWriteStall(nativeHandle_);
+ }
+
+
+ /**
+ * If true, compaction will execute immediately even if doing so would cause the DB to
+ * enter write stall mode. Otherwise, it will sleep until the load is low enough.
+ *
+ * @return This CompactRangeOptions
+ * @param allowWriteStall true if compaction should execute immediately
+ */
+ public CompactRangeOptions setAllowWriteStall(final boolean allowWriteStall) {
+ setAllowWriteStall(nativeHandle_, allowWriteStall);
+ return this;
+ }
+
+ /**
+ * If &gt; 0, it will replace the option in the DBOptions for this compaction
+ * @return number of subcompactions
+ */
+ public int maxSubcompactions() {
+ return maxSubcompactions(nativeHandle_);
+ }
+
+ /**
+ * If &gt; 0, it will replace the option in the DBOptions for this compaction
+ *
+ * @param maxSubcompactions number of subcompactions
+ * @return This CompactRangeOptions
+ */
+ public CompactRangeOptions setMaxSubcompactions(final int maxSubcompactions) {
+ setMaxSubcompactions(nativeHandle_, maxSubcompactions);
+ return this;
+ }
+
+ private static native long newCompactRangeOptions();
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native boolean exclusiveManualCompaction(final long handle);
+ private native void setExclusiveManualCompaction(final long handle,
+ final boolean exclusive_manual_compaction);
+ private native boolean changeLevel(final long handle);
+ private native void setChangeLevel(final long handle,
+ final boolean changeLevel);
+ private native int targetLevel(final long handle);
+ private native void setTargetLevel(final long handle,
+ final int targetLevel);
+ private native int targetPathId(final long handle);
+ private native void setTargetPathId(final long handle,
+ final int targetPathId);
+ private native int bottommostLevelCompaction(final long handle);
+ private native void setBottommostLevelCompaction(final long handle,
+ final int bottommostLevelCompaction);
+ private native boolean allowWriteStall(final long handle);
+ private native void setAllowWriteStall(final long handle,
+ final boolean allowWriteStall);
+ private native void setMaxSubcompactions(final long handle,
+ final int maxSubcompactions);
+ private native int maxSubcompactions(final long handle);
+}
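A sketch of how CompactRangeOptions is typically passed to a manual compaction. It assumes the four-argument RocksDB.compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions) overload from RocksJava, where a null column family handle targets the default column family and null begin/end keys cover the whole key range; the ManualCompaction class is hypothetical.

    import org.rocksdb.CompactRangeOptions;
    import org.rocksdb.CompactRangeOptions.BottommostLevelCompaction;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class ManualCompaction {
      // Runs an exclusive, full-range manual compaction that also rewrites
      // the bottommost level and moves output files to the lowest level
      // that can hold them.
      public static void compactAll(final RocksDB db) throws RocksDBException {
        try (final CompactRangeOptions options = new CompactRangeOptions()
                 .setBottommostLevelCompaction(BottommostLevelCompaction.kForce)
                 .setChangeLevel(true)
                 .setExclusiveManualCompaction(true)) {
          db.compactRange(null, null, null, options);
        }
      }
    }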
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobInfo.java
new file mode 100644
index 000000000..8b59edc91
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobInfo.java
@@ -0,0 +1,159 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+public class CompactionJobInfo extends RocksObject {
+
+ public CompactionJobInfo() {
+ super(newCompactionJobInfo());
+ }
+
+ /**
+ * Private as called from JNI C++
+ */
+ private CompactionJobInfo(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Get the name of the column family where the compaction happened.
+ *
+ * @return the name of the column family
+ */
+ public byte[] columnFamilyName() {
+ return columnFamilyName(nativeHandle_);
+ }
+
+ /**
+ * Get the status indicating whether the compaction was successful or not.
+ *
+ * @return the status
+ */
+ public Status status() {
+ return status(nativeHandle_);
+ }
+
+ /**
+ * Get the id of the thread that completed this compaction job.
+ *
+ * @return the id of the thread
+ */
+ public long threadId() {
+ return threadId(nativeHandle_);
+ }
+
+ /**
+ * Get the job id, which is unique within the same thread.
+ *
+ * @return the job id
+ */
+ public int jobId() {
+ return jobId(nativeHandle_);
+ }
+
+ /**
+ * Get the smallest input level of the compaction.
+ *
+ * @return the input level
+ */
+ public int baseInputLevel() {
+ return baseInputLevel(nativeHandle_);
+ }
+
+ /**
+ * Get the output level of the compaction.
+ *
+ * @return the output level
+ */
+ public int outputLevel() {
+ return outputLevel(nativeHandle_);
+ }
+
+ /**
+ * Get the names of the compaction input files.
+ *
+ * @return the names of the input files.
+ */
+ public List<String> inputFiles() {
+ return Arrays.asList(inputFiles(nativeHandle_));
+ }
+
+ /**
+ * Get the names of the compaction output files.
+ *
+ * @return the names of the output files.
+ */
+ public List<String> outputFiles() {
+ return Arrays.asList(outputFiles(nativeHandle_));
+ }
+
+ /**
+ * Get the table properties for the input and output tables.
+ *
+ * The map is keyed by values from {@link #inputFiles()} and
+ * {@link #outputFiles()}.
+ *
+ * @return the table properties
+ */
+ public Map<String, TableProperties> tableProperties() {
+ return tableProperties(nativeHandle_);
+ }
+
+ /**
+ * Get the reason for running the compaction.
+ *
+ * @return the reason.
+ */
+ public CompactionReason compactionReason() {
+ return CompactionReason.fromValue(compactionReason(nativeHandle_));
+ }
+
+ /**
+ * Get the compression algorithm used for output files.
+ *
+ * @return the compression algorithm
+ */
+ public CompressionType compression() {
+ return CompressionType.getCompressionType(compression(nativeHandle_));
+ }
+
+ /**
+ * Get detailed information about this compaction.
+ *
+ * @return the detailed information, or null if not available.
+ */
+ public /* @Nullable */ CompactionJobStats stats() {
+ final long statsHandle = stats(nativeHandle_);
+ if (statsHandle == 0) {
+ return null;
+ }
+
+ return new CompactionJobStats(statsHandle);
+ }
+
+
+ private static native long newCompactionJobInfo();
+ @Override protected native void disposeInternal(final long handle);
+
+ private static native byte[] columnFamilyName(final long handle);
+ private static native Status status(final long handle);
+ private static native long threadId(final long handle);
+ private static native int jobId(final long handle);
+ private static native int baseInputLevel(final long handle);
+ private static native int outputLevel(final long handle);
+ private static native String[] inputFiles(final long handle);
+ private static native String[] outputFiles(final long handle);
+ private static native Map<String, TableProperties> tableProperties(
+ final long handle);
+ private static native byte compactionReason(final long handle);
+ private static native byte compression(final long handle);
+ private static native long stats(final long handle);
+}
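CompactionJobInfo is usually consumed from a compaction-completed callback, with the optional CompactionJobStats (defined in the next file) obtained via stats(). A sketch, assuming the AbstractEventListener callback API available in recent RocksJava releases; the CompactionLogger class is hypothetical:

    import org.rocksdb.AbstractEventListener;
    import org.rocksdb.CompactionJobInfo;
    import org.rocksdb.CompactionJobStats;
    import org.rocksdb.RocksDB;

    public class CompactionLogger extends AbstractEventListener {
      @Override
      public void onCompactionCompleted(final RocksDB db,
          final CompactionJobInfo compactionJobInfo) {
        System.out.printf("compaction job %d: L%d -> L%d, %d input file(s)%n",
            compactionJobInfo.jobId(),
            compactionJobInfo.baseInputLevel(),
            compactionJobInfo.outputLevel(),
            compactionJobInfo.inputFiles().size());

        // stats() is documented above to return null when unavailable.
        final CompactionJobStats stats = compactionJobInfo.stats();
        if (stats != null) {
          System.out.printf("  %d -> %d bytes in %d us%n",
              stats.totalInputBytes(), stats.totalOutputBytes(),
              stats.elapsedMicros());
        }
      }
    }

Such a listener would typically be registered on the options used to open the database (e.g. via a setListeners call), though the registration API itself is outside this patch.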
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobStats.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobStats.java
new file mode 100644
index 000000000..3d53b5565
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobStats.java
@@ -0,0 +1,295 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class CompactionJobStats extends RocksObject {
+
+ public CompactionJobStats() {
+ super(newCompactionJobStats());
+ }
+
+ /**
+ * Package-private, as called from the JNI C++ layer.
+ */
+ CompactionJobStats(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Reset the stats.
+ */
+ public void reset() {
+ reset(nativeHandle_);
+ }
+
+ /**
+ * Aggregate the CompactionJobStats from another instance with this one.
+ *
+ * @param compactionJobStats another instance of stats.
+ */
+ public void add(final CompactionJobStats compactionJobStats) {
+ add(nativeHandle_, compactionJobStats.nativeHandle_);
+ }
+
+ /**
+ * Get the elapsed time of this compaction in microseconds.
+ *
+ * @return the elapsed time of this compaction in microseconds.
+ */
+ public long elapsedMicros() {
+ return elapsedMicros(nativeHandle_);
+ }
+
+ /**
+ * Get the number of compaction input records.
+ *
+ * @return the number of compaction input records.
+ */
+ public long numInputRecords() {
+ return numInputRecords(nativeHandle_);
+ }
+
+ /**
+ * Get the number of compaction input files.
+ *
+ * @return the number of compaction input files.
+ */
+ public long numInputFiles() {
+ return numInputFiles(nativeHandle_);
+ }
+
+ /**
+ * Get the number of compaction input files at the output level.
+ *
+ * @return the number of compaction input files at the output level.
+ */
+ public long numInputFilesAtOutputLevel() {
+ return numInputFilesAtOutputLevel(nativeHandle_);
+ }
+
+ /**
+ * Get the number of compaction output records.
+ *
+ * @return the number of compaction output records.
+ */
+ public long numOutputRecords() {
+ return numOutputRecords(nativeHandle_);
+ }
+
+ /**
+ * Get the number of compaction output files.
+ *
+ * @return the number of compaction output files.
+ */
+ public long numOutputFiles() {
+ return numOutputFiles(nativeHandle_);
+ }
+
+ /**
+ * Determine if the compaction is a manual compaction.
+ *
+ * @return true if the compaction is a manual compaction, false otherwise.
+ */
+ public boolean isManualCompaction() {
+ return isManualCompaction(nativeHandle_);
+ }
+
+ /**
+ * Get the size of the compaction input in bytes.
+ *
+ * @return the size of the compaction input in bytes.
+ */
+ public long totalInputBytes() {
+ return totalInputBytes(nativeHandle_);
+ }
+
+ /**
+ * Get the size of the compaction output in bytes.
+ *
+ * @return the size of the compaction output in bytes.
+ */
+ public long totalOutputBytes() {
+ return totalOutputBytes(nativeHandle_);
+ }
+
+ /**
+ * Get the number of records replaced by a newer record associated
+ * with the same key.
+ *
+ * This could be a new value or a deletion entry for that key, so this field
+ * sums up all updated and deleted keys.
+ *
+ * @return the number of records replaced by a newer record associated
+ * with the same key.
+ */
+ public long numRecordsReplaced() {
+ return numRecordsReplaced(nativeHandle_);
+ }
+
+ /**
+ * Get the sum of the uncompressed input keys in bytes.
+ *
+ * @return the sum of the uncompressed input keys in bytes.
+ */
+ public long totalInputRawKeyBytes() {
+ return totalInputRawKeyBytes(nativeHandle_);
+ }
+
+ /**
+ * Get the sum of the uncompressed input values in bytes.
+ *
+ * @return the sum of the uncompressed input values in bytes.
+ */
+ public long totalInputRawValueBytes() {
+ return totalInputRawValueBytes(nativeHandle_);
+ }
+
+ /**
+ * Get the number of deletion entries before compaction.
+ *
+ * Deletion entries can disappear after compaction because they expired.
+ *
+ * @return the number of deletion entries before compaction.
+ */
+ public long numInputDeletionRecords() {
+ return numInputDeletionRecords(nativeHandle_);
+ }
+
+ /**
+ * Get the number of deletion records that were found obsolete and discarded
+ * because it is no longer possible to delete any keys with such an entry
+ * (i.e. all possible deletions resulting from it have been completed).
+ *
+ * @return the number of deletion records that were found obsolete and
+ * discarded.
+ */
+ public long numExpiredDeletionRecords() {
+ return numExpiredDeletionRecords(nativeHandle_);
+ }
+
+ /**
+ * Get the number of corrupt keys (ParseInternalKey returned false when
+ * applied to the key) encountered and written out.
+ *
+ * @return the number of corrupt keys.
+ */
+ public long numCorruptKeys() {
+ return numCorruptKeys(nativeHandle_);
+ }
+
+ /**
+ * Get the time spent on the file's Append() call.
+ *
+ * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
+ *
+ * @return the time spent on the file's Append() call.
+ */
+ public long fileWriteNanos() {
+ return fileWriteNanos(nativeHandle_);
+ }
+
+ /**
+ * Get the time spent on sync file range.
+ *
+ * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
+ *
+ * @return the time spent on sync file range.
+ */
+ public long fileRangeSyncNanos() {
+ return fileRangeSyncNanos(nativeHandle_);
+ }
+
+ /**
+   * Get the time spent on file fsync.
+   *
+   * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
+   *
+   * @return the time spent on file fsync.
+ */
+ public long fileFsyncNanos() {
+ return fileFsyncNanos(nativeHandle_);
+ }
+
+ /**
+   * Get the time spent on preparing the file write (fallocate, etc.).
+   *
+   * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
+   *
+   * @return the time spent on preparing the file write (fallocate, etc.).
+ */
+ public long filePrepareWriteNanos() {
+ return filePrepareWriteNanos(nativeHandle_);
+ }
+
+ /**
+ * Get the smallest output key prefix.
+ *
+ * @return the smallest output key prefix.
+ */
+ public byte[] smallestOutputKeyPrefix() {
+ return smallestOutputKeyPrefix(nativeHandle_);
+ }
+
+ /**
+ * Get the largest output key prefix.
+ *
+   * @return the largest output key prefix.
+ */
+ public byte[] largestOutputKeyPrefix() {
+ return largestOutputKeyPrefix(nativeHandle_);
+ }
+
+ /**
+ * Get the number of single-deletes which do not meet a put.
+ *
+ * @return number of single-deletes which do not meet a put.
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public long numSingleDelFallthru() {
+ return numSingleDelFallthru(nativeHandle_);
+ }
+
+ /**
+ * Get the number of single-deletes which meet something other than a put.
+ *
+ * @return the number of single-deletes which meet something other than a put.
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public long numSingleDelMismatch() {
+ return numSingleDelMismatch(nativeHandle_);
+ }
+
+ private static native long newCompactionJobStats();
+ @Override protected native void disposeInternal(final long handle);
+
+
+ private static native void reset(final long handle);
+ private static native void add(final long handle,
+ final long compactionJobStatsHandle);
+ private static native long elapsedMicros(final long handle);
+ private static native long numInputRecords(final long handle);
+ private static native long numInputFiles(final long handle);
+ private static native long numInputFilesAtOutputLevel(final long handle);
+ private static native long numOutputRecords(final long handle);
+ private static native long numOutputFiles(final long handle);
+ private static native boolean isManualCompaction(final long handle);
+ private static native long totalInputBytes(final long handle);
+ private static native long totalOutputBytes(final long handle);
+ private static native long numRecordsReplaced(final long handle);
+ private static native long totalInputRawKeyBytes(final long handle);
+ private static native long totalInputRawValueBytes(final long handle);
+ private static native long numInputDeletionRecords(final long handle);
+ private static native long numExpiredDeletionRecords(final long handle);
+ private static native long numCorruptKeys(final long handle);
+ private static native long fileWriteNanos(final long handle);
+ private static native long fileRangeSyncNanos(final long handle);
+ private static native long fileFsyncNanos(final long handle);
+ private static native long filePrepareWriteNanos(final long handle);
+ private static native byte[] smallestOutputKeyPrefix(final long handle);
+ private static native byte[] largestOutputKeyPrefix(final long handle);
+ private static native long numSingleDelFallthru(final long handle);
+ private static native long numSingleDelMismatch(final long handle);
+}
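+// Usage sketch (illustrative only, not part of the original file): reading
+// these statistics after a manual compaction. Assumes an open RocksDB `db`,
+// a ColumnFamilyHandle `cfh` and a List<String> `inputFileNames`; it relies
+// on RocksDB#compactFiles and CompactionJobInfo#stats() from the wider API
+// added by this patch.
+//
+//   try (final CompactionOptions compactionOptions = new CompactionOptions();
+//        final CompactionJobInfo jobInfo = new CompactionJobInfo()) {
+//     db.compactFiles(compactionOptions, cfh, inputFileNames,
+//         1 /* outputLevel */, -1 /* outputPathId */, jobInfo);
+//     final CompactionJobStats stats = jobInfo.stats();
+//     System.out.println("input records: " + stats.numInputRecords());
+//     System.out.println("output files:  " + stats.numOutputFiles());
+//     System.out.println("output bytes:  " + stats.totalOutputBytes());
+//   } // compactFiles throws RocksDBException, which the caller must handle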
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptions.java
new file mode 100644
index 000000000..2c7e391fb
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptions.java
@@ -0,0 +1,121 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * CompactionOptions are used in
+ * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)}
+ * calls.
+ */
+public class CompactionOptions extends RocksObject {
+
+ public CompactionOptions() {
+ super(newCompactionOptions());
+ }
+
+ /**
+ * Get the compaction output compression type.
+ *
+ * See {@link #setCompression(CompressionType)}.
+ *
+ * @return the compression type.
+ */
+ public CompressionType compression() {
+ return CompressionType.getCompressionType(
+ compression(nativeHandle_));
+ }
+
+ /**
+ * Set the compaction output compression type.
+ *
+ * Default: snappy
+ *
+ * If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION},
+ * RocksDB will choose compression type according to the
+ * {@link ColumnFamilyOptions#compressionType()}, taking into account
+ * the output level if {@link ColumnFamilyOptions#compressionPerLevel()}
+ * is specified.
+ *
+ * @param compression the compression type to use for compaction output.
+ *
+ * @return the instance of the current Options.
+ */
+ public CompactionOptions setCompression(final CompressionType compression) {
+ setCompression(nativeHandle_, compression.getValue());
+ return this;
+ }
+
+ /**
+ * Get the compaction output file size limit.
+ *
+ * See {@link #setOutputFileSizeLimit(long)}.
+ *
+ * @return the file size limit.
+ */
+ public long outputFileSizeLimit() {
+ return outputFileSizeLimit(nativeHandle_);
+ }
+
+ /**
+ * Compaction will create files of size {@link #outputFileSizeLimit()}.
+ *
+   * Default: 2^64-1, which means that compaction will create a single file.
+ *
+ * @param outputFileSizeLimit the size limit
+ *
+ * @return the instance of the current Options.
+ */
+ public CompactionOptions setOutputFileSizeLimit(
+ final long outputFileSizeLimit) {
+ setOutputFileSizeLimit(nativeHandle_, outputFileSizeLimit);
+ return this;
+ }
+
+ /**
+ * Get the maximum number of threads that will concurrently perform a
+ * compaction job.
+ *
+ * @return the maximum number of threads.
+ */
+ public int maxSubcompactions() {
+ return maxSubcompactions(nativeHandle_);
+ }
+
+ /**
+ * This value represents the maximum number of threads that will
+ * concurrently perform a compaction job by breaking it into multiple,
+ * smaller ones that are run simultaneously.
+ *
+ * Default: 0 (i.e. no subcompactions)
+ *
+ * If &gt; 0, it will replace the option in
+ * {@link DBOptions#maxSubcompactions()} for this compaction.
+ *
+ * @param maxSubcompactions The maximum number of threads that will
+ * concurrently perform a compaction job
+ *
+ * @return the instance of the current Options.
+ */
+ public CompactionOptions setMaxSubcompactions(final int maxSubcompactions) {
+ setMaxSubcompactions(nativeHandle_, maxSubcompactions);
+ return this;
+ }
+
+ private static native long newCompactionOptions();
+ @Override protected final native void disposeInternal(final long handle);
+
+ private static native byte compression(final long handle);
+ private static native void setCompression(final long handle,
+ final byte compressionTypeValue);
+ private static native long outputFileSizeLimit(final long handle);
+ private static native void setOutputFileSizeLimit(final long handle,
+ final long outputFileSizeLimit);
+ private static native int maxSubcompactions(final long handle);
+ private static native void setMaxSubcompactions(final long handle,
+ final int maxSubcompactions);
+}
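+// Usage sketch (illustrative only, not part of the original file): setting
+// the knobs documented above before a RocksDB#compactFiles call; the values
+// shown are arbitrary examples.
+//
+//   try (final CompactionOptions compactionOptions = new CompactionOptions()
+//       .setCompression(CompressionType.ZSTD_COMPRESSION)
+//       .setOutputFileSizeLimit(64L * 1024 * 1024) // ~64 MiB output files
+//       .setMaxSubcompactions(4)) {
+//     // pass compactionOptions to RocksDB#compactFiles(...)
+//   }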
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
new file mode 100644
index 000000000..4c8d6545c
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
@@ -0,0 +1,89 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Options for FIFO Compaction
+ */
+public class CompactionOptionsFIFO extends RocksObject {
+
+ public CompactionOptionsFIFO() {
+ super(newCompactionOptionsFIFO());
+ }
+
+ /**
+ * Once the total sum of table files reaches this, we will delete the oldest
+ * table file
+ *
+ * Default: 1GB
+ *
+ * @param maxTableFilesSize The maximum size of the table files
+ *
+ * @return the reference to the current options.
+ */
+ public CompactionOptionsFIFO setMaxTableFilesSize(
+ final long maxTableFilesSize) {
+ setMaxTableFilesSize(nativeHandle_, maxTableFilesSize);
+ return this;
+ }
+
+ /**
+ * Once the total sum of table files reaches this, we will delete the oldest
+ * table file
+ *
+ * Default: 1GB
+ *
+ * @return max table file size in bytes
+ */
+ public long maxTableFilesSize() {
+ return maxTableFilesSize(nativeHandle_);
+ }
+
+ /**
+ * If true, try to do compaction to compact smaller files into larger ones.
+   * The minimum number of files to compact follows
+   * options.level0_file_num_compaction_trigger, and compaction won't trigger
+   * if the average compacted bytes per deleted file is larger than
+   * options.write_buffer_size. This protects large files from being
+   * compacted again.
+ *
+ * Default: false
+ *
+ * @param allowCompaction true to allow intra-L0 compaction
+ *
+ * @return the reference to the current options.
+ */
+ public CompactionOptionsFIFO setAllowCompaction(
+ final boolean allowCompaction) {
+ setAllowCompaction(nativeHandle_, allowCompaction);
+ return this;
+ }
+
+
+ /**
+ * Check if intra-L0 compaction is enabled.
+ * When enabled, we try to compact smaller files into larger ones.
+ *
+ * See {@link #setAllowCompaction(boolean)}.
+ *
+ * Default: false
+ *
+ * @return true if intra-L0 compaction is enabled, false otherwise.
+ */
+ public boolean allowCompaction() {
+ return allowCompaction(nativeHandle_);
+ }
+
+
+  private static native long newCompactionOptionsFIFO();
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native void setMaxTableFilesSize(final long handle,
+ final long maxTableFilesSize);
+ private native long maxTableFilesSize(final long handle);
+ private native void setAllowCompaction(final long handle,
+ final boolean allowCompaction);
+ private native boolean allowCompaction(final long handle);
+}
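+// Usage sketch (illustrative only, not part of the original file): enabling
+// FIFO compaction for a column family. Assumes the ColumnFamilyOptions
+// setters setCompactionStyle and setCompactionOptionsFIFO, which live outside
+// this patch excerpt.
+//
+//   try (final CompactionOptionsFIFO fifoOptions = new CompactionOptionsFIFO()
+//            .setMaxTableFilesSize(10L * 1024 * 1024 * 1024) // keep ~10 GiB
+//            .setAllowCompaction(true); // allow intra-L0 compaction
+//        final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
+//            .setCompactionStyle(CompactionStyle.FIFO)
+//            .setCompactionOptionsFIFO(fifoOptions)) {
+//     // open the database / column family with cfOptions
+//   }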
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
new file mode 100644
index 000000000..d2dfa4eef
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
@@ -0,0 +1,273 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Options for Universal Compaction
+ */
+public class CompactionOptionsUniversal extends RocksObject {
+
+ public CompactionOptionsUniversal() {
+ super(newCompactionOptionsUniversal());
+ }
+
+ /**
+ * Percentage flexibility while comparing file size. If the candidate file(s)
+   * size is 1% smaller than the next file's size, then include the next file
+   * into this candidate set.
+ *
+ * Default: 1
+ *
+ * @param sizeRatio The size ratio to use
+ *
+ * @return the reference to the current options.
+ */
+ public CompactionOptionsUniversal setSizeRatio(final int sizeRatio) {
+ setSizeRatio(nativeHandle_, sizeRatio);
+ return this;
+ }
+
+ /**
+ * Percentage flexibility while comparing file size. If the candidate file(s)
+   * size is 1% smaller than the next file's size, then include the next file
+   * into this candidate set.
+ *
+ * Default: 1
+ *
+ * @return The size ratio in use
+ */
+ public int sizeRatio() {
+ return sizeRatio(nativeHandle_);
+ }
+
+ /**
+ * The minimum number of files in a single compaction run.
+ *
+ * Default: 2
+ *
+ * @param minMergeWidth minimum number of files in a single compaction run
+ *
+ * @return the reference to the current options.
+ */
+ public CompactionOptionsUniversal setMinMergeWidth(final int minMergeWidth) {
+ setMinMergeWidth(nativeHandle_, minMergeWidth);
+ return this;
+ }
+
+ /**
+ * The minimum number of files in a single compaction run.
+ *
+ * Default: 2
+ *
+ * @return minimum number of files in a single compaction run
+ */
+ public int minMergeWidth() {
+ return minMergeWidth(nativeHandle_);
+ }
+
+ /**
+ * The maximum number of files in a single compaction run.
+ *
+ * Default: {@link Long#MAX_VALUE}
+ *
+ * @param maxMergeWidth maximum number of files in a single compaction run
+ *
+ * @return the reference to the current options.
+ */
+ public CompactionOptionsUniversal setMaxMergeWidth(final int maxMergeWidth) {
+ setMaxMergeWidth(nativeHandle_, maxMergeWidth);
+ return this;
+ }
+
+ /**
+ * The maximum number of files in a single compaction run.
+ *
+ * Default: {@link Long#MAX_VALUE}
+ *
+ * @return maximum number of files in a single compaction run
+ */
+ public int maxMergeWidth() {
+ return maxMergeWidth(nativeHandle_);
+ }
+
+ /**
+ * The size amplification is defined as the amount (in percentage) of
+ * additional storage needed to store a single byte of data in the database.
+ * For example, a size amplification of 2% means that a database that
+   * contains 100 bytes of user-data may occupy up to 102 bytes of
+   * physical storage. By this definition, a fully compacted database has
+   * a size amplification of 0%. RocksDB uses the following heuristic
+ * to calculate size amplification: it assumes that all files excluding
+ * the earliest file contribute to the size amplification.
+ *
+   * Default: 200, which means that a 100 byte database could require up to
+ * 300 bytes of storage.
+ *
+ * @param maxSizeAmplificationPercent the amount of additional storage needed
+ * (as a percentage) to store a single byte in the database
+ *
+ * @return the reference to the current options.
+ */
+ public CompactionOptionsUniversal setMaxSizeAmplificationPercent(
+ final int maxSizeAmplificationPercent) {
+ setMaxSizeAmplificationPercent(nativeHandle_, maxSizeAmplificationPercent);
+ return this;
+ }
+
+ /**
+ * The size amplification is defined as the amount (in percentage) of
+ * additional storage needed to store a single byte of data in the database.
+ * For example, a size amplification of 2% means that a database that
+   * contains 100 bytes of user-data may occupy up to 102 bytes of
+   * physical storage. By this definition, a fully compacted database has
+   * a size amplification of 0%. RocksDB uses the following heuristic
+ * to calculate size amplification: it assumes that all files excluding
+ * the earliest file contribute to the size amplification.
+ *
+   * Default: 200, which means that a 100 byte database could require up to
+ * 300 bytes of storage.
+ *
+ * @return the amount of additional storage needed (as a percentage) to store
+ * a single byte in the database
+ */
+ public int maxSizeAmplificationPercent() {
+ return maxSizeAmplificationPercent(nativeHandle_);
+ }
+
+ /**
+   * If this option is set to -1 (the default value), all the output files
+   * will follow the compression type specified.
+   *
+   * If this option is not negative, we will try to make sure the compressed
+   * size is just above this value. In normal cases, at least this percentage
+   * of data will be compressed.
+   *
+   * When we are compacting to a new file, the criterion for whether it needs
+   * to be compressed is as follows: assuming the list of files, sorted by
+   * generation time, is
+   * A1...An B1...Bm C1...Ct
+   * where A1 is the newest and Ct is the oldest, and we are going to compact
+   * B1...Bm, we calculate the total size of all the files as total_size, as
+   * well as the total size of C1...Ct as total_C; the compaction output file
+   * will be compressed iff
+   * total_C / total_size &lt; this percentage
+ *
+ * Default: -1
+ *
+ * @param compressionSizePercent percentage of size for compression
+ *
+ * @return the reference to the current options.
+ */
+ public CompactionOptionsUniversal setCompressionSizePercent(
+ final int compressionSizePercent) {
+ setCompressionSizePercent(nativeHandle_, compressionSizePercent);
+ return this;
+ }
+
+ /**
+   * If this option is set to -1 (the default value), all the output files
+   * will follow the compression type specified.
+   *
+   * If this option is not negative, we will try to make sure the compressed
+   * size is just above this value. In normal cases, at least this percentage
+   * of data will be compressed.
+   *
+   * When we are compacting to a new file, the criterion for whether it needs
+   * to be compressed is as follows: assuming the list of files, sorted by
+   * generation time, is
+   * A1...An B1...Bm C1...Ct
+   * where A1 is the newest and Ct is the oldest, and we are going to compact
+   * B1...Bm, we calculate the total size of all the files as total_size, as
+   * well as the total size of C1...Ct as total_C; the compaction output file
+   * will be compressed iff
+   * total_C / total_size &lt; this percentage
+ *
+ * Default: -1
+ *
+ * @return percentage of size for compression
+ */
+ public int compressionSizePercent() {
+ return compressionSizePercent(nativeHandle_);
+ }
+
+ /**
+ * The algorithm used to stop picking files into a single compaction run
+ *
+ * Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize}
+ *
+ * @param compactionStopStyle The compaction algorithm
+ *
+ * @return the reference to the current options.
+ */
+ public CompactionOptionsUniversal setStopStyle(
+ final CompactionStopStyle compactionStopStyle) {
+ setStopStyle(nativeHandle_, compactionStopStyle.getValue());
+ return this;
+ }
+
+ /**
+ * The algorithm used to stop picking files into a single compaction run
+ *
+ * Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize}
+ *
+ * @return The compaction algorithm
+ */
+ public CompactionStopStyle stopStyle() {
+ return CompactionStopStyle.getCompactionStopStyle(stopStyle(nativeHandle_));
+ }
+
+ /**
+ * Option to optimize the universal multi level compaction by enabling
+ * trivial move for non overlapping files.
+ *
+ * Default: false
+ *
+ * @param allowTrivialMove true if trivial move is allowed
+ *
+ * @return the reference to the current options.
+ */
+ public CompactionOptionsUniversal setAllowTrivialMove(
+ final boolean allowTrivialMove) {
+ setAllowTrivialMove(nativeHandle_, allowTrivialMove);
+ return this;
+ }
+
+ /**
+ * Option to optimize the universal multi level compaction by enabling
+ * trivial move for non overlapping files.
+ *
+ * Default: false
+ *
+ * @return true if trivial move is allowed
+ */
+ public boolean allowTrivialMove() {
+ return allowTrivialMove(nativeHandle_);
+ }
+
+  private static native long newCompactionOptionsUniversal();
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native void setSizeRatio(final long handle, final int sizeRatio);
+ private native int sizeRatio(final long handle);
+ private native void setMinMergeWidth(
+ final long handle, final int minMergeWidth);
+ private native int minMergeWidth(final long handle);
+ private native void setMaxMergeWidth(
+ final long handle, final int maxMergeWidth);
+ private native int maxMergeWidth(final long handle);
+ private native void setMaxSizeAmplificationPercent(
+ final long handle, final int maxSizeAmplificationPercent);
+ private native int maxSizeAmplificationPercent(final long handle);
+ private native void setCompressionSizePercent(
+ final long handle, final int compressionSizePercent);
+ private native int compressionSizePercent(final long handle);
+ private native void setStopStyle(
+ final long handle, final byte stopStyle);
+ private native byte stopStyle(final long handle);
+ private native void setAllowTrivialMove(
+ final long handle, final boolean allowTrivialMove);
+ private native boolean allowTrivialMove(final long handle);
+}
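+// Usage sketch (illustrative only, not part of the original file): tuning
+// universal compaction. Assumes the ColumnFamilyOptions setters
+// setCompactionStyle and setCompactionOptionsUniversal from the wider API.
+//
+//   try (final CompactionOptionsUniversal universalOptions =
+//            new CompactionOptionsUniversal()
+//                .setSizeRatio(1)
+//                .setMaxSizeAmplificationPercent(200)
+//                .setStopStyle(CompactionStopStyle.CompactionStopStyleTotalSize);
+//        final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
+//            .setCompactionStyle(CompactionStyle.UNIVERSAL)
+//            .setCompactionOptionsUniversal(universalOptions)) {
+//     // open the database / column family with cfOptions
+//   }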
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java
new file mode 100644
index 000000000..a4f53cd64
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java
@@ -0,0 +1,73 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Compaction Priorities
+ */
+public enum CompactionPriority {
+
+ /**
+   * Slightly prioritize larger files by size compensated by #deletes
+ */
+ ByCompensatedSize((byte)0x0),
+
+ /**
+ * First compact files whose data's latest update time is oldest.
+ * Try this if you only update some hot keys in small ranges.
+ */
+ OldestLargestSeqFirst((byte)0x1),
+
+ /**
+ * First compact files whose range hasn't been compacted to the next level
+ * for the longest. If your updates are random across the key space,
+ * write amplification is slightly better with this option.
+ */
+ OldestSmallestSeqFirst((byte)0x2),
+
+ /**
+ * First compact files whose ratio between overlapping size in next level
+ * and its size is the smallest. It in many cases can optimize write
+ * amplification.
+ */
+ MinOverlappingRatio((byte)0x3);
+
+
+ private final byte value;
+
+ CompactionPriority(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Returns the byte value of the enumerations value
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get CompactionPriority by byte value.
+ *
+ * @param value byte representation of CompactionPriority.
+ *
+   * @return {@link org.rocksdb.CompactionPriority} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ public static CompactionPriority getCompactionPriority(final byte value) {
+ for (final CompactionPriority compactionPriority :
+ CompactionPriority.values()) {
+ if (compactionPriority.getValue() == value){
+ return compactionPriority;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for CompactionPriority.");
+ }
+}
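+// Usage sketch (illustrative only, not part of the original file): selecting
+// a compaction priority for level-based compaction. Assumes the
+// ColumnFamilyOptions#setCompactionPriority setter from the wider API.
+//
+//   try (final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
+//       .setCompactionPriority(CompactionPriority.MinOverlappingRatio)) {
+//     // MinOverlappingRatio often reduces write amplification (see above)
+//   }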
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionReason.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionReason.java
new file mode 100644
index 000000000..f18c48122
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionReason.java
@@ -0,0 +1,115 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public enum CompactionReason {
+ kUnknown((byte)0x0),
+
+ /**
+ * [Level] number of L0 files &gt; level0_file_num_compaction_trigger
+ */
+ kLevelL0FilesNum((byte)0x1),
+
+ /**
+ * [Level] total size of level &gt; MaxBytesForLevel()
+ */
+ kLevelMaxLevelSize((byte)0x2),
+
+ /**
+ * [Universal] Compacting for size amplification
+ */
+ kUniversalSizeAmplification((byte)0x3),
+
+ /**
+ * [Universal] Compacting for size ratio
+ */
+ kUniversalSizeRatio((byte)0x4),
+
+ /**
+ * [Universal] number of sorted runs &gt; level0_file_num_compaction_trigger
+ */
+ kUniversalSortedRunNum((byte)0x5),
+
+ /**
+ * [FIFO] total size &gt; max_table_files_size
+ */
+ kFIFOMaxSize((byte)0x6),
+
+ /**
+ * [FIFO] reduce number of files.
+ */
+ kFIFOReduceNumFiles((byte)0x7),
+
+ /**
+ * [FIFO] files with creation time &lt; (current_time - interval)
+ */
+ kFIFOTtl((byte)0x8),
+
+ /**
+ * Manual compaction
+ */
+ kManualCompaction((byte)0x9),
+
+ /**
+ * DB::SuggestCompactRange() marked files for compaction
+ */
+ kFilesMarkedForCompaction((byte)0x10),
+
+ /**
+ * [Level] Automatic compaction within bottommost level to cleanup duplicate
+ * versions of same user key, usually due to a released snapshot.
+ */
+ kBottommostFiles((byte)0x0A),
+
+ /**
+ * Compaction based on TTL
+ */
+ kTtl((byte)0x0B),
+
+ /**
+ * According to the comments in flush_job.cc, RocksDB treats flush as
+ * a level 0 compaction in internal stats.
+ */
+ kFlush((byte)0x0C),
+
+ /**
+ * Compaction caused by external sst file ingestion
+ */
+ kExternalSstIngestion((byte)0x0D);
+
+ private final byte value;
+
+ CompactionReason(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal representation value.
+ *
+ * @return the internal representation value
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+   * Get the CompactionReason from the internal representation value.
+   *
+   * @param value the internal representation value.
+   *
+   * @return the compaction reason.
+ *
+ * @throws IllegalArgumentException if the value is unknown.
+ */
+ static CompactionReason fromValue(final byte value) {
+ for (final CompactionReason compactionReason : CompactionReason.values()) {
+ if(compactionReason.value == value) {
+ return compactionReason;
+ }
+ }
+
+ throw new IllegalArgumentException(
+ "Illegal value provided for CompactionReason: " + value);
+ }
+}
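+// Usage sketch (illustrative only, not part of the original file):
+// CompactionReason reaches callers through CompactionJobInfo (added elsewhere
+// in this patch); fromValue(...) is package-private plumbing for JNI.
+//
+//   // after RocksDB#compactFiles(..., jobInfo):
+//   final CompactionReason reason = jobInfo.compactionReason();
+//   if (reason == CompactionReason.kManualCompaction) {
+//     // expected for jobs triggered via compactFiles
+//   }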
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java
new file mode 100644
index 000000000..f6e63209c
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java
@@ -0,0 +1,55 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+/**
+ * Algorithm used to make a compaction request stop picking new files
+ * into a single compaction run
+ */
+public enum CompactionStopStyle {
+
+ /**
+ * Pick files of similar size
+ */
+ CompactionStopStyleSimilarSize((byte)0x0),
+
+ /**
+ * Total size of picked files &gt; next file
+ */
+ CompactionStopStyleTotalSize((byte)0x1);
+
+
+ private final byte value;
+
+ CompactionStopStyle(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Returns the byte value of the enumerations value
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get CompactionStopStyle by byte value.
+ *
+ * @param value byte representation of CompactionStopStyle.
+ *
+   * @return {@link org.rocksdb.CompactionStopStyle} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ public static CompactionStopStyle getCompactionStopStyle(final byte value) {
+ for (final CompactionStopStyle compactionStopStyle :
+ CompactionStopStyle.values()) {
+ if (compactionStopStyle.getValue() == value){
+ return compactionStopStyle;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for CompactionStopStyle.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java
new file mode 100644
index 000000000..b24bbf850
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java
@@ -0,0 +1,80 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * Enum CompactionStyle
+ *
+ * RocksDB supports different styles of compaction. Available
+ * compaction styles can be chosen using this enumeration.
+ *
+ * <ol>
+ * <li><strong>LEVEL</strong> - Level based Compaction style</li>
+ * <li><strong>UNIVERSAL</strong> - Universal Compaction Style is a
+ * compaction style, targeting the use cases requiring lower write
+ * amplification, trading off read amplification and space
+ * amplification.</li>
+ * <li><strong>FIFO</strong> - FIFO compaction style is the simplest
+ * compaction strategy. It is suited for keeping event log data with
+ * very low overhead (query log for example). It periodically deletes
+ * the old data, so it's basically a TTL compaction style.</li>
+ * <li><strong>NONE</strong> - Disable background compaction.
+ * Compaction jobs are submitted via
+ * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)}.</li>
+ * </ol>
+ *
+ * @see <a
+ * href="https://github.com/facebook/rocksdb/wiki/Universal-Compaction">
+ * Universal Compaction</a>
+ * @see <a
+ * href="https://github.com/facebook/rocksdb/wiki/FIFO-compaction-style">
+ * FIFO Compaction</a>
+ */
+public enum CompactionStyle {
+ LEVEL((byte) 0x0),
+ UNIVERSAL((byte) 0x1),
+ FIFO((byte) 0x2),
+ NONE((byte) 0x3);
+
+ private final byte value;
+
+ CompactionStyle(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal representation value.
+ *
+ * @return the internal representation value.
+ */
+ //TODO(AR) should be made package-private
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get the Compaction style from the internal representation value.
+ *
+ * @param value the internal representation value.
+ *
+ * @return the Compaction style
+ *
+ * @throws IllegalArgumentException if the value does not match a
+ * CompactionStyle
+ */
+ static CompactionStyle fromValue(final byte value)
+ throws IllegalArgumentException {
+ for (final CompactionStyle compactionStyle : CompactionStyle.values()) {
+ if (compactionStyle.value == value) {
+ return compactionStyle;
+ }
+ }
+ throw new IllegalArgumentException("Unknown value for CompactionStyle: "
+ + value);
+ }
+}
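+// Usage sketch (illustrative only, not part of the original file): choosing a
+// compaction style. As the javadoc above notes, NONE disables background
+// compaction, leaving compaction to explicit RocksDB#compactFiles calls.
+// Assumes the ColumnFamilyOptions#setCompactionStyle setter from the wider API.
+//
+//   try (final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
+//       .setCompactionStyle(CompactionStyle.NONE)) {
+//     // compaction must now be driven manually via compactFiles(...)
+//   }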
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java
new file mode 100644
index 000000000..8c3162858
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java
@@ -0,0 +1,133 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * This class controls the behaviour
+ * of Java implementations of
+ * AbstractComparator
+ *
+ * Note that dispose() must be called before a ComparatorOptions
+ * instance goes out of scope, in order to release the allocated memory in C++.
+ */
+public class ComparatorOptions extends RocksObject {
+ public ComparatorOptions() {
+ super(newComparatorOptions());
+ }
+
+ /**
+ * Get the synchronisation type used to guard the reused buffers.
+ * Only used if {@link #maxReusedBufferSize()} &gt; 0
+ * Default: {@link ReusedSynchronisationType#ADAPTIVE_MUTEX}
+ *
+ * @return the synchronisation type
+ */
+ public ReusedSynchronisationType reusedSynchronisationType() {
+ assert(isOwningHandle());
+ return ReusedSynchronisationType.getReusedSynchronisationType(
+ reusedSynchronisationType(nativeHandle_));
+ }
+
+ /**
+ * Set the synchronisation type used to guard the reused buffers.
+ * Only used if {@link #maxReusedBufferSize()} &gt; 0
+ * Default: {@link ReusedSynchronisationType#ADAPTIVE_MUTEX}
+ *
+ * @param reusedSynchronisationType the synchronisation type
+ *
+ * @return the reference to the current comparator options.
+ */
+ public ComparatorOptions setReusedSynchronisationType(
+ final ReusedSynchronisationType reusedSynchronisationType) {
+ assert (isOwningHandle());
+ setReusedSynchronisationType(nativeHandle_,
+ reusedSynchronisationType.getValue());
+ return this;
+ }
+
+ /**
+ * Indicates if a direct byte buffer (i.e. outside of the normal
+ * garbage-collected heap) is used, as opposed to a non-direct byte buffer
+ * which is a wrapper around an on-heap byte[].
+ *
+ * Default: true
+ *
+ * @return true if a direct byte buffer will be used, false otherwise
+ */
+ public boolean useDirectBuffer() {
+ assert(isOwningHandle());
+ return useDirectBuffer(nativeHandle_);
+ }
+
+ /**
+ * Controls whether a direct byte buffer (i.e. outside of the normal
+ * garbage-collected heap) is used, as opposed to a non-direct byte buffer
+ * which is a wrapper around an on-heap byte[].
+ *
+ * Default: true
+ *
+ * @param useDirectBuffer true if a direct byte buffer should be used,
+ * false otherwise
+ * @return the reference to the current comparator options.
+ */
+ public ComparatorOptions setUseDirectBuffer(final boolean useDirectBuffer) {
+ assert(isOwningHandle());
+ setUseDirectBuffer(nativeHandle_, useDirectBuffer);
+ return this;
+ }
+
+ /**
+ * Maximum size of a buffer (in bytes) that will be reused.
+ * Comparators will use 5 of these buffers,
+ * so the retained memory size will be 5 * max_reused_buffer_size.
+ * When a buffer is needed for transferring data to a callback,
+ * if it requires less than {@code maxReuseBufferSize}, then an
+ * existing buffer will be reused, else a new buffer will be
+ * allocated just for that callback.
+ *
+ * Default: 64 bytes
+ *
+ * @return the maximum size of a buffer which is reused,
+ * or 0 if reuse is disabled
+ */
+ public int maxReusedBufferSize() {
+ assert(isOwningHandle());
+ return maxReusedBufferSize(nativeHandle_);
+ }
+
+ /**
+ * Sets the maximum size of a buffer (in bytes) that will be reused.
+ * Comparators will use 5 of these buffers,
+ * so the retained memory size will be 5 * max_reused_buffer_size.
+ * When a buffer is needed for transferring data to a callback,
+ * if it requires less than {@code maxReuseBufferSize}, then an
+ * existing buffer will be reused, else a new buffer will be
+ * allocated just for that callback.
+ *
+ * Default: 64 bytes
+ *
+ * @param maxReusedBufferSize the maximum size for a buffer to reuse, or 0 to
+ * disable reuse
+ *
+   * @return the reference to the current comparator options.
+ */
+ public ComparatorOptions setMaxReusedBufferSize(final int maxReusedBufferSize) {
+ assert(isOwningHandle());
+ setMaxReusedBufferSize(nativeHandle_, maxReusedBufferSize);
+ return this;
+ }
+
+  private static native long newComparatorOptions();
+ private native byte reusedSynchronisationType(final long handle);
+ private native void setReusedSynchronisationType(final long handle,
+ final byte reusedSynchronisationType);
+ private native boolean useDirectBuffer(final long handle);
+ private native void setUseDirectBuffer(final long handle,
+ final boolean useDirectBuffer);
+ private native int maxReusedBufferSize(final long handle);
+ private native void setMaxReusedBufferSize(final long handle,
+ final int maxReuseBufferSize);
+ @Override protected final native void disposeInternal(final long handle);
+}
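+// Usage sketch (illustrative only, not part of the original file): passing
+// ComparatorOptions to a custom comparator. Assumes AbstractComparator's
+// (ComparatorOptions) constructor, its ByteBuffer-based compare/name methods,
+// and a java.nio.ByteBuffer import, all from outside this patch excerpt.
+//
+//   final ComparatorOptions comparatorOptions = new ComparatorOptions()
+//       .setUseDirectBuffer(true)
+//       .setMaxReusedBufferSize(1024); // reuse buffers of up to 1 KiB
+//   final AbstractComparator reverseComparator =
+//       new AbstractComparator(comparatorOptions) {
+//         @Override
+//         public String name() {
+//           return "example.ReverseComparator"; // hypothetical name
+//         }
+//         @Override
+//         public int compare(final ByteBuffer a, final ByteBuffer b) {
+//           return b.compareTo(a); // reverse of ByteBuffer's natural order
+//         }
+//       };
+//   // then: new ColumnFamilyOptions().setComparator(reverseComparator)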
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorType.java b/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorType.java
new file mode 100644
index 000000000..199980b6e
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorType.java
@@ -0,0 +1,48 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+enum ComparatorType {
+ JAVA_COMPARATOR((byte)0x0),
+ JAVA_NATIVE_COMPARATOR_WRAPPER((byte)0x1);
+
+ private final byte value;
+
+ ComparatorType(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * <p>Returns the byte value of the enumerations value.</p>
+ *
+ * @return byte representation
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * <p>Get the ComparatorType enumeration value by
+ * passing the byte identifier to this method.</p>
+ *
+ * @param byteIdentifier of ComparatorType.
+ *
+ * @return ComparatorType instance.
+ *
+ * @throws IllegalArgumentException if the comparator type for the byteIdentifier
+ * cannot be found
+ */
+ static ComparatorType getComparatorType(final byte byteIdentifier) {
+ for (final ComparatorType comparatorType : ComparatorType.values()) {
+ if (comparatorType.getValue() == byteIdentifier) {
+ return comparatorType;
+ }
+ }
+
+ throw new IllegalArgumentException(
+ "Illegal value provided for ComparatorType.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java
new file mode 100644
index 000000000..a9072bbb9
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java
@@ -0,0 +1,151 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Options for Compression
+ */
+public class CompressionOptions extends RocksObject {
+
+ public CompressionOptions() {
+ super(newCompressionOptions());
+ }
+
+ public CompressionOptions setWindowBits(final int windowBits) {
+ setWindowBits(nativeHandle_, windowBits);
+ return this;
+ }
+
+ public int windowBits() {
+ return windowBits(nativeHandle_);
+ }
+
+ public CompressionOptions setLevel(final int level) {
+ setLevel(nativeHandle_, level);
+ return this;
+ }
+
+ public int level() {
+ return level(nativeHandle_);
+ }
+
+ public CompressionOptions setStrategy(final int strategy) {
+ setStrategy(nativeHandle_, strategy);
+ return this;
+ }
+
+ public int strategy() {
+ return strategy(nativeHandle_);
+ }
+
+ /**
+ * Maximum size of dictionary used to prime the compression library. Currently
+ * this dictionary will be constructed by sampling the first output file in a
+ * subcompaction when the target level is bottommost. This dictionary will be
+ * loaded into the compression library before compressing/uncompressing each
+ * data block of subsequent files in the subcompaction. Effectively, this
+ * improves compression ratios when there are repetitions across data blocks.
+ *
+ * A value of 0 indicates the feature is disabled.
+ *
+ * Default: 0.
+ *
+ * @param maxDictBytes Maximum bytes to use for the dictionary
+ *
+ * @return the reference to the current options
+ */
+ public CompressionOptions setMaxDictBytes(final int maxDictBytes) {
+ setMaxDictBytes(nativeHandle_, maxDictBytes);
+ return this;
+ }
+
+ /**
+ * Maximum size of dictionary used to prime the compression library.
+ *
+ * @return The maximum bytes to use for the dictionary
+ */
+ public int maxDictBytes() {
+ return maxDictBytes(nativeHandle_);
+ }
+
+ /**
+ * Maximum size of training data passed to zstd's dictionary trainer. Using
+ * zstd's dictionary trainer can achieve even better compression ratio
+ * improvements than using {@link #setMaxDictBytes(int)} alone.
+ *
+ * The training data will be used to generate a dictionary
+ * of {@link #maxDictBytes()}.
+ *
+ * Default: 0.
+ *
+ * @param zstdMaxTrainBytes Maximum bytes to use for training ZStd.
+ *
+ * @return the reference to the current options
+ */
+ public CompressionOptions setZStdMaxTrainBytes(final int zstdMaxTrainBytes) {
+ setZstdMaxTrainBytes(nativeHandle_, zstdMaxTrainBytes);
+ return this;
+ }
+
+ /**
+ * Maximum size of training data passed to zstd's dictionary trainer.
+ *
+ * @return Maximum bytes to use for training ZStd
+ */
+ public int zstdMaxTrainBytes() {
+ return zstdMaxTrainBytes(nativeHandle_);
+ }
+
+ /**
+   * When the compression options are set by the user, this field will be set
+   * to true.
+   * For bottommost_compression_opts, the user must set enabled=true to enable
+   * it; otherwise, bottommost compression will use compression_opts as its
+   * default compression options.
+   *
+   * For compression_opts, even if compression_opts.enabled=false, it is still
+   * used as the compression options for the compression process.
+ *
+ * Default: false.
+ *
+ * @param enabled true to use these compression options
+ * for the bottommost_compression_opts, false otherwise
+ *
+ * @return the reference to the current options
+ */
+ public CompressionOptions setEnabled(final boolean enabled) {
+ setEnabled(nativeHandle_, enabled);
+ return this;
+ }
+
+ /**
+ * Determine whether these compression options
+ * are used for the bottommost_compression_opts.
+ *
+ * @return true if these compression options are used
+ * for the bottommost_compression_opts, false otherwise
+ */
+ public boolean enabled() {
+ return enabled(nativeHandle_);
+ }
+
+
+  private static native long newCompressionOptions();
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native void setWindowBits(final long handle, final int windowBits);
+ private native int windowBits(final long handle);
+ private native void setLevel(final long handle, final int level);
+ private native int level(final long handle);
+ private native void setStrategy(final long handle, final int strategy);
+ private native int strategy(final long handle);
+ private native void setMaxDictBytes(final long handle, final int maxDictBytes);
+ private native int maxDictBytes(final long handle);
+ private native void setZstdMaxTrainBytes(final long handle,
+ final int zstdMaxTrainBytes);
+ private native int zstdMaxTrainBytes(final long handle);
+ private native void setEnabled(final long handle, final boolean enabled);
+ private native boolean enabled(final long handle);
+}
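+// Usage sketch (illustrative only, not part of the original file): enabling
+// dictionary compression with zstd training, per the javadoc above. Assumes
+// the ColumnFamilyOptions setters setCompressionType and setCompressionOptions
+// from the wider API.
+//
+//   try (final CompressionOptions compressionOptions = new CompressionOptions()
+//            .setMaxDictBytes(16 * 1024)             // 16 KiB dictionary
+//            .setZStdMaxTrainBytes(100 * 16 * 1024); // ~100x the dict size
+//        final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
+//            .setCompressionType(CompressionType.ZSTD_COMPRESSION)
+//            .setCompressionOptions(compressionOptions)) {
+//     // open the database / column family with cfOptions
+//   }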
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java
new file mode 100644
index 000000000..2781537c8
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java
@@ -0,0 +1,99 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Enum CompressionType
+ *
+ * <p>DB contents are stored in a set of blocks, each of which holds a
+ * sequence of key,value pairs. Each block may be compressed before
+ * being stored in a file. The following enum describes which
+ * compression method (if any) is used to compress a block.</p>
+ */
+public enum CompressionType {
+
+ NO_COMPRESSION((byte) 0x0, null),
+ SNAPPY_COMPRESSION((byte) 0x1, "snappy"),
+ ZLIB_COMPRESSION((byte) 0x2, "z"),
+ BZLIB2_COMPRESSION((byte) 0x3, "bzip2"),
+ LZ4_COMPRESSION((byte) 0x4, "lz4"),
+ LZ4HC_COMPRESSION((byte) 0x5, "lz4hc"),
+ XPRESS_COMPRESSION((byte) 0x6, "xpress"),
+ ZSTD_COMPRESSION((byte)0x7, "zstd"),
+ DISABLE_COMPRESSION_OPTION((byte)0x7F, null);
+
+ /**
+ * <p>Get the CompressionType enumeration value by
+ * passing the library name to this method.</p>
+ *
+ * <p>If library cannot be found the enumeration
+ * value {@code NO_COMPRESSION} will be returned.</p>
+ *
+ * @param libraryName compression library name.
+ *
+ * @return CompressionType instance.
+ */
+ public static CompressionType getCompressionType(String libraryName) {
+ if (libraryName != null) {
+ for (CompressionType compressionType : CompressionType.values()) {
+ if (compressionType.getLibraryName() != null &&
+ compressionType.getLibraryName().equals(libraryName)) {
+ return compressionType;
+ }
+ }
+ }
+ return CompressionType.NO_COMPRESSION;
+ }
+
+ /**
+ * <p>Get the CompressionType enumeration value by
+ * passing the byte identifier to this method.</p>
+ *
+ * @param byteIdentifier of CompressionType.
+ *
+ * @return CompressionType instance.
+ *
+ * @throws IllegalArgumentException If CompressionType cannot be found for the
+ * provided byteIdentifier
+ */
+ public static CompressionType getCompressionType(byte byteIdentifier) {
+ for (final CompressionType compressionType : CompressionType.values()) {
+ if (compressionType.getValue() == byteIdentifier) {
+ return compressionType;
+ }
+ }
+
+ throw new IllegalArgumentException(
+ "Illegal value provided for CompressionType.");
+ }
+
+ /**
+ * <p>Returns the byte value of the enumerations value.</p>
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value_;
+ }
+
+ /**
+ * <p>Returns the library name of the compression type
+ * identified by the enumeration value.</p>
+ *
+ * @return library name
+ */
+ public String getLibraryName() {
+ return libraryName_;
+ }
+
+ CompressionType(final byte value, final String libraryName) {
+ value_ = value;
+ libraryName_ = libraryName;
+ }
+
+ private final byte value_;
+ private final String libraryName_;
+}
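+// Usage sketch (illustrative only, not part of the original file): both
+// lookup paths documented above.
+//
+//   // by library name; falls back to NO_COMPRESSION for unknown names
+//   final CompressionType byName = CompressionType.getCompressionType("zstd");
+//   assert byName == CompressionType.ZSTD_COMPRESSION;
+//
+//   // by byte identifier; throws IllegalArgumentException when unknown
+//   final CompressionType byId = CompressionType.getCompressionType((byte) 0x1);
+//   assert byId == CompressionType.SNAPPY_COMPRESSION;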
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java
new file mode 100644
index 000000000..36cc4abd9
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java
@@ -0,0 +1,1403 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.file.Paths;
+import java.util.*;
+
+/**
+ * DBOptions to control the behavior of a database. It will be used
+ * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
+ *
+ * If the {@link #dispose()} function is not called, then it will be GC'd
+ * automatically and native resources will be released as part of the process.
+ */
+public class DBOptions extends RocksObject
+ implements DBOptionsInterface<DBOptions>,
+ MutableDBOptionsInterface<DBOptions> {
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ /**
+ * Construct DBOptions.
+ *
+ * This constructor will create (by allocating a block of memory)
+ * an {@code rocksdb::DBOptions} in the c++ side.
+ */
+ public DBOptions() {
+ super(newDBOptions());
+ numShardBits_ = DEFAULT_NUM_SHARD_BITS;
+ }
+
+ /**
+ * Copy constructor for DBOptions.
+ *
+ * NOTE: This does a shallow copy, which means env, rate_limiter, sst_file_manager,
+ * info_log and other pointers will be cloned!
+ *
+ * @param other The DBOptions to copy.
+ */
+ public DBOptions(DBOptions other) {
+ super(copyDBOptions(other.nativeHandle_));
+ this.env_ = other.env_;
+ this.numShardBits_ = other.numShardBits_;
+ this.rateLimiter_ = other.rateLimiter_;
+ this.rowCache_ = other.rowCache_;
+ this.walFilter_ = other.walFilter_;
+ this.writeBufferManager_ = other.writeBufferManager_;
+ }
+
+ /**
+ * Constructor from Options
+ *
+ * @param options The options.
+ */
+ public DBOptions(final Options options) {
+ super(newDBOptionsFromOptions(options.nativeHandle_));
+ }
+
+ /**
+   * <p>Method to get an options instance by using pre-configured
+ * property values. If one or many values are undefined in
+ * the context of RocksDB the method will return a null
+ * value.</p>
+ *
+ * <p><strong>Note</strong>: Property keys can be derived from
+ * getter methods within the options class. Example: the method
+ * {@code allowMmapReads()} has a property key:
+ * {@code allow_mmap_reads}.</p>
+ *
+ * @param properties {@link java.util.Properties} instance.
+ *
+   * @return {@link org.rocksdb.DBOptions} instance
+ * or null.
+ *
+ * @throws java.lang.IllegalArgumentException if null or empty
+ * {@link java.util.Properties} instance is passed to the method call.
+ */
+ public static DBOptions getDBOptionsFromProps(
+ final Properties properties) {
+ if (properties == null || properties.size() == 0) {
+ throw new IllegalArgumentException(
+ "Properties value must contain at least one value.");
+ }
+ DBOptions dbOptions = null;
+ StringBuilder stringBuilder = new StringBuilder();
+ for (final String name : properties.stringPropertyNames()){
+ stringBuilder.append(name);
+ stringBuilder.append("=");
+ stringBuilder.append(properties.getProperty(name));
+ stringBuilder.append(";");
+ }
+ long handle = getDBOptionsFromProps(
+ stringBuilder.toString());
+ if (handle != 0){
+ dbOptions = new DBOptions(handle);
+ }
+ return dbOptions;
+ }
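+
+  // Usage sketch (illustrative only, not part of the original file): property
+  // keys are the snake_case forms of the getter names, as described above.
+  //
+  //   final Properties props = new Properties();
+  //   props.setProperty("allow_mmap_reads", "true");
+  //   props.setProperty("max_open_files", "512");
+  //   try (final DBOptions dbOptions = DBOptions.getDBOptionsFromProps(props)) {
+  //     // dbOptions is null if RocksDB did not understand a key or value
+  //   }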
+
+ @Override
+ public DBOptions optimizeForSmallDb() {
+ optimizeForSmallDb(nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public DBOptions setIncreaseParallelism(
+ final int totalThreads) {
+ assert(isOwningHandle());
+ setIncreaseParallelism(nativeHandle_, totalThreads);
+ return this;
+ }
+
+ @Override
+ public DBOptions setCreateIfMissing(final boolean flag) {
+ assert(isOwningHandle());
+ setCreateIfMissing(nativeHandle_, flag);
+ return this;
+ }
+
+ @Override
+ public boolean createIfMissing() {
+ assert(isOwningHandle());
+ return createIfMissing(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setCreateMissingColumnFamilies(
+ final boolean flag) {
+ assert(isOwningHandle());
+ setCreateMissingColumnFamilies(nativeHandle_, flag);
+ return this;
+ }
+
+ @Override
+ public boolean createMissingColumnFamilies() {
+ assert(isOwningHandle());
+ return createMissingColumnFamilies(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setErrorIfExists(
+ final boolean errorIfExists) {
+ assert(isOwningHandle());
+ setErrorIfExists(nativeHandle_, errorIfExists);
+ return this;
+ }
+
+ @Override
+ public boolean errorIfExists() {
+ assert(isOwningHandle());
+ return errorIfExists(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setParanoidChecks(
+ final boolean paranoidChecks) {
+ assert(isOwningHandle());
+ setParanoidChecks(nativeHandle_, paranoidChecks);
+ return this;
+ }
+
+ @Override
+ public boolean paranoidChecks() {
+ assert(isOwningHandle());
+ return paranoidChecks(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setEnv(final Env env) {
+ setEnv(nativeHandle_, env.nativeHandle_);
+ this.env_ = env;
+ return this;
+ }
+
+ @Override
+ public Env getEnv() {
+ return env_;
+ }
+
+ @Override
+ public DBOptions setRateLimiter(final RateLimiter rateLimiter) {
+ assert(isOwningHandle());
+ rateLimiter_ = rateLimiter;
+ setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public DBOptions setSstFileManager(final SstFileManager sstFileManager) {
+ assert(isOwningHandle());
+ setSstFileManager(nativeHandle_, sstFileManager.nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public DBOptions setLogger(final Logger logger) {
+ assert(isOwningHandle());
+ setLogger(nativeHandle_, logger.nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public DBOptions setInfoLogLevel(
+ final InfoLogLevel infoLogLevel) {
+ assert(isOwningHandle());
+ setInfoLogLevel(nativeHandle_, infoLogLevel.getValue());
+ return this;
+ }
+
+ @Override
+ public InfoLogLevel infoLogLevel() {
+ assert(isOwningHandle());
+ return InfoLogLevel.getInfoLogLevel(
+ infoLogLevel(nativeHandle_));
+ }
+
+ @Override
+ public DBOptions setMaxOpenFiles(
+ final int maxOpenFiles) {
+ assert(isOwningHandle());
+ setMaxOpenFiles(nativeHandle_, maxOpenFiles);
+ return this;
+ }
+
+ @Override
+ public int maxOpenFiles() {
+ assert(isOwningHandle());
+ return maxOpenFiles(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setMaxFileOpeningThreads(final int maxFileOpeningThreads) {
+ assert(isOwningHandle());
+ setMaxFileOpeningThreads(nativeHandle_, maxFileOpeningThreads);
+ return this;
+ }
+
+ @Override
+ public int maxFileOpeningThreads() {
+ assert(isOwningHandle());
+ return maxFileOpeningThreads(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setMaxTotalWalSize(
+ final long maxTotalWalSize) {
+ assert(isOwningHandle());
+ setMaxTotalWalSize(nativeHandle_, maxTotalWalSize);
+ return this;
+ }
+
+ @Override
+ public long maxTotalWalSize() {
+ assert(isOwningHandle());
+ return maxTotalWalSize(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setStatistics(final Statistics statistics) {
+ assert(isOwningHandle());
+ setStatistics(nativeHandle_, statistics.nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public Statistics statistics() {
+ assert(isOwningHandle());
+ final long statisticsNativeHandle = statistics(nativeHandle_);
+ if(statisticsNativeHandle == 0) {
+ return null;
+ } else {
+ return new Statistics(statisticsNativeHandle);
+ }
+ }
+
+ @Override
+ public DBOptions setUseFsync(
+ final boolean useFsync) {
+ assert(isOwningHandle());
+ setUseFsync(nativeHandle_, useFsync);
+ return this;
+ }
+
+ @Override
+ public boolean useFsync() {
+ assert(isOwningHandle());
+ return useFsync(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setDbPaths(final Collection<DbPath> dbPaths) {
+ assert(isOwningHandle());
+
+ final int len = dbPaths.size();
+ final String[] paths = new String[len];
+ final long[] targetSizes = new long[len];
+
+ int i = 0;
+ for(final DbPath dbPath : dbPaths) {
+ paths[i] = dbPath.path.toString();
+ targetSizes[i] = dbPath.targetSize;
+ i++;
+ }
+ setDbPaths(nativeHandle_, paths, targetSizes);
+ return this;
+ }
+
+ @Override
+ public List<DbPath> dbPaths() {
+ final int len = (int)dbPathsLen(nativeHandle_);
+ if(len == 0) {
+ return Collections.emptyList();
+ } else {
+ final String[] paths = new String[len];
+ final long[] targetSizes = new long[len];
+
+ dbPaths(nativeHandle_, paths, targetSizes);
+
+ final List<DbPath> dbPaths = new ArrayList<>();
+ for(int i = 0; i < len; i++) {
+ dbPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i]));
+ }
+ return dbPaths;
+ }
+ }
+
+ @Override
+ public DBOptions setDbLogDir(
+ final String dbLogDir) {
+ assert(isOwningHandle());
+ setDbLogDir(nativeHandle_, dbLogDir);
+ return this;
+ }
+
+ @Override
+ public String dbLogDir() {
+ assert(isOwningHandle());
+ return dbLogDir(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setWalDir(
+ final String walDir) {
+ assert(isOwningHandle());
+ setWalDir(nativeHandle_, walDir);
+ return this;
+ }
+
+ @Override
+ public String walDir() {
+ assert(isOwningHandle());
+ return walDir(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setDeleteObsoleteFilesPeriodMicros(
+ final long micros) {
+ assert(isOwningHandle());
+ setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros);
+ return this;
+ }
+
+ @Override
+ public long deleteObsoleteFilesPeriodMicros() {
+ assert(isOwningHandle());
+ return deleteObsoleteFilesPeriodMicros(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setMaxBackgroundJobs(final int maxBackgroundJobs) {
+ assert(isOwningHandle());
+ setMaxBackgroundJobs(nativeHandle_, maxBackgroundJobs);
+ return this;
+ }
+
+ @Override
+ public int maxBackgroundJobs() {
+ assert(isOwningHandle());
+ return maxBackgroundJobs(nativeHandle_);
+ }
+
+ @Override
+ @Deprecated
+ public void setBaseBackgroundCompactions(
+ final int baseBackgroundCompactions) {
+ assert(isOwningHandle());
+ setBaseBackgroundCompactions(nativeHandle_, baseBackgroundCompactions);
+ }
+
+ @Override
+ public int baseBackgroundCompactions() {
+ assert(isOwningHandle());
+ return baseBackgroundCompactions(nativeHandle_);
+ }
+
+ @Override
+ @Deprecated
+ public DBOptions setMaxBackgroundCompactions(
+ final int maxBackgroundCompactions) {
+ assert(isOwningHandle());
+ setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions);
+ return this;
+ }
+
+ @Override
+ @Deprecated
+ public int maxBackgroundCompactions() {
+ assert(isOwningHandle());
+ return maxBackgroundCompactions(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setMaxSubcompactions(final int maxSubcompactions) {
+ assert(isOwningHandle());
+ setMaxSubcompactions(nativeHandle_, maxSubcompactions);
+ return this;
+ }
+
+ @Override
+ public int maxSubcompactions() {
+ assert(isOwningHandle());
+ return maxSubcompactions(nativeHandle_);
+ }
+
+ @Override
+ @Deprecated
+ public DBOptions setMaxBackgroundFlushes(
+ final int maxBackgroundFlushes) {
+ assert(isOwningHandle());
+ setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes);
+ return this;
+ }
+
+ @Override
+ @Deprecated
+ public int maxBackgroundFlushes() {
+ assert(isOwningHandle());
+ return maxBackgroundFlushes(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setMaxLogFileSize(final long maxLogFileSize) {
+ assert(isOwningHandle());
+ setMaxLogFileSize(nativeHandle_, maxLogFileSize);
+ return this;
+ }
+
+ @Override
+ public long maxLogFileSize() {
+ assert(isOwningHandle());
+ return maxLogFileSize(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setLogFileTimeToRoll(
+ final long logFileTimeToRoll) {
+ assert(isOwningHandle());
+ setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll);
+ return this;
+ }
+
+ @Override
+ public long logFileTimeToRoll() {
+ assert(isOwningHandle());
+ return logFileTimeToRoll(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setKeepLogFileNum(
+ final long keepLogFileNum) {
+ assert(isOwningHandle());
+ setKeepLogFileNum(nativeHandle_, keepLogFileNum);
+ return this;
+ }
+
+ @Override
+ public long keepLogFileNum() {
+ assert(isOwningHandle());
+ return keepLogFileNum(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setRecycleLogFileNum(final long recycleLogFileNum) {
+ assert(isOwningHandle());
+ setRecycleLogFileNum(nativeHandle_, recycleLogFileNum);
+ return this;
+ }
+
+ @Override
+ public long recycleLogFileNum() {
+ assert(isOwningHandle());
+ return recycleLogFileNum(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setMaxManifestFileSize(
+ final long maxManifestFileSize) {
+ assert(isOwningHandle());
+ setMaxManifestFileSize(nativeHandle_, maxManifestFileSize);
+ return this;
+ }
+
+ @Override
+ public long maxManifestFileSize() {
+ assert(isOwningHandle());
+ return maxManifestFileSize(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setTableCacheNumshardbits(
+ final int tableCacheNumshardbits) {
+ assert(isOwningHandle());
+ setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits);
+ return this;
+ }
+
+ @Override
+ public int tableCacheNumshardbits() {
+ assert(isOwningHandle());
+ return tableCacheNumshardbits(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setWalTtlSeconds(
+ final long walTtlSeconds) {
+ assert(isOwningHandle());
+ setWalTtlSeconds(nativeHandle_, walTtlSeconds);
+ return this;
+ }
+
+ @Override
+ public long walTtlSeconds() {
+ assert(isOwningHandle());
+ return walTtlSeconds(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setWalSizeLimitMB(
+ final long sizeLimitMB) {
+ assert(isOwningHandle());
+ setWalSizeLimitMB(nativeHandle_, sizeLimitMB);
+ return this;
+ }
+
+ @Override
+ public long walSizeLimitMB() {
+ assert(isOwningHandle());
+ return walSizeLimitMB(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setManifestPreallocationSize(
+ final long size) {
+ assert(isOwningHandle());
+ setManifestPreallocationSize(nativeHandle_, size);
+ return this;
+ }
+
+ @Override
+ public long manifestPreallocationSize() {
+ assert(isOwningHandle());
+ return manifestPreallocationSize(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAllowMmapReads(
+ final boolean allowMmapReads) {
+ assert(isOwningHandle());
+ setAllowMmapReads(nativeHandle_, allowMmapReads);
+ return this;
+ }
+
+ @Override
+ public boolean allowMmapReads() {
+ assert(isOwningHandle());
+ return allowMmapReads(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAllowMmapWrites(
+ final boolean allowMmapWrites) {
+ assert(isOwningHandle());
+ setAllowMmapWrites(nativeHandle_, allowMmapWrites);
+ return this;
+ }
+
+ @Override
+ public boolean allowMmapWrites() {
+ assert(isOwningHandle());
+ return allowMmapWrites(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setUseDirectReads(
+ final boolean useDirectReads) {
+ assert(isOwningHandle());
+ setUseDirectReads(nativeHandle_, useDirectReads);
+ return this;
+ }
+
+ @Override
+ public boolean useDirectReads() {
+ assert(isOwningHandle());
+ return useDirectReads(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setUseDirectIoForFlushAndCompaction(
+ final boolean useDirectIoForFlushAndCompaction) {
+ assert(isOwningHandle());
+ setUseDirectIoForFlushAndCompaction(nativeHandle_,
+ useDirectIoForFlushAndCompaction);
+ return this;
+ }
+
+ @Override
+ public boolean useDirectIoForFlushAndCompaction() {
+ assert(isOwningHandle());
+ return useDirectIoForFlushAndCompaction(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAllowFAllocate(final boolean allowFAllocate) {
+ assert(isOwningHandle());
+ setAllowFAllocate(nativeHandle_, allowFAllocate);
+ return this;
+ }
+
+ @Override
+ public boolean allowFAllocate() {
+ assert(isOwningHandle());
+ return allowFAllocate(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setIsFdCloseOnExec(
+ final boolean isFdCloseOnExec) {
+ assert(isOwningHandle());
+ setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec);
+ return this;
+ }
+
+ @Override
+ public boolean isFdCloseOnExec() {
+ assert(isOwningHandle());
+ return isFdCloseOnExec(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setStatsDumpPeriodSec(
+ final int statsDumpPeriodSec) {
+ assert(isOwningHandle());
+ setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec);
+ return this;
+ }
+
+ @Override
+ public int statsDumpPeriodSec() {
+ assert(isOwningHandle());
+ return statsDumpPeriodSec(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setStatsPersistPeriodSec(
+ final int statsPersistPeriodSec) {
+ assert(isOwningHandle());
+ setStatsPersistPeriodSec(nativeHandle_, statsPersistPeriodSec);
+ return this;
+ }
+
+ @Override
+ public int statsPersistPeriodSec() {
+ assert(isOwningHandle());
+ return statsPersistPeriodSec(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setStatsHistoryBufferSize(
+ final long statsHistoryBufferSize) {
+ assert(isOwningHandle());
+ setStatsHistoryBufferSize(nativeHandle_, statsHistoryBufferSize);
+ return this;
+ }
+
+ @Override
+ public long statsHistoryBufferSize() {
+ assert(isOwningHandle());
+ return statsHistoryBufferSize(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAdviseRandomOnOpen(
+ final boolean adviseRandomOnOpen) {
+ assert(isOwningHandle());
+ setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen);
+ return this;
+ }
+
+ @Override
+  public boolean adviseRandomOnOpen() {
+    assert(isOwningHandle());
+    return adviseRandomOnOpen(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setDbWriteBufferSize(final long dbWriteBufferSize) {
+ assert(isOwningHandle());
+ setDbWriteBufferSize(nativeHandle_, dbWriteBufferSize);
+ return this;
+ }
+
+ @Override
+ public DBOptions setWriteBufferManager(final WriteBufferManager writeBufferManager) {
+ assert(isOwningHandle());
+ setWriteBufferManager(nativeHandle_, writeBufferManager.nativeHandle_);
+ this.writeBufferManager_ = writeBufferManager;
+ return this;
+ }
+
+ @Override
+ public WriteBufferManager writeBufferManager() {
+ assert(isOwningHandle());
+ return this.writeBufferManager_;
+ }
+
+ @Override
+ public long dbWriteBufferSize() {
+ assert(isOwningHandle());
+ return dbWriteBufferSize(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAccessHintOnCompactionStart(final AccessHint accessHint) {
+ assert(isOwningHandle());
+ setAccessHintOnCompactionStart(nativeHandle_, accessHint.getValue());
+ return this;
+ }
+
+ @Override
+ public AccessHint accessHintOnCompactionStart() {
+ assert(isOwningHandle());
+ return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_));
+ }
+
+ @Override
+ public DBOptions setNewTableReaderForCompactionInputs(
+ final boolean newTableReaderForCompactionInputs) {
+ assert(isOwningHandle());
+ setNewTableReaderForCompactionInputs(nativeHandle_,
+ newTableReaderForCompactionInputs);
+ return this;
+ }
+
+ @Override
+ public boolean newTableReaderForCompactionInputs() {
+ assert(isOwningHandle());
+ return newTableReaderForCompactionInputs(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setCompactionReadaheadSize(final long compactionReadaheadSize) {
+ assert(isOwningHandle());
+ setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize);
+ return this;
+ }
+
+ @Override
+ public long compactionReadaheadSize() {
+ assert(isOwningHandle());
+ return compactionReadaheadSize(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) {
+ assert(isOwningHandle());
+ setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize);
+ return this;
+ }
+
+ @Override
+ public long randomAccessMaxBufferSize() {
+ assert(isOwningHandle());
+ return randomAccessMaxBufferSize(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) {
+ assert(isOwningHandle());
+ setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize);
+ return this;
+ }
+
+ @Override
+ public long writableFileMaxBufferSize() {
+ assert(isOwningHandle());
+ return writableFileMaxBufferSize(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setUseAdaptiveMutex(
+ final boolean useAdaptiveMutex) {
+ assert(isOwningHandle());
+ setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex);
+ return this;
+ }
+
+ @Override
+ public boolean useAdaptiveMutex() {
+ assert(isOwningHandle());
+ return useAdaptiveMutex(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setBytesPerSync(
+ final long bytesPerSync) {
+ assert(isOwningHandle());
+ setBytesPerSync(nativeHandle_, bytesPerSync);
+ return this;
+ }
+
+ @Override
+  public long bytesPerSync() {
+    assert(isOwningHandle());
+    return bytesPerSync(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setWalBytesPerSync(final long walBytesPerSync) {
+ assert(isOwningHandle());
+ setWalBytesPerSync(nativeHandle_, walBytesPerSync);
+ return this;
+ }
+
+ @Override
+ public long walBytesPerSync() {
+ assert(isOwningHandle());
+ return walBytesPerSync(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setStrictBytesPerSync(final boolean strictBytesPerSync) {
+ assert(isOwningHandle());
+ setStrictBytesPerSync(nativeHandle_, strictBytesPerSync);
+ return this;
+ }
+
+ @Override
+ public boolean strictBytesPerSync() {
+ assert(isOwningHandle());
+ return strictBytesPerSync(nativeHandle_);
+ }
+
+ //TODO(AR) NOW
+// @Override
+// public DBOptions setListeners(final List<EventListener> listeners) {
+// assert(isOwningHandle());
+// final long[] eventListenerHandlers = new long[listeners.size()];
+// for (int i = 0; i < eventListenerHandlers.length; i++) {
+// eventListenerHandlers[i] = listeners.get(i).nativeHandle_;
+// }
+// setEventListeners(nativeHandle_, eventListenerHandlers);
+// return this;
+// }
+//
+// @Override
+// public Collection<EventListener> listeners() {
+// assert(isOwningHandle());
+// final long[] eventListenerHandlers = listeners(nativeHandle_);
+// if (eventListenerHandlers == null || eventListenerHandlers.length == 0) {
+// return Collections.emptyList();
+// }
+//
+// final List<EventListener> eventListeners = new ArrayList<>();
+// for (final long eventListenerHandle : eventListenerHandlers) {
+// eventListeners.add(new EventListener(eventListenerHandle)); //TODO(AR) check ownership is set to false!
+// }
+// return eventListeners;
+// }
+
+ @Override
+ public DBOptions setEnableThreadTracking(final boolean enableThreadTracking) {
+ assert(isOwningHandle());
+ setEnableThreadTracking(nativeHandle_, enableThreadTracking);
+ return this;
+ }
+
+ @Override
+ public boolean enableThreadTracking() {
+ assert(isOwningHandle());
+ return enableThreadTracking(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setDelayedWriteRate(final long delayedWriteRate) {
+ assert(isOwningHandle());
+ setDelayedWriteRate(nativeHandle_, delayedWriteRate);
+ return this;
+ }
+
+ @Override
+  public long delayedWriteRate() {
+    assert(isOwningHandle());
+    return delayedWriteRate(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setEnablePipelinedWrite(final boolean enablePipelinedWrite) {
+ assert(isOwningHandle());
+ setEnablePipelinedWrite(nativeHandle_, enablePipelinedWrite);
+ return this;
+ }
+
+ @Override
+ public boolean enablePipelinedWrite() {
+ assert(isOwningHandle());
+ return enablePipelinedWrite(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setUnorderedWrite(final boolean unorderedWrite) {
+ setUnorderedWrite(nativeHandle_, unorderedWrite);
+ return this;
+ }
+
+ @Override
+ public boolean unorderedWrite() {
+ return unorderedWrite(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAllowConcurrentMemtableWrite(
+ final boolean allowConcurrentMemtableWrite) {
+ setAllowConcurrentMemtableWrite(nativeHandle_,
+ allowConcurrentMemtableWrite);
+ return this;
+ }
+
+ @Override
+ public boolean allowConcurrentMemtableWrite() {
+ return allowConcurrentMemtableWrite(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setEnableWriteThreadAdaptiveYield(
+ final boolean enableWriteThreadAdaptiveYield) {
+ setEnableWriteThreadAdaptiveYield(nativeHandle_,
+ enableWriteThreadAdaptiveYield);
+ return this;
+ }
+
+ @Override
+ public boolean enableWriteThreadAdaptiveYield() {
+ return enableWriteThreadAdaptiveYield(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) {
+ setWriteThreadMaxYieldUsec(nativeHandle_, writeThreadMaxYieldUsec);
+ return this;
+ }
+
+ @Override
+ public long writeThreadMaxYieldUsec() {
+ return writeThreadMaxYieldUsec(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) {
+ setWriteThreadSlowYieldUsec(nativeHandle_, writeThreadSlowYieldUsec);
+ return this;
+ }
+
+ @Override
+ public long writeThreadSlowYieldUsec() {
+ return writeThreadSlowYieldUsec(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setSkipStatsUpdateOnDbOpen(final boolean skipStatsUpdateOnDbOpen) {
+ assert(isOwningHandle());
+ setSkipStatsUpdateOnDbOpen(nativeHandle_, skipStatsUpdateOnDbOpen);
+ return this;
+ }
+
+ @Override
+ public boolean skipStatsUpdateOnDbOpen() {
+ assert(isOwningHandle());
+ return skipStatsUpdateOnDbOpen(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setWalRecoveryMode(final WALRecoveryMode walRecoveryMode) {
+ assert(isOwningHandle());
+ setWalRecoveryMode(nativeHandle_, walRecoveryMode.getValue());
+ return this;
+ }
+
+ @Override
+ public WALRecoveryMode walRecoveryMode() {
+ assert(isOwningHandle());
+ return WALRecoveryMode.getWALRecoveryMode(walRecoveryMode(nativeHandle_));
+ }
+
+ @Override
+ public DBOptions setAllow2pc(final boolean allow2pc) {
+ assert(isOwningHandle());
+ setAllow2pc(nativeHandle_, allow2pc);
+ return this;
+ }
+
+ @Override
+ public boolean allow2pc() {
+ assert(isOwningHandle());
+ return allow2pc(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setRowCache(final Cache rowCache) {
+ assert(isOwningHandle());
+ setRowCache(nativeHandle_, rowCache.nativeHandle_);
+ this.rowCache_ = rowCache;
+ return this;
+ }
+
+ @Override
+ public Cache rowCache() {
+ assert(isOwningHandle());
+ return this.rowCache_;
+ }
+
+ @Override
+ public DBOptions setWalFilter(final AbstractWalFilter walFilter) {
+ assert(isOwningHandle());
+ setWalFilter(nativeHandle_, walFilter.nativeHandle_);
+ this.walFilter_ = walFilter;
+ return this;
+ }
+
+ @Override
+ public WalFilter walFilter() {
+ assert(isOwningHandle());
+ return this.walFilter_;
+ }
+
+ @Override
+ public DBOptions setFailIfOptionsFileError(final boolean failIfOptionsFileError) {
+ assert(isOwningHandle());
+ setFailIfOptionsFileError(nativeHandle_, failIfOptionsFileError);
+ return this;
+ }
+
+ @Override
+ public boolean failIfOptionsFileError() {
+ assert(isOwningHandle());
+ return failIfOptionsFileError(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setDumpMallocStats(final boolean dumpMallocStats) {
+ assert(isOwningHandle());
+ setDumpMallocStats(nativeHandle_, dumpMallocStats);
+ return this;
+ }
+
+ @Override
+ public boolean dumpMallocStats() {
+ assert(isOwningHandle());
+ return dumpMallocStats(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAvoidFlushDuringRecovery(final boolean avoidFlushDuringRecovery) {
+ assert(isOwningHandle());
+ setAvoidFlushDuringRecovery(nativeHandle_, avoidFlushDuringRecovery);
+ return this;
+ }
+
+ @Override
+ public boolean avoidFlushDuringRecovery() {
+ assert(isOwningHandle());
+ return avoidFlushDuringRecovery(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAvoidFlushDuringShutdown(final boolean avoidFlushDuringShutdown) {
+ assert(isOwningHandle());
+ setAvoidFlushDuringShutdown(nativeHandle_, avoidFlushDuringShutdown);
+ return this;
+ }
+
+ @Override
+ public boolean avoidFlushDuringShutdown() {
+ assert(isOwningHandle());
+ return avoidFlushDuringShutdown(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAllowIngestBehind(final boolean allowIngestBehind) {
+ assert(isOwningHandle());
+ setAllowIngestBehind(nativeHandle_, allowIngestBehind);
+ return this;
+ }
+
+ @Override
+ public boolean allowIngestBehind() {
+ assert(isOwningHandle());
+ return allowIngestBehind(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setPreserveDeletes(final boolean preserveDeletes) {
+ assert(isOwningHandle());
+ setPreserveDeletes(nativeHandle_, preserveDeletes);
+ return this;
+ }
+
+ @Override
+ public boolean preserveDeletes() {
+ assert(isOwningHandle());
+ return preserveDeletes(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setTwoWriteQueues(final boolean twoWriteQueues) {
+ assert(isOwningHandle());
+ setTwoWriteQueues(nativeHandle_, twoWriteQueues);
+ return this;
+ }
+
+ @Override
+ public boolean twoWriteQueues() {
+ assert(isOwningHandle());
+ return twoWriteQueues(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setManualWalFlush(final boolean manualWalFlush) {
+ assert(isOwningHandle());
+ setManualWalFlush(nativeHandle_, manualWalFlush);
+ return this;
+ }
+
+ @Override
+ public boolean manualWalFlush() {
+ assert(isOwningHandle());
+ return manualWalFlush(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAtomicFlush(final boolean atomicFlush) {
+ setAtomicFlush(nativeHandle_, atomicFlush);
+ return this;
+ }
+
+ @Override
+ public boolean atomicFlush() {
+ return atomicFlush(nativeHandle_);
+ }
+
+ static final int DEFAULT_NUM_SHARD_BITS = -1;
+
+ /**
+ * <p>Private constructor to be used by
+ * {@link #getDBOptionsFromProps(java.util.Properties)}</p>
+ *
+ * @param nativeHandle native handle to DBOptions instance.
+ */
+ private DBOptions(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ private static native long getDBOptionsFromProps(
+ String optString);
+
+ private static native long newDBOptions();
+ private static native long copyDBOptions(final long handle);
+ private static native long newDBOptionsFromOptions(final long optionsHandle);
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native void optimizeForSmallDb(final long handle);
+ private native void setIncreaseParallelism(long handle, int totalThreads);
+ private native void setCreateIfMissing(long handle, boolean flag);
+ private native boolean createIfMissing(long handle);
+ private native void setCreateMissingColumnFamilies(
+ long handle, boolean flag);
+ private native boolean createMissingColumnFamilies(long handle);
+ private native void setEnv(long handle, long envHandle);
+ private native void setErrorIfExists(long handle, boolean errorIfExists);
+ private native boolean errorIfExists(long handle);
+ private native void setParanoidChecks(
+ long handle, boolean paranoidChecks);
+ private native boolean paranoidChecks(long handle);
+ private native void setRateLimiter(long handle,
+ long rateLimiterHandle);
+ private native void setSstFileManager(final long handle,
+ final long sstFileManagerHandle);
+ private native void setLogger(long handle,
+ long loggerHandle);
+ private native void setInfoLogLevel(long handle, byte logLevel);
+ private native byte infoLogLevel(long handle);
+ private native void setMaxOpenFiles(long handle, int maxOpenFiles);
+ private native int maxOpenFiles(long handle);
+ private native void setMaxFileOpeningThreads(final long handle,
+ final int maxFileOpeningThreads);
+ private native int maxFileOpeningThreads(final long handle);
+ private native void setMaxTotalWalSize(long handle,
+ long maxTotalWalSize);
+ private native long maxTotalWalSize(long handle);
+ private native void setStatistics(final long handle, final long statisticsHandle);
+ private native long statistics(final long handle);
+ private native boolean useFsync(long handle);
+ private native void setUseFsync(long handle, boolean useFsync);
+ private native void setDbPaths(final long handle, final String[] paths,
+ final long[] targetSizes);
+ private native long dbPathsLen(final long handle);
+ private native void dbPaths(final long handle, final String[] paths,
+ final long[] targetSizes);
+ private native void setDbLogDir(long handle, String dbLogDir);
+ private native String dbLogDir(long handle);
+ private native void setWalDir(long handle, String walDir);
+ private native String walDir(long handle);
+ private native void setDeleteObsoleteFilesPeriodMicros(
+ long handle, long micros);
+ private native long deleteObsoleteFilesPeriodMicros(long handle);
+ private native void setBaseBackgroundCompactions(long handle,
+ int baseBackgroundCompactions);
+ private native int baseBackgroundCompactions(long handle);
+ private native void setMaxBackgroundCompactions(
+ long handle, int maxBackgroundCompactions);
+ private native int maxBackgroundCompactions(long handle);
+ private native void setMaxSubcompactions(long handle, int maxSubcompactions);
+ private native int maxSubcompactions(long handle);
+ private native void setMaxBackgroundFlushes(
+ long handle, int maxBackgroundFlushes);
+ private native int maxBackgroundFlushes(long handle);
+ private native void setMaxBackgroundJobs(long handle, int maxBackgroundJobs);
+ private native int maxBackgroundJobs(long handle);
+ private native void setMaxLogFileSize(long handle, long maxLogFileSize)
+ throws IllegalArgumentException;
+ private native long maxLogFileSize(long handle);
+ private native void setLogFileTimeToRoll(
+ long handle, long logFileTimeToRoll) throws IllegalArgumentException;
+ private native long logFileTimeToRoll(long handle);
+ private native void setKeepLogFileNum(long handle, long keepLogFileNum)
+ throws IllegalArgumentException;
+ private native long keepLogFileNum(long handle);
+ private native void setRecycleLogFileNum(long handle, long recycleLogFileNum);
+ private native long recycleLogFileNum(long handle);
+ private native void setMaxManifestFileSize(
+ long handle, long maxManifestFileSize);
+ private native long maxManifestFileSize(long handle);
+ private native void setTableCacheNumshardbits(
+ long handle, int tableCacheNumshardbits);
+ private native int tableCacheNumshardbits(long handle);
+ private native void setWalTtlSeconds(long handle, long walTtlSeconds);
+ private native long walTtlSeconds(long handle);
+ private native void setWalSizeLimitMB(long handle, long sizeLimitMB);
+ private native long walSizeLimitMB(long handle);
+ private native void setManifestPreallocationSize(
+ long handle, long size) throws IllegalArgumentException;
+ private native long manifestPreallocationSize(long handle);
+ private native void setUseDirectReads(long handle, boolean useDirectReads);
+ private native boolean useDirectReads(long handle);
+ private native void setUseDirectIoForFlushAndCompaction(
+ long handle, boolean useDirectIoForFlushAndCompaction);
+ private native boolean useDirectIoForFlushAndCompaction(long handle);
+ private native void setAllowFAllocate(final long handle,
+ final boolean allowFAllocate);
+ private native boolean allowFAllocate(final long handle);
+ private native void setAllowMmapReads(
+ long handle, boolean allowMmapReads);
+ private native boolean allowMmapReads(long handle);
+ private native void setAllowMmapWrites(
+ long handle, boolean allowMmapWrites);
+ private native boolean allowMmapWrites(long handle);
+ private native void setIsFdCloseOnExec(
+ long handle, boolean isFdCloseOnExec);
+ private native boolean isFdCloseOnExec(long handle);
+ private native void setStatsDumpPeriodSec(
+ long handle, int statsDumpPeriodSec);
+ private native int statsDumpPeriodSec(long handle);
+ private native void setStatsPersistPeriodSec(
+ final long handle, final int statsPersistPeriodSec);
+ private native int statsPersistPeriodSec(
+ final long handle);
+ private native void setStatsHistoryBufferSize(
+ final long handle, final long statsHistoryBufferSize);
+ private native long statsHistoryBufferSize(
+ final long handle);
+ private native void setAdviseRandomOnOpen(
+ long handle, boolean adviseRandomOnOpen);
+ private native boolean adviseRandomOnOpen(long handle);
+ private native void setDbWriteBufferSize(final long handle,
+ final long dbWriteBufferSize);
+ private native void setWriteBufferManager(final long dbOptionsHandle,
+ final long writeBufferManagerHandle);
+ private native long dbWriteBufferSize(final long handle);
+ private native void setAccessHintOnCompactionStart(final long handle,
+ final byte accessHintOnCompactionStart);
+ private native byte accessHintOnCompactionStart(final long handle);
+ private native void setNewTableReaderForCompactionInputs(final long handle,
+ final boolean newTableReaderForCompactionInputs);
+ private native boolean newTableReaderForCompactionInputs(final long handle);
+ private native void setCompactionReadaheadSize(final long handle,
+ final long compactionReadaheadSize);
+ private native long compactionReadaheadSize(final long handle);
+ private native void setRandomAccessMaxBufferSize(final long handle,
+ final long randomAccessMaxBufferSize);
+ private native long randomAccessMaxBufferSize(final long handle);
+ private native void setWritableFileMaxBufferSize(final long handle,
+ final long writableFileMaxBufferSize);
+ private native long writableFileMaxBufferSize(final long handle);
+ private native void setUseAdaptiveMutex(
+ long handle, boolean useAdaptiveMutex);
+ private native boolean useAdaptiveMutex(long handle);
+ private native void setBytesPerSync(
+ long handle, long bytesPerSync);
+ private native long bytesPerSync(long handle);
+ private native void setWalBytesPerSync(long handle, long walBytesPerSync);
+ private native long walBytesPerSync(long handle);
+ private native void setStrictBytesPerSync(
+ final long handle, final boolean strictBytesPerSync);
+ private native boolean strictBytesPerSync(
+ final long handle);
+ private native void setEnableThreadTracking(long handle,
+ boolean enableThreadTracking);
+ private native boolean enableThreadTracking(long handle);
+ private native void setDelayedWriteRate(long handle, long delayedWriteRate);
+ private native long delayedWriteRate(long handle);
+ private native void setEnablePipelinedWrite(final long handle,
+ final boolean enablePipelinedWrite);
+ private native boolean enablePipelinedWrite(final long handle);
+ private native void setUnorderedWrite(final long handle,
+ final boolean unorderedWrite);
+ private native boolean unorderedWrite(final long handle);
+ private native void setAllowConcurrentMemtableWrite(long handle,
+ boolean allowConcurrentMemtableWrite);
+ private native boolean allowConcurrentMemtableWrite(long handle);
+ private native void setEnableWriteThreadAdaptiveYield(long handle,
+ boolean enableWriteThreadAdaptiveYield);
+ private native boolean enableWriteThreadAdaptiveYield(long handle);
+ private native void setWriteThreadMaxYieldUsec(long handle,
+ long writeThreadMaxYieldUsec);
+ private native long writeThreadMaxYieldUsec(long handle);
+ private native void setWriteThreadSlowYieldUsec(long handle,
+ long writeThreadSlowYieldUsec);
+ private native long writeThreadSlowYieldUsec(long handle);
+ private native void setSkipStatsUpdateOnDbOpen(final long handle,
+ final boolean skipStatsUpdateOnDbOpen);
+ private native boolean skipStatsUpdateOnDbOpen(final long handle);
+ private native void setWalRecoveryMode(final long handle,
+ final byte walRecoveryMode);
+ private native byte walRecoveryMode(final long handle);
+ private native void setAllow2pc(final long handle,
+ final boolean allow2pc);
+ private native boolean allow2pc(final long handle);
+ private native void setRowCache(final long handle,
+ final long rowCacheHandle);
+ private native void setWalFilter(final long handle,
+ final long walFilterHandle);
+ private native void setFailIfOptionsFileError(final long handle,
+ final boolean failIfOptionsFileError);
+ private native boolean failIfOptionsFileError(final long handle);
+ private native void setDumpMallocStats(final long handle,
+ final boolean dumpMallocStats);
+ private native boolean dumpMallocStats(final long handle);
+ private native void setAvoidFlushDuringRecovery(final long handle,
+ final boolean avoidFlushDuringRecovery);
+ private native boolean avoidFlushDuringRecovery(final long handle);
+ private native void setAvoidFlushDuringShutdown(final long handle,
+ final boolean avoidFlushDuringShutdown);
+ private native boolean avoidFlushDuringShutdown(final long handle);
+ private native void setAllowIngestBehind(final long handle,
+ final boolean allowIngestBehind);
+ private native boolean allowIngestBehind(final long handle);
+ private native void setPreserveDeletes(final long handle,
+ final boolean preserveDeletes);
+ private native boolean preserveDeletes(final long handle);
+ private native void setTwoWriteQueues(final long handle,
+ final boolean twoWriteQueues);
+ private native boolean twoWriteQueues(final long handle);
+ private native void setManualWalFlush(final long handle,
+ final boolean manualWalFlush);
+ private native boolean manualWalFlush(final long handle);
+ private native void setAtomicFlush(final long handle,
+ final boolean atomicFlush);
+ private native boolean atomicFlush(final long handle);
+
+ // instance variables
+ // NOTE: If you add new member variables, please update the copy constructor above!
+ private Env env_;
+ private int numShardBits_;
+ private RateLimiter rateLimiter_;
+ private Cache rowCache_;
+ private WalFilter walFilter_;
+ private WriteBufferManager writeBufferManager_;
+}
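A minimal usage sketch of the class above: the fluent setters chain on a single
DBOptions instance, which is then handed to RocksDB.open. The path, option
values, and class name are illustrative assumptions.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    import org.rocksdb.ColumnFamilyDescriptor;
    import org.rocksdb.ColumnFamilyHandle;
    import org.rocksdb.DBOptions;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class DbOptionsExample {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        // DBOptions owns a native handle; close it (and the DB) when done.
        try (final DBOptions options = new DBOptions()
                 .setCreateIfMissing(true)
                 .setCreateMissingColumnFamilies(true)
                 .setMaxBackgroundJobs(4)) { // illustrative value
          final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
          final List<ColumnFamilyHandle> handles = new ArrayList<>();
          // the database path is an illustrative assumption
          try (final RocksDB db = RocksDB.open(
                   options, "/tmp/dboptions-example", descriptors, handles)) {
            db.put(handles.get(0), "key".getBytes(), "value".getBytes());
          } finally {
            for (final ColumnFamilyHandle handle : handles) {
              handle.close();
            }
          }
        }
      }
    }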
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java
new file mode 100644
index 000000000..99e24f3d4
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java
@@ -0,0 +1,1564 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Collection;
+import java.util.List;
+
+public interface DBOptionsInterface<T extends DBOptionsInterface<T>> {
+ /**
+ * Use this if your DB is very small (like under 1GB) and you don't want to
+ * spend lots of memory for memtables.
+ *
+ * @return the instance of the current object.
+ */
+ T optimizeForSmallDb();
+
+ /**
+ * Use the specified object to interact with the environment,
+ * e.g. to read/write files, schedule background work, etc.
+ * Default: {@link Env#getDefault()}
+ *
+ * @param env {@link Env} instance.
+ * @return the instance of the current Options.
+ */
+ T setEnv(final Env env);
+
+ /**
+ * Returns the set RocksEnv instance.
+ *
+ * @return {@link RocksEnv} instance set in the options.
+ */
+ Env getEnv();
+
+ /**
+ * <p>By default, RocksDB uses only one background thread for flush and
+   * compaction. Calling this function will set it up such that a total of
+   * `total_threads` threads is used.</p>
+ *
+ * <p>You almost definitely want to call this function if your system is
+ * bottlenecked by RocksDB.</p>
+ *
+ * @param totalThreads The total number of threads to be used by RocksDB.
+ * A good value is the number of cores.
+ *
+ * @return the instance of the current Options
+ */
+ T setIncreaseParallelism(int totalThreads);
+
+ /**
+ * If this value is set to true, then the database will be created
+ * if it is missing during {@code RocksDB.open()}.
+ * Default: false
+ *
+   * @param flag a flag indicating whether to create the database if it
+   *     is missing when {@link RocksDB#open(org.rocksdb.Options, String)}
+   *     is called.
+ * @return the instance of the current Options
+ * @see RocksDB#open(org.rocksdb.Options, String)
+ */
+ T setCreateIfMissing(boolean flag);
+
+ /**
+ * Return true if the create_if_missing flag is set to true.
+ * If true, the database will be created if it is missing.
+ *
+ * @return true if the createIfMissing option is set to true.
+ * @see #setCreateIfMissing(boolean)
+ */
+ boolean createIfMissing();
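As a brief sketch of the flag in use (the helper name and path are
illustrative, not part of the interface):

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    // Opens the database at `path`, creating it first if it does not exist.
    static RocksDB openOrCreate(final String path) throws RocksDBException {
      try (final Options options = new Options().setCreateIfMissing(true)) {
        return RocksDB.open(options, path);
      }
    }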
+
+ /**
+ * <p>If true, missing column families will be automatically created</p>
+ *
+ * <p>Default: false</p>
+ *
+ * @param flag a flag indicating if missing column families shall be
+ * created automatically.
+   * @return the instance of the current object.
+ */
+ T setCreateMissingColumnFamilies(boolean flag);
+
+ /**
+ * Return true if the create_missing_column_families flag is set
+   * to true. If true, missing column families will be created on open.
+ *
+ * @return true if the createMissingColumnFamilies is set to
+ * true.
+ * @see #setCreateMissingColumnFamilies(boolean)
+ */
+ boolean createMissingColumnFamilies();
+
+ /**
+ * If true, an error will be thrown during RocksDB.open() if the
+ * database already exists.
+ * Default: false
+ *
+ * @param errorIfExists if true, an exception will be thrown
+ * during {@code RocksDB.open()} if the database already exists.
+ * @return the reference to the current option.
+ * @see RocksDB#open(org.rocksdb.Options, String)
+ */
+ T setErrorIfExists(boolean errorIfExists);
+
+ /**
+ * If true, an error will be thrown during RocksDB.open() if the
+ * database already exists.
+ *
+ * @return if true, an error is raised when the specified database
+ * already exists before open.
+ */
+ boolean errorIfExists();
+
+ /**
+ * If true, the implementation will do aggressive checking of the
+ * data it is processing and will stop early if it detects any
+ * errors. This may have unforeseen ramifications: for example, a
+ * corruption of one DB entry may cause a large number of entries to
+ * become unreadable or for the entire DB to become unopenable.
+ * If any of the writes to the database fails (Put, Delete, Merge, Write),
+ * the database will switch to read-only mode and fail all other
+ * Write operations.
+ * Default: true
+ *
+ * @param paranoidChecks a flag to indicate whether paranoid-check
+ * is on.
+ * @return the reference to the current option.
+ */
+ T setParanoidChecks(boolean paranoidChecks);
+
+ /**
+ * If true, the implementation will do aggressive checking of the
+ * data it is processing and will stop early if it detects any
+ * errors. This may have unforeseen ramifications: for example, a
+ * corruption of one DB entry may cause a large number of entries to
+ * become unreadable or for the entire DB to become unopenable.
+ * If any of the writes to the database fails (Put, Delete, Merge, Write),
+ * the database will switch to read-only mode and fail all other
+ * Write operations.
+ *
+ * @return a boolean indicating whether paranoid-check is on.
+ */
+ boolean paranoidChecks();
+
+ /**
+ * Use to control write rate of flush and compaction. Flush has higher
+ * priority than compaction. Rate limiting is disabled if nullptr.
+ * Default: nullptr
+ *
+ * @param rateLimiter {@link org.rocksdb.RateLimiter} instance.
+ * @return the instance of the current object.
+ *
+ * @since 3.10.0
+ */
+ T setRateLimiter(RateLimiter rateLimiter);
+
+ /**
+ * Use to track SST files and control their file deletion rate.
+ *
+ * Features:
+ * - Throttle the deletion rate of the SST files.
+ * - Keep track the total size of all SST files.
+ * - Set a maximum allowed space limit for SST files that when reached
+   *    the DB won't do any further flushes or compactions and will set the
+ * background error.
+ * - Can be shared between multiple dbs.
+ *
+ * Limitations:
+ * - Only track and throttle deletes of SST files in
+ * first db_path (db_name if db_paths is empty).
+ *
+ * @param sstFileManager The SST File Manager for the db.
+ * @return the instance of the current object.
+ */
+ T setSstFileManager(SstFileManager sstFileManager);
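A short sketch of wiring in an SstFileManager; the delete rate is an
arbitrary illustrative value, and lifetime management (the manager must
outlive the DB) is elided:

    import org.rocksdb.DBOptions;
    import org.rocksdb.Env;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.SstFileManager;

    static DBOptions optionsWithSstFileManager() throws RocksDBException {
      final SstFileManager sstFileManager = new SstFileManager(Env.getDefault());
      // throttle SST file deletion to roughly 16 MB/s (illustrative value)
      sstFileManager.setDeleteRateBytesPerSecond(16L * 1024 * 1024);
      return new DBOptions()
          .setCreateIfMissing(true)
          .setSstFileManager(sstFileManager);
    }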
+
+ /**
+ * <p>Any internal progress/error information generated by
+ * the db will be written to the Logger if it is non-nullptr,
+ * or to a file stored in the same directory as the DB
+ * contents if info_log is nullptr.</p>
+ *
+ * <p>Default: nullptr</p>
+ *
+ * @param logger {@link Logger} instance.
+ * @return the instance of the current object.
+ */
+ T setLogger(Logger logger);
+
+ /**
+ * <p>Sets the RocksDB log level. Default level is INFO</p>
+ *
+ * @param infoLogLevel log level to set.
+ * @return the instance of the current object.
+ */
+ T setInfoLogLevel(InfoLogLevel infoLogLevel);
+
+ /**
+ * <p>Returns currently set log level.</p>
+ * @return {@link org.rocksdb.InfoLogLevel} instance.
+ */
+ InfoLogLevel infoLogLevel();
+
+ /**
+ * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open
+ * all files on DB::Open(). You can use this option to increase the number
+ * of threads used to open the files.
+ *
+ * Default: 16
+ *
+ * @param maxFileOpeningThreads the maximum number of threads to use to
+ * open files
+ *
+ * @return the reference to the current options.
+ */
+ T setMaxFileOpeningThreads(int maxFileOpeningThreads);
+
+ /**
+ * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open all
+ * files on DB::Open(). You can use this option to increase the number of
+ * threads used to open the files.
+ *
+ * Default: 16
+ *
+ * @return the maximum number of threads to use to open files
+ */
+ int maxFileOpeningThreads();
+
+ /**
+ * <p>Sets the statistics object which collects metrics about database operations.
+ * Statistics objects should not be shared between DB instances as
+   * they do not use any locks to prevent concurrent updates.</p>
+ *
+ * @param statistics The statistics to set
+ *
+ * @return the instance of the current object.
+ *
+ * @see RocksDB#open(org.rocksdb.Options, String)
+ */
+ T setStatistics(final Statistics statistics);
+
+ /**
+ * <p>Returns statistics object.</p>
+ *
+ * @return the instance of the statistics object or null if there is no
+ * statistics object.
+ *
+ * @see #setStatistics(Statistics)
+ */
+ Statistics statistics();
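A sketch of attaching a Statistics object and reading a counter afterwards;
the ticker chosen is just an example:

    import org.rocksdb.DBOptions;
    import org.rocksdb.Statistics;
    import org.rocksdb.TickerType;

    try (final Statistics stats = new Statistics();
         final DBOptions options = new DBOptions()
             .setCreateIfMissing(true)
             .setStatistics(stats)) {
      // ... open a DB with these options and perform some writes ...
      final long bytesWritten = stats.getTickerCount(TickerType.BYTES_WRITTEN);
      System.out.println("bytes written: " + bytesWritten);
    }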
+
+ /**
+ * <p>If true, then every store to stable storage will issue a fsync.</p>
+ * <p>If false, then every store to stable storage will issue a fdatasync.
+ * This parameter should be set to true while storing data to
+   * a filesystem like ext3 that can lose files after a reboot.</p>
+ * <p>Default: false</p>
+ *
+ * @param useFsync a boolean flag to specify whether to use fsync
+ * @return the instance of the current object.
+ */
+ T setUseFsync(boolean useFsync);
+
+ /**
+ * <p>If true, then every store to stable storage will issue a fsync.</p>
+ * <p>If false, then every store to stable storage will issue a fdatasync.
+ * This parameter should be set to true while storing data to
+   * a filesystem like ext3 that can lose files after a reboot.</p>
+ *
+ * @return boolean value indicating if fsync is used.
+ */
+ boolean useFsync();
+
+ /**
+   * A list of paths where SST files can be put into, with their target sizes.
+   * Newer data is placed into paths specified earlier in the vector while
+   * older data gradually moves to paths specified later in the vector.
+   *
+   * For example, if you have a flash device with 10GB allocated for the DB,
+   * as well as a hard drive of 2TB, you should configure it to be:
+   *   [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
+   *
+   * The system will try to guarantee data under each path is close to but
+   * not larger than the target size. But current and future file sizes used
+   * in determining where to place a file are based on best-effort estimation,
+   * which means there is a chance that the actual size under the directory
+   * is slightly more than the target size under some workloads. The user
+   * should leave some buffer room for those cases.
+   *
+   * If none of the paths has sufficient room to place a file, the file will
+   * be placed in the last path anyway, regardless of the target size.
+   *
+   * Placing newer data in earlier paths is also best-effort. The user should
+   * expect user files to be placed in higher levels in some extreme cases.
+ *
+ * If left empty, only one path will be used, which is db_name passed when
+ * opening the DB.
+ *
+ * Default: empty
+ *
+ * @param dbPaths the paths and target sizes
+ *
+ * @return the reference to the current options
+ */
+ T setDbPaths(final Collection<DbPath> dbPaths);
+
+ /**
+   * A list of paths where SST files can be put into, with their target sizes.
+   * Newer data is placed into paths specified earlier in the vector while
+   * older data gradually moves to paths specified later in the vector.
+   *
+   * For example, if you have a flash device with 10GB allocated for the DB,
+   * as well as a hard drive of 2TB, you should configure it to be:
+   *   [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
+   *
+   * The system will try to guarantee data under each path is close to but
+   * not larger than the target size. But current and future file sizes used
+   * in determining where to place a file are based on best-effort estimation,
+   * which means there is a chance that the actual size under the directory
+   * is slightly more than the target size under some workloads. The user
+   * should leave some buffer room for those cases.
+   *
+   * If none of the paths has sufficient room to place a file, the file will
+   * be placed in the last path anyway, regardless of the target size.
+   *
+   * Placing newer data in earlier paths is also best-effort. The user should
+   * expect user files to be placed in higher levels in some extreme cases.
+ *
+ * If left empty, only one path will be used, which is db_name passed when
+ * opening the DB.
+ *
+ * Default: {@link java.util.Collections#emptyList()}
+ *
+ * @return dbPaths the paths and target sizes
+ */
+ List<DbPath> dbPaths();
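A sketch mirroring the flash/hard-drive example in the comment above; the
paths and target sizes are illustrative:

    import java.nio.file.Paths;
    import java.util.Arrays;

    import org.rocksdb.DBOptions;
    import org.rocksdb.DbPath;

    try (final DBOptions options = new DBOptions()
             .setCreateIfMissing(true)
             .setDbPaths(Arrays.asList(
                 new DbPath(Paths.get("/flash_path"), 10L << 30),     // 10GB target
                 new DbPath(Paths.get("/hard_drive"), 2L << 40)))) {  // 2TB target
      // newer SST files land under /flash_path; older data migrates to /hard_drive
    }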
+
+ /**
+ * This specifies the info LOG dir.
+ * If it is empty, the log files will be in the same dir as data.
+   * If it is non-empty, the log files will be in the specified dir,
+ * and the db data dir's absolute path will be used as the log file
+ * name's prefix.
+ *
+ * @param dbLogDir the path to the info log directory
+ * @return the instance of the current object.
+ */
+ T setDbLogDir(String dbLogDir);
+
+ /**
+ * Returns the directory of info log.
+ *
+ * If it is empty, the log files will be in the same dir as data.
+   * If it is non-empty, the log files will be in the specified dir,
+ * and the db data dir's absolute path will be used as the log file
+ * name's prefix.
+ *
+ * @return the path to the info log directory
+ */
+ String dbLogDir();
+
+ /**
+ * This specifies the absolute dir path for write-ahead logs (WAL).
+   * If it is empty, the log files will be in the same dir as data
+   * (dbname is used as the data dir by default).
+   * If it is non-empty, the log files will be kept in the specified dir.
+   * When destroying the db, all log files in wal_dir and the dir itself
+   * are deleted.
+ *
+ * @param walDir the path to the write-ahead-log directory.
+ * @return the instance of the current object.
+ */
+ T setWalDir(String walDir);
+
+ /**
+ * Returns the path to the write-ahead-logs (WAL) directory.
+ *
+   * If it is empty, the log files will be in the same dir as data
+   * (dbname is used as the data dir by default).
+   * If it is non-empty, the log files will be kept in the specified dir.
+   * When destroying the db, all log files in wal_dir and the dir itself
+   * are deleted.
+ *
+ * @return the path to the write-ahead-logs (WAL) directory.
+ */
+ String walDir();
+
+ /**
+ * The periodicity when obsolete files get deleted. The default
+   * value is 6 hours. The files that get out of scope of the compaction
+   * process will still get automatically deleted on every compaction,
+   * regardless of this setting.
+ *
+ * @param micros the time interval in micros
+ * @return the instance of the current object.
+ */
+ T setDeleteObsoleteFilesPeriodMicros(long micros);
+
+ /**
+ * The periodicity when obsolete files get deleted. The default
+   * value is 6 hours. The files that get out of scope of the compaction
+   * process will still get automatically deleted on every compaction,
+   * regardless of this setting.
+ *
+ * @return the time interval in micros when obsolete files will be deleted.
+ */
+ long deleteObsoleteFilesPeriodMicros();
+
+ /**
+ * This value represents the maximum number of threads that will
+ * concurrently perform a compaction job by breaking it into multiple,
+ * smaller ones that are run simultaneously.
+ * Default: 1 (i.e. no subcompactions)
+ *
+ * @param maxSubcompactions The maximum number of threads that will
+ * concurrently perform a compaction job
+ *
+ * @return the instance of the current object.
+ */
+ T setMaxSubcompactions(int maxSubcompactions);
+
+ /**
+ * This value represents the maximum number of threads that will
+ * concurrently perform a compaction job by breaking it into multiple,
+ * smaller ones that are run simultaneously.
+ * Default: 1 (i.e. no subcompactions)
+ *
+ * @return The maximum number of threads that will concurrently perform a
+ * compaction job
+ */
+ int maxSubcompactions();
+
+ /**
+ * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
+ * value of max_background_jobs. For backwards compatibility we will set
+ * `max_background_jobs = max_background_compactions + max_background_flushes`
+ * in the case where user sets at least one of `max_background_compactions` or
+ * `max_background_flushes`.
+ *
+ * Specifies the maximum number of concurrent background flush jobs.
+   * If you're increasing this, also consider increasing the number of
+   * threads in the HIGH priority thread pool; for more information, see the
+   * references below.
+ * Default: -1
+ *
+ * @param maxBackgroundFlushes number of max concurrent flush jobs
+ * @return the instance of the current object.
+ *
+ * @see RocksEnv#setBackgroundThreads(int)
+ * @see RocksEnv#setBackgroundThreads(int, Priority)
+ * @see MutableDBOptionsInterface#maxBackgroundCompactions()
+ *
+ * @deprecated Use {@link MutableDBOptionsInterface#setMaxBackgroundJobs(int)}
+ */
+ @Deprecated
+ T setMaxBackgroundFlushes(int maxBackgroundFlushes);
+
+ /**
+ * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
+ * value of max_background_jobs. For backwards compatibility we will set
+ * `max_background_jobs = max_background_compactions + max_background_flushes`
+ * in the case where user sets at least one of `max_background_compactions` or
+ * `max_background_flushes`.
+ *
+ * Returns the maximum number of concurrent background flush jobs.
+   * If you're increasing this, also consider increasing the number of
+   * threads in the HIGH priority thread pool; for more information, see the
+   * references below.
+ * Default: -1
+ *
+ * @return the maximum number of concurrent background flush jobs.
+ * @see RocksEnv#setBackgroundThreads(int)
+ * @see RocksEnv#setBackgroundThreads(int, Priority)
+ */
+ @Deprecated
+ int maxBackgroundFlushes();
+
+ /**
+   * Specifies the maximum size of an info log file. If the current log file
+ * is larger than `max_log_file_size`, a new info log file will
+ * be created.
+ * If 0, all logs will be written to one log file.
+ *
+   * @param maxLogFileSize the maximum size of an info log file.
+ * @return the instance of the current object.
+ * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
+ * while overflowing the underlying platform specific value.
+ */
+ T setMaxLogFileSize(long maxLogFileSize);
+
+ /**
+   * Returns the maximum size of an info log file. If the current log file
+ * is larger than this size, a new info log file will be created.
+ * If 0, all logs will be written to one log file.
+ *
+ * @return the maximum size of the info log file.
+ */
+ long maxLogFileSize();
+
+ /**
+ * Specifies the time interval for the info log file to roll (in seconds).
+ * If specified with non-zero value, log file will be rolled
+ * if it has been active longer than `log_file_time_to_roll`.
+ * Default: 0 (disabled)
+ *
+ * @param logFileTimeToRoll the time interval in seconds.
+ * @return the instance of the current object.
+ * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
+ * while overflowing the underlying platform specific value.
+ */
+ T setLogFileTimeToRoll(long logFileTimeToRoll);
+
+ /**
+ * Returns the time interval for the info log file to roll (in seconds).
+ * If specified with non-zero value, log file will be rolled
+ * if it has been active longer than `log_file_time_to_roll`.
+ * Default: 0 (disabled)
+ *
+ * @return the time interval in seconds.
+ */
+ long logFileTimeToRoll();
+
+ /**
+ * Specifies the maximum number of info log files to be kept.
+ * Default: 1000
+ *
+ * @param keepLogFileNum the maximum number of info log files to be kept.
+ * @return the instance of the current object.
+ * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
+ * while overflowing the underlying platform specific value.
+ */
+ T setKeepLogFileNum(long keepLogFileNum);
+
+ /**
+ * Returns the maximum number of info log files to be kept.
+ * Default: 1000
+ *
+ * @return the maximum number of info log files to be kept.
+ */
+ long keepLogFileNum();
+
+ /**
+ * Recycle log files.
+ *
+ * If non-zero, we will reuse previously written log files for new
+ * logs, overwriting the old data. The value indicates how many
+ * such files we will keep around at any point in time for later
+ * use.
+ *
+ * This is more efficient because the blocks are already
+ * allocated and fdatasync does not need to update the inode after
+ * each write.
+ *
+ * Default: 0
+ *
+ * @param recycleLogFileNum the number of log files to keep for recycling
+ *
+ * @return the reference to the current options
+ */
+ T setRecycleLogFileNum(long recycleLogFileNum);
+
+ /**
+ * Recycle log files.
+ *
+ * If non-zero, we will reuse previously written log files for new
+ * logs, overwriting the old data. The value indicates how many
+ * such files we will keep around at any point in time for later
+ * use.
+ *
+ * This is more efficient because the blocks are already
+ * allocated and fdatasync does not need to update the inode after
+ * each write.
+ *
+ * Default: 0
+ *
+ * @return the number of log files kept for recycling
+ */
+ long recycleLogFileNum();
+
+ /**
+ * Manifest file is rolled over on reaching this limit.
+   * The older manifest file will be deleted.
+ * The default value is 1GB so that the manifest file can grow, but not
+ * reach the limit of storage capacity.
+ *
+ * @param maxManifestFileSize the size limit of a manifest file.
+ * @return the instance of the current object.
+ */
+ T setMaxManifestFileSize(long maxManifestFileSize);
+
+ /**
+ * Manifest file is rolled over on reaching this limit.
+   * The older manifest file will be deleted.
+ * The default value is 1GB so that the manifest file can grow, but not
+ * reach the limit of storage capacity.
+ *
+ * @return the size limit of a manifest file.
+ */
+ long maxManifestFileSize();
+
+ /**
+ * Number of shards used for table cache.
+ *
+   * @param tableCacheNumshardbits the number of shard bits
+ * @return the instance of the current object.
+ */
+ T setTableCacheNumshardbits(int tableCacheNumshardbits);
+
+ /**
+ * Number of shards used for table cache.
+ *
+ * @return the number of shards used for table cache.
+ */
+ int tableCacheNumshardbits();
+
+ /**
+ * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
+ * will be deleted.
+ * <ol>
+ * <li>If both set to 0, logs will be deleted asap and will not get into
+ * the archive.</li>
+ * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+ * WAL files will be checked every 10 min and if total size is greater
+   *    than WAL_size_limit_MB, they will be deleted starting with the
+ * earliest until size_limit is met. All empty files will be deleted.</li>
+ * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+   *    WAL files will be checked every WAL_ttl_seconds / 2 and those that
+ * are older than WAL_ttl_seconds will be deleted.</li>
+ * <li>If both are not 0, WAL files will be checked every 10 min and both
+ * checks will be performed with ttl being first.</li>
+ * </ol>
+ *
+ * @param walTtlSeconds the ttl seconds
+ * @return the instance of the current object.
+ * @see #setWalSizeLimitMB(long)
+ */
+ T setWalTtlSeconds(long walTtlSeconds);
+
+ /**
+   * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
+ * will be deleted.
+ * <ol>
+ * <li>If both set to 0, logs will be deleted asap and will not get into
+ * the archive.</li>
+ * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+ * WAL files will be checked every 10 min and if total size is greater
+   *    than WAL_size_limit_MB, they will be deleted starting with the
+ * earliest until size_limit is met. All empty files will be deleted.</li>
+ * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+   *    WAL files will be checked every WAL_ttl_seconds / 2 and those that
+ * are older than WAL_ttl_seconds will be deleted.</li>
+ * <li>If both are not 0, WAL files will be checked every 10 min and both
+ * checks will be performed with ttl being first.</li>
+ * </ol>
+ *
+ * @return the wal-ttl seconds
+ * @see #walSizeLimitMB()
+ */
+ long walTtlSeconds();
+
+ /**
+   * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
+ * will be deleted.
+ * <ol>
+ * <li>If both set to 0, logs will be deleted asap and will not get into
+ * the archive.</li>
+ * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+ * WAL files will be checked every 10 min and if total size is greater
+   *    than WAL_size_limit_MB, they will be deleted starting with the
+ * earliest until size_limit is met. All empty files will be deleted.</li>
+ * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+   *    WAL files will be checked every WAL_ttl_seconds / 2 and those that
+ * are older than WAL_ttl_seconds will be deleted.</li>
+ * <li>If both are not 0, WAL files will be checked every 10 min and both
+ * checks will be performed with ttl being first.</li>
+ * </ol>
+ *
+ * @param sizeLimitMB size limit in mega-bytes.
+ * @return the instance of the current object.
+   * @see #setWalTtlSeconds(long)
+ */
+ T setWalSizeLimitMB(long sizeLimitMB);
+
+ /**
+   * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
+ * will be deleted.
+ * <ol>
+ * <li>If both set to 0, logs will be deleted asap and will not get into
+ * the archive.</li>
+ * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+ * WAL files will be checked every 10 min and if total size is greater
+   *    than WAL_size_limit_MB, they will be deleted starting with the
+ * earliest until size_limit is met. All empty files will be deleted.</li>
+ * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+   *    WAL files will be checked every WAL_ttl_seconds / 2 and those that
+ * are older than WAL_ttl_seconds will be deleted.</li>
+ * <li>If both are not 0, WAL files will be checked every 10 min and both
+ * checks will be performed with ttl being first.</li>
+ * </ol>
+ * @return size limit in mega-bytes.
+   * @see #walTtlSeconds()
+ */
+ long walSizeLimitMB();
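A short sketch combining both limits, as in case 4 of the list above; the
values are arbitrary:

    import org.rocksdb.DBOptions;

    try (final DBOptions options = new DBOptions()
             .setWalTtlSeconds(60 * 60)   // archived WALs expire after one hour...
             .setWalSizeLimitMB(512)) {   // ...or once the archive exceeds 512 MB
      // with both limits set, archived WALs are checked every 10 minutes,
      // and the TTL check is applied first
    }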
+
+ /**
+ * Number of bytes to preallocate (via fallocate) the manifest
+ * files. Default is 4mb, which is reasonable to reduce random IO
+ * as well as prevent overallocation for mounts that preallocate
+ * large amounts of data (such as xfs's allocsize option).
+ *
+ * @param size the size in byte
+ * @return the instance of the current object.
+ * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
+ * while overflowing the underlying platform specific value.
+ */
+ T setManifestPreallocationSize(long size);
+
+ /**
+ * Number of bytes to preallocate (via fallocate) the manifest
+ * files. Default is 4mb, which is reasonable to reduce random IO
+ * as well as prevent overallocation for mounts that preallocate
+ * large amounts of data (such as xfs's allocsize option).
+ *
+ * @return size in bytes.
+ */
+ long manifestPreallocationSize();
+
+ /**
+ * Enable the OS to use direct I/O for reading sst tables.
+ * Default: false
+ *
+ * @param useDirectReads if true, then direct read is enabled
+ * @return the instance of the current object.
+ */
+ T setUseDirectReads(boolean useDirectReads);
+
+ /**
+ * Enable the OS to use direct I/O for reading sst tables.
+ * Default: false
+ *
+ * @return if true, then direct reads are enabled
+ */
+ boolean useDirectReads();
+
+ /**
+ * Enable the OS to use direct reads and writes in flush and
+ * compaction
+ * Default: false
+ *
+ * @param useDirectIoForFlushAndCompaction if true, then direct
+ * I/O will be enabled for background flush and compactions
+ * @return the instance of the current object.
+ */
+ T setUseDirectIoForFlushAndCompaction(boolean useDirectIoForFlushAndCompaction);
+
+ /**
+ * Enable the OS to use direct reads and writes in flush and
+ * compaction
+ *
+ * @return if true, then direct I/O is enabled for flush and
+ * compaction
+ */
+ boolean useDirectIoForFlushAndCompaction();
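A sketch enabling direct I/O for both reads and background writes. Note that
mmap reads/writes should remain disabled (their default) while direct I/O is
in use, since RocksDB rejects the combination:

    import org.rocksdb.DBOptions;

    try (final DBOptions options = new DBOptions()
             .setUseDirectReads(true)                      // O_DIRECT for SST reads
             .setUseDirectIoForFlushAndCompaction(true)) { // O_DIRECT for flush/compaction
      // allowMmapReads/allowMmapWrites are left at their defaults (false)
    }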
+
+ /**
+ * Whether fallocate calls are allowed
+ *
+ * @param allowFAllocate false if fallocate() calls are bypassed
+ *
+ * @return the reference to the current options.
+ */
+ T setAllowFAllocate(boolean allowFAllocate);
+
+ /**
+ * Whether fallocate calls are allowed
+ *
+ * @return false if fallocate() calls are bypassed
+ */
+ boolean allowFAllocate();
+
+ /**
+ * Allow the OS to mmap file for reading sst tables.
+ * Default: false
+ *
+ * @param allowMmapReads true if mmap reads are allowed.
+ * @return the instance of the current object.
+ */
+ T setAllowMmapReads(boolean allowMmapReads);
+
+ /**
+ * Allow the OS to mmap file for reading sst tables.
+ * Default: false
+ *
+ * @return true if mmap reads are allowed.
+ */
+ boolean allowMmapReads();
+
+ /**
+ * Allow the OS to mmap file for writing. Default: false
+ *
+ * @param allowMmapWrites true if mmap writes are allowed.
+ * @return the instance of the current object.
+ */
+ T setAllowMmapWrites(boolean allowMmapWrites);
+
+ /**
+ * Allow the OS to mmap file for writing. Default: false
+ *
+ * @return true if mmap writes are allowed.
+ */
+ boolean allowMmapWrites();
+
+ /**
+ * Disable child processes from inheriting open files. Default: true
+ *
+ * @param isFdCloseOnExec true if child processes are prevented from
+ * inheriting open files.
+ * @return the instance of the current object.
+ */
+ T setIsFdCloseOnExec(boolean isFdCloseOnExec);
+
+ /**
+ * Disable child processes from inheriting open files. Default: true
+ *
+ * @return true if child processes are prevented from inheriting open files.
+ */
+ boolean isFdCloseOnExec();
+
+ /**
+ * If set to true, hints to the underlying file system that the file
+ * access pattern is random when an SST file is opened.
+ * Default: true
+ *
+ * @param adviseRandomOnOpen true if hinting random access is on.
+ * @return the instance of the current object.
+ */
+ T setAdviseRandomOnOpen(boolean adviseRandomOnOpen);
+
+ /**
+ * If set to true, hints to the underlying file system that the file
+ * access pattern is random when an SST file is opened.
+ * Default: true
+ *
+ * @return true if hinting random access is on.
+ */
+ boolean adviseRandomOnOpen();
+
+ /**
+ * Amount of data to build up in memtables across all column
+ * families before writing to disk.
+ *
+ * This is distinct from {@link ColumnFamilyOptions#writeBufferSize()},
+ * which enforces a limit for a single memtable.
+ *
+ * This feature is disabled by default. Specify a non-zero value
+ * to enable it.
+ *
+ * Default: 0 (disabled)
+ *
+ * @param dbWriteBufferSize the size of the write buffer
+ *
+ * @return the reference to the current options.
+ */
+ T setDbWriteBufferSize(long dbWriteBufferSize);
+
+ /**
+ * Use passed {@link WriteBufferManager} to control memory usage across
+ * multiple column families and/or DB instances.
+ *
+ * Check <a href="https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager">
+ * https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager</a>
+ * for more details on when to use it.
+ *
+ * @param writeBufferManager The WriteBufferManager to use
+ * @return the reference of the current options.
+ */
+ T setWriteBufferManager(final WriteBufferManager writeBufferManager);
+
+ /**
+ * Reference to the {@link WriteBufferManager} used by this options instance. <br>
+ *
+ * Default: null (Disabled)
+ *
+ * @return a reference to WriteBufferManager
+ */
+ WriteBufferManager writeBufferManager();
+
+ /**
+ * Amount of data to build up in memtables across all column
+ * families before writing to disk.
+ *
+ * This is distinct from {@link ColumnFamilyOptions#writeBufferSize()},
+ * which enforces a limit for a single memtable.
+ *
+ * This feature is disabled by default. Specify a non-zero value
+ * to enable it.
+ *
+ * Default: 0 (disabled)
+ *
+ * @return the size of the write buffer
+ */
+ long dbWriteBufferSize();
+
+ /**
+ * Specify the file access pattern once a compaction is started.
+ * It will be applied to all input files of a compaction.
+ *
+ * Default: {@link AccessHint#NORMAL}
+ *
+ * @param accessHint The access hint
+ *
+ * @return the reference to the current options.
+ */
+ T setAccessHintOnCompactionStart(final AccessHint accessHint);
+
+ /**
+ * Specify the file access pattern once a compaction is started.
+ * It will be applied to all input files of a compaction.
+ *
+ * Default: {@link AccessHint#NORMAL}
+ *
+ * @return The access hint
+ */
+ AccessHint accessHintOnCompactionStart();
+
+ /**
+ * If true, always create a new file descriptor and new table reader
+ * for compaction inputs. Turning this parameter on may introduce extra
+ * memory usage in the table reader, if it allocates extra memory
+ * for indexes. This will allow file descriptor prefetch options
+ * to be set for compaction input files without impacting file
+ * descriptors for the same files used by user queries.
+ * We suggest enabling {@link BlockBasedTableConfig#cacheIndexAndFilterBlocks()}
+ * for this mode if using block-based table.
+ *
+ * Default: false
+ *
+ * @param newTableReaderForCompactionInputs true if a new file descriptor and
+ * table reader should be created for compaction inputs
+ *
+ * @return the reference to the current options.
+ */
+ T setNewTableReaderForCompactionInputs(
+ boolean newTableReaderForCompactionInputs);
+
+ /**
+ * If true, always create a new file descriptor and new table reader
+ * for compaction inputs. Turning this parameter on may introduce extra
+ * memory usage in the table reader, if it allocates extra memory
+ * for indexes. This will allow file descriptor prefetch options
+ * to be set for compaction input files without impacting file
+ * descriptors for the same files used by user queries.
+ * We suggest enabling {@link BlockBasedTableConfig#cacheIndexAndFilterBlocks()}
+ * for this mode if using block-based table.
+ *
+ * Default: false
+ *
+ * @return true if a new file descriptor and table reader are created for
+ * compaction inputs
+ */
+ boolean newTableReaderForCompactionInputs();
+
+ /**
+ * This is the maximum buffer size that is used by WinMmapReadableFile in
+ * unbuffered disk I/O mode. We need to maintain an aligned buffer for
+ * reads. We allow the buffer to grow until the specified value and then
+ * for bigger requests allocate one-shot buffers. In unbuffered mode we
+ * always bypass the read-ahead buffer at ReadaheadRandomAccessFile.
+ * When read-ahead is required we then make use of the
+ * {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and
+ * always try to read ahead.
+ * With read-ahead we always pre-allocate the buffer to that size instead of
+ * growing it up to a limit.
+ *
+ * This option is currently honored only on Windows.
+ *
+ * Default: 1 MB
+ *
+ * Special value: 0 - means do not maintain a per-instance buffer. Allocate
+ * a per-request buffer and avoid locking.
+ *
+ * @param randomAccessMaxBufferSize the maximum size of the random access
+ * buffer
+ *
+ * @return the reference to the current options.
+ */
+ T setRandomAccessMaxBufferSize(long randomAccessMaxBufferSize);
+
+ /**
+ * This is the maximum buffer size that is used by WinMmapReadableFile in
+ * unbuffered disk I/O mode. We need to maintain an aligned buffer for
+ * reads. We allow the buffer to grow until the specified value and then
+ * for bigger requests allocate one-shot buffers. In unbuffered mode we
+ * always bypass the read-ahead buffer at ReadaheadRandomAccessFile.
+ * When read-ahead is required we then make use of the
+ * {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and
+ * always try to read ahead. With read-ahead we always pre-allocate the
+ * buffer to that size instead of growing it up to a limit.
+ *
+ * This option is currently honored only on Windows.
+ *
+ * Default: 1 MB
+ *
+ * Special value: 0 - means do not maintain a per-instance buffer. Allocate
+ * a per-request buffer and avoid locking.
+ *
+ * @return the maximum size of the random access buffer
+ */
+ long randomAccessMaxBufferSize();
+
+ /**
+ * Use adaptive mutex, which spins in the user space before resorting
+ * to the kernel. This could reduce context switches when the mutex is not
+ * heavily contended. However, if the mutex is hot, we could end up
+ * wasting spin time.
+ * Default: false
+ *
+ * @param useAdaptiveMutex true if adaptive mutex is used.
+ * @return the instance of the current object.
+ */
+ T setUseAdaptiveMutex(boolean useAdaptiveMutex);
+
+ /**
+ * Use adaptive mutex, which spins in the user space before resorting
+ * to the kernel. This could reduce context switches when the mutex is not
+ * heavily contended. However, if the mutex is hot, we could end up
+ * wasting spin time.
+ * Default: false
+ *
+ * @return true if adaptive mutex is used.
+ */
+ boolean useAdaptiveMutex();
+
+ //TODO(AR) NOW
+// /**
+// * Sets the {@link EventListener}s whose callback functions
+// * will be called when specific RocksDB event happens.
+// *
+// * @param listeners the listeners who should be notified on various events.
+// *
+// * @return the instance of the current object.
+// */
+// T setListeners(final List<EventListener> listeners);
+//
+// /**
+// * Gets the {@link EventListener}s whose callback functions
+// * will be called when specific RocksDB event happens.
+// *
+// * @return a collection of Event listeners.
+// */
+// Collection<EventListener> listeners();
+
+ /**
+ * If true, then the status of the threads involved in this DB will
+ * be tracked and made available via the GetThreadList() API.
+ *
+ * Default: false
+ *
+ * @param enableThreadTracking true to enable tracking
+ *
+ * @return the reference to the current options.
+ */
+ T setEnableThreadTracking(boolean enableThreadTracking);
+
+ /**
+ * If true, then the status of the threads involved in this DB will
+ * be tracked and made available via the GetThreadList() API.
+ *
+ * Default: false
+ *
+ * @return true if tracking is enabled
+ */
+ boolean enableThreadTracking();
+
+ /**
+ * By default, a single write thread queue is maintained. The thread that gets
+ * to the head of the queue becomes the write batch group leader and is
+ * responsible for writing to the WAL and memtable for the batch group.
+ *
+ * If {@link #enablePipelinedWrite()} is true, separate write thread queues are
+ * maintained for WAL writes and memtable writes. A write thread first enters
+ * the WAL writer queue and then the memtable writer queue. A pending thread
+ * on the WAL writer queue thus only has to wait for previous writers to finish
+ * their WAL writing but not the memtable writing. Enabling the feature may
+ * improve write throughput and reduce latency of the prepare phase of
+ * two-phase commit.
+ *
+ * Default: false
+ *
+ * @param enablePipelinedWrite true to enable pipelined writes
+ *
+ * @return the reference to the current options.
+ */
+ T setEnablePipelinedWrite(final boolean enablePipelinedWrite);
+
+ /**
+ * Returns true if pipelined writes are enabled.
+ * See {@link #setEnablePipelinedWrite(boolean)}.
+ *
+ * @return true if pipelined writes are enabled, false otherwise.
+ */
+ boolean enablePipelinedWrite();
+
+ /**
+ * Setting {@link #unorderedWrite()} to true trades higher write throughput for
+ * relaxing the immutability guarantee of snapshots. This violates the
+ * repeatability one expects from ::Get from a snapshot, as well as
+ * ::MultiGet and Iterator's consistent-point-in-time view property.
+ * If the application cannot tolerate the relaxed guarantees, it can implement
+ * its own mechanisms to work around that and yet benefit from the higher
+ * throughput. Using TransactionDB with the WRITE_PREPARED write policy and
+ * {@link #twoWriteQueues()} true is one way to achieve immutable snapshots despite
+ * unordered_write.
+ *
+ * By default, i.e., when it is false, rocksdb does not advance the sequence
+ * number for new snapshots unless all the writes with lower sequence numbers
+ * are already finished. This provides the immutability that we expect from
+ * snapshots. Moreover, since Iterator and MultiGet internally depend on
+ * snapshots, the snapshot immutability results in Iterator and MultiGet
+ * offering a consistent-point-in-time view. If set to true, although the
+ * Read-Your-Own-Write property is still provided, the snapshot immutability
+ * property is relaxed: writes issued after the snapshot is obtained (with
+ * larger sequence numbers) will still not be visible to reads from that
+ * snapshot; however, there still might be pending writes (with lower sequence
+ * numbers) that will change the state visible to the snapshot after they
+ * land in the memtable.
+ *
+ * @param unorderedWrite true to enable unordered writes
+ *
+ * @return the reference to the current options.
+ */
+ T setUnorderedWrite(final boolean unorderedWrite);
+
+ /**
+ * Returns true if unordered writes are enabled.
+ * See {@link #setUnorderedWrite(boolean)}.
+ *
+ * @return true if unordered writes are enabled, false otherwise.
+ */
+ boolean unorderedWrite();
+
+ /**
+ * If true, allow multi-writers to update mem tables in parallel.
+ * Only some memtable factories support concurrent writes; currently it
+ * is implemented only for SkipListFactory. Concurrent memtable writes
+ * are not compatible with inplace_update_support or filter_deletes.
+ * It is strongly recommended to set
+ * {@link #setEnableWriteThreadAdaptiveYield(boolean)} if you are going to use
+ * this feature.
+ * Default: true
+ *
+ * @param allowConcurrentMemtableWrite true to enable concurrent writes
+ * for the memtable
+ *
+ * @return the reference to the current options.
+ */
+ T setAllowConcurrentMemtableWrite(boolean allowConcurrentMemtableWrite);
+
+ /**
+ * If true, allow multi-writers to update mem tables in parallel.
+ * Only some memtable factories support concurrent writes; currently it
+ * is implemented only for SkipListFactory. Concurrent memtable writes
+ * are not compatible with inplace_update_support or filter_deletes.
+ * It is strongly recommended to set
+ * {@link #setEnableWriteThreadAdaptiveYield(boolean)} if you are going to use
+ * this feature.
+ * Default: true
+ *
+ * @return true if concurrent writes are enabled for the memtable
+ */
+ boolean allowConcurrentMemtableWrite();
+
+ /**
+ * If true, threads synchronizing with the write batch group leader will
+ * wait for up to {@link #writeThreadMaxYieldUsec()} before blocking on a
+ * mutex. This can substantially improve throughput for concurrent workloads,
+ * regardless of whether {@link #allowConcurrentMemtableWrite()} is enabled.
+ * Default: true
+ *
+ * @param enableWriteThreadAdaptiveYield true to enable adaptive yield for the
+ * write threads
+ *
+ * @return the reference to the current options.
+ */
+ T setEnableWriteThreadAdaptiveYield(
+ boolean enableWriteThreadAdaptiveYield);
+
+ /**
+ * If true, threads synchronizing with the write batch group leader will
+ * wait for up to {@link #writeThreadMaxYieldUsec()} before blocking on a
+ * mutex. This can substantially improve throughput for concurrent workloads,
+ * regardless of whether {@link #allowConcurrentMemtableWrite()} is enabled.
+ * Default: true
+ *
+ * @return true if adaptive yield is enabled
+ * for the writing threads
+ */
+ boolean enableWriteThreadAdaptiveYield();
+
+ /**
+ * The maximum number of microseconds that a write operation will use
+ * a yielding spin loop to coordinate with other write threads before
+ * blocking on a mutex. (Assuming {@link #writeThreadSlowYieldUsec()} is
+ * set properly) increasing this value is likely to increase RocksDB
+ * throughput at the expense of increased CPU usage.
+ * Default: 100
+ *
+ * @param writeThreadMaxYieldUsec maximum number of microseconds
+ *
+ * @return the reference to the current options.
+ */
+ T setWriteThreadMaxYieldUsec(long writeThreadMaxYieldUsec);
+
+ /**
+ * The maximum number of microseconds that a write operation will use
+ * a yielding spin loop to coordinate with other write threads before
+ * blocking on a mutex. (Assuming {@link #writeThreadSlowYieldUsec()} is
+ * set properly) increasing this value is likely to increase RocksDB
+ * throughput at the expense of increased CPU usage.
+ * Default: 100
+ *
+ * @return the maximum number of microseconds
+ */
+ long writeThreadMaxYieldUsec();
+
+ /**
+ * The latency in microseconds after which a std::this_thread::yield
+ * call (sched_yield on Linux) is considered to be a signal that
+ * other processes or threads would like to use the current core.
+ * Increasing this makes writer threads more likely to take CPU
+ * by spinning, which will show up as an increase in the number of
+ * involuntary context switches.
+ * Default: 3
+ *
+ * @param writeThreadSlowYieldUsec the latency in microseconds
+ *
+ * @return the reference to the current options.
+ */
+ T setWriteThreadSlowYieldUsec(long writeThreadSlowYieldUsec);
+
+ /**
+ * The latency in microseconds after which a std::this_thread::yield
+ * call (sched_yield on Linux) is considered to be a signal that
+ * other processes or threads would like to use the current core.
+ * Increasing this makes writer threads more likely to take CPU
+ * by spinning, which will show up as an increase in the number of
+ * involuntary context switches.
+ * Default: 3
+ *
+ * @return the latency in microseconds
+ */
+ long writeThreadSlowYieldUsec();
+
+ /**
+ * If true, then DB::Open() will not update the statistics used to optimize
+ * compaction decision by loading table properties from many files.
+ * Turning off this feature will improve DB::Open() time, especially in
+ * disk-bound environments.
+ *
+ * Default: false
+ *
+ * @param skipStatsUpdateOnDbOpen true if updating stats will be skipped
+ *
+ * @return the reference to the current options.
+ */
+ T setSkipStatsUpdateOnDbOpen(boolean skipStatsUpdateOnDbOpen);
+
+ /**
+ * If true, then DB::Open() will not update the statistics used to optimize
+ * compaction decision by loading table properties from many files.
+ * Turning off this feature will improve DB::Open() time, especially in
+ * disk-bound environments.
+ *
+ * Default: false
+ *
+ * @return true if updating stats will be skipped
+ */
+ boolean skipStatsUpdateOnDbOpen();
+
+ /**
+ * Recovery mode to control the consistency while replaying WAL
+ *
+ * Default: {@link WALRecoveryMode#PointInTimeRecovery}
+ *
+ * @param walRecoveryMode The WAL recovery mode
+ *
+ * @return the reference to the current options.
+ */
+ T setWalRecoveryMode(WALRecoveryMode walRecoveryMode);
+
+ /**
+ * Recovery mode to control the consistency while replaying WAL
+ *
+ * Default: {@link WALRecoveryMode#PointInTimeRecovery}
+ *
+ * @return The WAL recovery mode
+ */
+ WALRecoveryMode walRecoveryMode();
+
+ /**
+ * If set to false then recovery will fail when a prepared
+ * transaction is encountered in the WAL.
+ *
+ * Default: false
+ *
+ * @param allow2pc true if two-phase-commit is enabled
+ *
+ * @return the reference to the current options.
+ */
+ T setAllow2pc(boolean allow2pc);
+
+ /**
+ * If set to false then recovery will fail when a prepared
+ * transaction is encountered in the WAL.
+ *
+ * Default: false
+ *
+ * @return true if two-phase-commit is enabled
+ */
+ boolean allow2pc();
+
+ /**
+ * A global cache for table-level rows.
+ *
+ * Default: null (disabled)
+ *
+ * @param rowCache The global row cache
+ *
+ * @return the reference to the current options.
+ */
+ T setRowCache(final Cache rowCache);
+
+ /**
+ * A global cache for table-level rows.
+ *
+ * Default: null (disabled)
+ *
+ * @return The global row cache
+ */
+ Cache rowCache();
+
+ /**
+ * A filter object supplied to be invoked while processing write-ahead-logs
+ * (WALs) during recovery. The filter provides a way to inspect log
+ * records, ignoring a particular record or skipping replay.
+ * The filter is invoked at startup and is currently invoked from a
+ * single thread.
+ *
+ * @param walFilter the filter for processing WALs during recovery.
+ *
+ * @return the reference to the current options.
+ */
+ T setWalFilter(final AbstractWalFilter walFilter);
+
+ /**
+ * Gets the filter for processing WALs during recovery.
+ * See {@link #setWalFilter(AbstractWalFilter)}.
+ *
+ * @return the filter used for processing WALs during recovery.
+ */
+ WalFilter walFilter();
+
+ /**
+ * If true, then DB::Open / CreateColumnFamily / DropColumnFamily
+ * / SetOptions will fail if the options file is not detected or not
+ * properly persisted.
+ *
+ * DEFAULT: false
+ *
+ * @param failIfOptionsFileError true if we should fail if there is an error
+ * in the options file
+ *
+ * @return the reference to the current options.
+ */
+ T setFailIfOptionsFileError(boolean failIfOptionsFileError);
+
+ /**
+ * If true, then DB::Open / CreateColumnFamily / DropColumnFamily
+ * / SetOptions will fail if the options file is not detected or not
+ * properly persisted.
+ *
+ * DEFAULT: false
+ *
+ * @return true if we should fail if there is an error in the options file
+ */
+ boolean failIfOptionsFileError();
+
+ /**
+ * If true, then print malloc stats together with rocksdb.stats
+ * when printing to LOG.
+ *
+ * DEFAULT: false
+ *
+ * @param dumpMallocStats true if malloc stats should be printed to LOG
+ *
+ * @return the reference to the current options.
+ */
+ T setDumpMallocStats(boolean dumpMallocStats);
+
+ /**
+ * If true, then print malloc stats together with rocksdb.stats
+ * when printing to LOG.
+ *
+ * DEFAULT: false
+ *
+ * @return true if malloc stats should be printed to LOG
+ */
+ boolean dumpMallocStats();
+
+ /**
+ * By default RocksDB replays WAL logs and flushes them on DB open, which may
+ * create very small SST files. If this option is enabled, RocksDB will try
+ * to avoid (but not guarantee not to) flush during recovery. Also, existing
+ * WAL logs will be kept, so that if a crash happened before the flush, we
+ * still have logs to recover from.
+ *
+ * DEFAULT: false
+ *
+ * @param avoidFlushDuringRecovery true to try to avoid (but not guarantee
+ * not to) flush during recovery
+ *
+ * @return the reference to the current options.
+ */
+ T setAvoidFlushDuringRecovery(boolean avoidFlushDuringRecovery);
+
+ /**
+ * By default RocksDB replays WAL logs and flushes them on DB open, which may
+ * create very small SST files. If this option is enabled, RocksDB will try
+ * to avoid (but not guarantee not to) flush during recovery. Also, existing
+ * WAL logs will be kept, so that if a crash happened before the flush, we
+ * still have logs to recover from.
+ *
+ * DEFAULT: false
+ *
+ * @return true to try to avoid (but not guarantee not to) flush during
+ * recovery
+ */
+ boolean avoidFlushDuringRecovery();
+
+ /**
+ * Set this option to true during creation of database if you want
+ * to be able to ingest behind (call IngestExternalFile() skipping keys
+ * that already exist, rather than overwriting matching keys).
+ * Setting this option to true will affect two things:
+ * 1) Disable some internal optimizations around SST file compression.
+ * 2) Reserve the bottom-most level for ingested files only.
+ * Note that num_levels should be &gt;= 3 if this option is turned on.
+ *
+ * DEFAULT: false
+ *
+ * @param allowIngestBehind true to allow ingest behind, false to disallow.
+ *
+ * @return the reference to the current options.
+ */
+ T setAllowIngestBehind(final boolean allowIngestBehind);
+
+ /**
+ * Returns true if ingest behind is allowed.
+ * See {@link #setAllowIngestBehind(boolean)}.
+ *
+ * @return true if ingest behind is allowed, false otherwise.
+ */
+ boolean allowIngestBehind();
+
+ /**
+ * Needed to support differential snapshots.
+ * If set to true then DB will only process deletes with sequence number
+ * less than what was set by SetPreserveDeletesSequenceNumber(uint64_t ts).
+ * Clients are responsible for periodically calling this method to advance
+ * the cutoff time. If this method is never called and preserve_deletes
+ * is set to true, NO deletes will ever be processed.
+ * At the moment this only keeps normal deletes, SingleDeletes will
+ * not be preserved.
+ *
+ * DEFAULT: false
+ *
+ * @param preserveDeletes true to preserve deletes.
+ *
+ * @return the reference to the current options.
+ */
+ T setPreserveDeletes(final boolean preserveDeletes);
+
+ /**
+ * Returns true if deletes are preserved.
+ * See {@link #setPreserveDeletes(boolean)}.
+ *
+ * @return true if deletes are preserved, false otherwise.
+ */
+ boolean preserveDeletes();
+
+ /**
+ * If enabled, it uses two queues for writes: one for writes with
+ * disable_memtable and one for writes that also write to the memtable. This
+ * allows the memtable writes not to lag behind other writes. It can be used
+ * to optimize MySQL 2PC in which only the commits, which are serial, write to
+ * the memtable.
+ *
+ * DEFAULT: false
+ *
+ * @param twoWriteQueues true to enable two write queues, false otherwise.
+ *
+ * @return the reference to the current options.
+ */
+ T setTwoWriteQueues(final boolean twoWriteQueues);
+
+ /**
+ * Returns true if two write queues are enabled.
+ *
+ * @return true if two write queues are enabled, false otherwise.
+ */
+ boolean twoWriteQueues();
+
+ /**
+ * If true WAL is not flushed automatically after each write. Instead it
+ * relies on manual invocation of FlushWAL to write the WAL buffer to its
+ * file.
+ *
+ * DEFAULT: false
+ *
+ * @param manualWalFlush true to disable automatic WAL flushing,
+ * false otherwise.
+ *
+ * @return the reference to the current options.
+ */
+ T setManualWalFlush(final boolean manualWalFlush);
+
+ /**
+ * Returns true if automatic WAL flushing is disabled.
+ * See {@link #setManualWalFlush(boolean)}.
+ *
+ * @return true if automatic WAL flushing is disabled, false otherwise.
+ */
+ boolean manualWalFlush();
+
+ /**
+ * If true, RocksDB supports flushing multiple column families and committing
+ * their results atomically to MANIFEST. Note that it is not
+ * necessary to set atomic_flush to true if WAL is always enabled since WAL
+ * allows the database to be restored to the last persistent state in WAL.
+ * This option is useful when there are column families with writes NOT
+ * protected by WAL.
+ * For manual flush, the application has to specify which column families to
+ * flush atomically in {@link RocksDB#flush(FlushOptions, List)}.
+ * For auto-triggered flush, RocksDB atomically flushes ALL column families.
+ *
+ * Currently, any WAL-enabled writes after atomic flush may be replayed
+ * independently if the process crashes later and tries to recover.
+ *
+ * @param atomicFlush true to enable atomic flush of multiple column families.
+ *
+ * @return the reference to the current options.
+ */
+ T setAtomicFlush(final boolean atomicFlush);
+
+ /**
+ * Determine if atomic flush of multiple column families is enabled.
+ *
+ * See {@link #setAtomicFlush(boolean)}.
+ *
+ * @return true if atomic flush is enabled.
+ */
+ boolean atomicFlush();
+}
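A minimal usage sketch for the mutators declared above, assuming the concrete
DBOptions class implements this interface with fluent setters (each setter
returns the options object, as documented, so calls can be chained):

    import org.rocksdb.DBOptions;

    try (final DBOptions dbOptions = new DBOptions()
             .setUseDirectReads(true)                 // O_DIRECT for SST reads
             .setDbWriteBufferSize(64L * 1024 * 1024) // cap memtables across CFs
             .setEnablePipelinedWrite(true)           // split WAL/memtable queues
             .setAtomicFlush(false)
             .setManualWalFlush(false)) {             // keep automatic WAL flushing
      // hand dbOptions to RocksDB.open(...) together with the column families
    }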
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/DataBlockIndexType.java b/src/rocksdb/java/src/main/java/org/rocksdb/DataBlockIndexType.java
new file mode 100644
index 000000000..513e5b429
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/DataBlockIndexType.java
@@ -0,0 +1,32 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+
+/**
+ * DataBlockIndexType used in conjunction with BlockBasedTable.
+ */
+public enum DataBlockIndexType {
+ /**
+ * traditional block type
+ */
+ kDataBlockBinarySearch((byte)0x0),
+
+ /**
+ * additional hash index
+ */
+ kDataBlockBinaryAndHash((byte)0x1);
+
+ private final byte value;
+
+ DataBlockIndexType(final byte value) {
+ this.value = value;
+ }
+
+ byte getValue() {
+ return value;
+ }
+}
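A hedged sketch of where this enum is consumed; it assumes the
setDataBlockIndexType(DataBlockIndexType) setter on BlockBasedTableConfig
that this enum is designed to accompany:

    import org.rocksdb.BlockBasedTableConfig;
    import org.rocksdb.DataBlockIndexType;
    import org.rocksdb.Options;

    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
        // binary search plus an additional hash index inside each data block
        .setDataBlockIndexType(DataBlockIndexType.kDataBlockBinaryAndHash);
    final Options options = new Options().setTableFormatConfig(tableConfig);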
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/DbPath.java b/src/rocksdb/java/src/main/java/org/rocksdb/DbPath.java
new file mode 100644
index 000000000..3f0b67557
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/DbPath.java
@@ -0,0 +1,47 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.file.Path;
+
+/**
+ * Tuple of database path and target size
+ */
+public class DbPath {
+ final Path path;
+ final long targetSize;
+
+ public DbPath(final Path path, final long targetSize) {
+ this.path = path;
+ this.targetSize = targetSize;
+ }
+
+ @Override
+ public boolean equals(final Object o) {
+ if (this == o) {
+ return true;
+ }
+
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ final DbPath dbPath = (DbPath) o;
+
+ if (targetSize != dbPath.targetSize) {
+ return false;
+ }
+
+ return path != null ? path.equals(dbPath.path) : dbPath.path == null;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = path != null ? path.hashCode() : 0;
+ result = 31 * result + (int) (targetSize ^ (targetSize >>> 32));
+ return result;
+ }
+}
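A short usage sketch, assuming the setDbPaths(Collection&lt;DbPath&gt;) setter on
Options; the paths and target sizes below are illustrative only:

    import java.nio.file.Paths;
    import java.util.Arrays;
    import org.rocksdb.DbPath;
    import org.rocksdb.Options;

    // Spread SST files over two volumes; targetSize caps (in bytes) how much
    // data is steered to each path.
    final Options options = new Options().setDbPaths(Arrays.asList(
        new DbPath(Paths.get("/mnt/fast/db"), 100L * 1024 * 1024 * 1024),
        new DbPath(Paths.get("/mnt/big/db"), 1024L * 1024 * 1024 * 1024)));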
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java b/src/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java
new file mode 100644
index 000000000..b0d35c3cc
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java
@@ -0,0 +1,132 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Base class for slices which will receive direct
+ * ByteBuffer based access to the underlying data.
+ *
+ * ByteBuffer backed slices typically perform better with
+ * larger keys and values. When using smaller keys and
+ * values consider using {@link org.rocksdb.Slice}.
+ */
+public class DirectSlice extends AbstractSlice<ByteBuffer> {
+ public final static DirectSlice NONE = new DirectSlice();
+
+ /**
+ * Indicates whether we have to free the memory pointed to by the Slice
+ */
+ private final boolean internalBuffer;
+ private volatile boolean cleared = false;
+ private volatile long internalBufferOffset = 0;
+
+ /**
+ * Called from JNI to construct a new Java DirectSlice
+ * without an underlying C++ object set
+ * at creation time.
+ *
+ * Note: You should be aware that it is intentionally marked as
+ * package-private. This is so that developers cannot construct their own
+ * default DirectSlice objects (at present). As developers cannot construct
+ * their own DirectSlice objects through this, they are not creating
+ * underlying C++ DirectSlice objects, and so there is nothing to free
+ * (dispose) from Java.
+ */
+ DirectSlice() {
+ super();
+ this.internalBuffer = false;
+ }
+
+ /**
+ * Constructs a slice
+ * where the data is taken from
+ * a String.
+ *
+ * @param str The string
+ */
+ public DirectSlice(final String str) {
+ super(createNewSliceFromString(str));
+ this.internalBuffer = true;
+ }
+
+ /**
+ * Constructs a slice where the data is
+ * read from the provided
+ * ByteBuffer up to a certain length
+ *
+ * @param data The buffer containing the data
+ * @param length The length of the data to use for the slice
+ */
+ public DirectSlice(final ByteBuffer data, final int length) {
+ super(createNewDirectSlice0(ensureDirect(data), length));
+ this.internalBuffer = false;
+ }
+
+ /**
+ * Constructs a slice where the data is
+ * read from the provided
+ * ByteBuffer
+ *
+ * @param data The buffer containing the data
+ */
+ public DirectSlice(final ByteBuffer data) {
+ super(createNewDirectSlice1(ensureDirect(data)));
+ this.internalBuffer = false;
+ }
+
+ private static ByteBuffer ensureDirect(final ByteBuffer data) {
+ if(!data.isDirect()) {
+ throw new IllegalArgumentException("The ByteBuffer must be direct");
+ }
+ return data;
+ }
+
+ /**
+ * Retrieves the byte at a specific offset
+ * from the underlying data
+ *
+ * @param offset The (zero-based) offset of the byte to retrieve
+ *
+ * @return the requested byte
+ */
+ public byte get(final int offset) {
+ return get0(getNativeHandle(), offset);
+ }
+
+ @Override
+ public void clear() {
+ clear0(getNativeHandle(), !cleared && internalBuffer, internalBufferOffset);
+ cleared = true;
+ }
+
+ @Override
+ public void removePrefix(final int n) {
+ removePrefix0(getNativeHandle(), n);
+ this.internalBufferOffset += n;
+ }
+
+ @Override
+ protected void disposeInternal() {
+ final long nativeHandle = getNativeHandle();
+ if(!cleared && internalBuffer) {
+ disposeInternalBuf(nativeHandle, internalBufferOffset);
+ }
+ disposeInternal(nativeHandle);
+ }
+
+ private native static long createNewDirectSlice0(final ByteBuffer data,
+ final int length);
+ private native static long createNewDirectSlice1(final ByteBuffer data);
+ @Override protected final native ByteBuffer data0(long handle);
+ private native byte get0(long handle, int offset);
+ private native void clear0(long handle, boolean internalBuffer,
+ long internalBufferOffset);
+ private native void removePrefix0(long handle, int length);
+ private native void disposeInternalBuf(final long handle,
+ long internalBufferOffset);
+}
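A small sketch of constructing a DirectSlice; the buffer must be direct, as
enforced by ensureDirect above, and the native library must already be loaded:

    import java.nio.ByteBuffer;
    import org.rocksdb.DirectSlice;
    import org.rocksdb.RocksDB;

    RocksDB.loadLibrary();
    final ByteBuffer keyBuffer = ByteBuffer.allocateDirect(16);
    keyBuffer.put("user:1234".getBytes());              // 9 bytes of key data
    final DirectSlice keySlice = new DirectSlice(keyBuffer, 9);
    final byte first = keySlice.get(0);                 // 'u'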
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java b/src/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java
new file mode 100644
index 000000000..5ceeb54c8
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java
@@ -0,0 +1,55 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * EncodingType
+ *
+ * <p>The value will determine how to encode keys
+ * when writing to a new SST file.</p>
+ *
+ * <p>This value will be stored
+ * inside the SST file which will be used when reading from
+ * the file, which makes it possible for users to choose
+ * different encoding type when reopening a DB. Files with
+ * different encoding types can co-exist in the same DB and
+ * can be read.</p>
+ */
+public enum EncodingType {
+ /**
+ * Always write full keys without any special encoding.
+ */
+ kPlain((byte) 0),
+ /**
+ * <p>Find opportunity to write the same prefix once for multiple rows.
+ * In some cases, when a key follows a previous key with the same prefix,
+ * instead of writing out the full key, it just writes out the size of the
+ * shared prefix, as well as other bytes, to save some bytes.</p>
+ *
+ * <p>When using this option, the user is required to use the same prefix
+ * extractor to make sure the same prefix will be extracted from the same key.
+ * The Name() value of the prefix extractor will be stored in the file. When
+ * reopening the file, the name of the options.prefix_extractor given will be
+ * bitwise compared to the prefix extractors stored in the file. An error
+ * will be returned if the two don't match.</p>
+ */
+ kPrefix((byte) 1);
+
+ /**
+ * Returns the byte value of the enumerations value
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value_;
+ }
+
+ private EncodingType(byte value) {
+ value_ = value;
+ }
+
+ private final byte value_;
+}
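A hedged sketch of selecting an encoding type; it assumes the
PlainTableConfig#setEncodingType(EncodingType) setter, and kPrefix
additionally requires a prefix extractor, as the javadoc above explains:

    import org.rocksdb.EncodingType;
    import org.rocksdb.Options;
    import org.rocksdb.PlainTableConfig;

    final Options options = new Options()
        .useFixedLengthPrefixExtractor(8)        // same extractor on reopen
        .setTableFormatConfig(new PlainTableConfig()
            .setEncodingType(EncodingType.kPrefix));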
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Env.java b/src/rocksdb/java/src/main/java/org/rocksdb/Env.java
new file mode 100644
index 000000000..719296a14
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Env.java
@@ -0,0 +1,167 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Base class for all Env implementations in RocksDB.
+ */
+public abstract class Env extends RocksObject {
+
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ private static final Env DEFAULT_ENV = new RocksEnv(getDefaultEnvInternal());
+ static {
+ /**
+ * The Ownership of the Default Env belongs to C++
+ * and so we disown the native handle here so that
+ * we cannot accidentally free it from Java.
+ */
+ DEFAULT_ENV.disOwnNativeHandle();
+ }
+
+ /**
+ * <p>Returns the default environment suitable for the current operating
+ * system.</p>
+ *
+ * <p>The result of {@code getDefault()} is a singleton whose ownership
+ * belongs to rocksdb c++. As a result, the returned RocksEnv will not
+ * have the ownership of its c++ resource, and calling its dispose()/close()
+ * will be no-op.</p>
+ *
+ * @return the default {@link org.rocksdb.RocksEnv} instance.
+ */
+ public static Env getDefault() {
+ return DEFAULT_ENV;
+ }
+
+ /**
+ * <p>Sets the number of background worker threads of the default
+ * (low priority) thread pool for this environment.</p>
+ * <p>Default number: 1</p>
+ *
+ * @param number the number of threads
+ *
+ * @return current {@link RocksEnv} instance.
+ */
+ public Env setBackgroundThreads(final int number) {
+ return setBackgroundThreads(number, Priority.LOW);
+ }
+
+ /**
+ * <p>Gets the number of background worker threads of the pool
+ * for this environment.</p>
+ *
+ * @param priority the priority id of a specified thread pool.
+ *
+ * @return the number of threads.
+ */
+ public int getBackgroundThreads(final Priority priority) {
+ return getBackgroundThreads(nativeHandle_, priority.getValue());
+ }
+
+ /**
+ * <p>Sets the number of background worker threads of the specified thread
+ * pool for this environment.</p>
+ *
+ * @param number the number of threads
+ * @param priority the priority id of a specified thread pool.
+ *
+ * <p>Default number: 1</p>
+ * @return current {@link RocksEnv} instance.
+ */
+ public Env setBackgroundThreads(final int number, final Priority priority) {
+ setBackgroundThreads(nativeHandle_, number, priority.getValue());
+ return this;
+ }
+
+ /**
+ * <p>Returns the length of the queue associated with the specified
+ * thread pool.</p>
+ *
+ * @param priority the priority id of a specified thread pool.
+ *
+ * @return the thread pool queue length.
+ */
+ public int getThreadPoolQueueLen(final Priority priority) {
+ return getThreadPoolQueueLen(nativeHandle_, priority.getValue());
+ }
+
+ /**
+ * Enlarge the number of background worker threads of a specific thread pool
+ * for this environment if it is smaller than specified. 'LOW' is the default
+ * pool.
+ *
+ * @param number the number of threads.
+ * @param priority the priority id of a specified thread pool.
+ *
+ * @return current {@link RocksEnv} instance.
+ */
+ public Env incBackgroundThreadsIfNeeded(final int number,
+ final Priority priority) {
+ incBackgroundThreadsIfNeeded(nativeHandle_, number, priority.getValue());
+ return this;
+ }
+
+ /**
+ * Lower IO priority for threads from the specified pool.
+ *
+ * @param priority the priority id of a specified thread pool.
+ *
+ * @return current {@link RocksEnv} instance.
+ */
+ public Env lowerThreadPoolIOPriority(final Priority priority) {
+ lowerThreadPoolIOPriority(nativeHandle_, priority.getValue());
+ return this;
+ }
+
+ /**
+ * Lower CPU priority for threads from the specified pool.
+ *
+ * @param priority the priority id of a specified thread pool.
+ *
+ * @return current {@link RocksEnv} instance.
+ */
+ public Env lowerThreadPoolCPUPriority(final Priority priority) {
+ lowerThreadPoolCPUPriority(nativeHandle_, priority.getValue());
+ return this;
+ }
+
+ /**
+ * Returns the status of all threads that belong to the current Env.
+ *
+ * @return the status of all threads belong to this env.
+ *
+ * @throws RocksDBException if the thread list cannot be acquired.
+ */
+ public List<ThreadStatus> getThreadList() throws RocksDBException {
+ return Arrays.asList(getThreadList(nativeHandle_));
+ }
+
+ Env(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ private static native long getDefaultEnvInternal();
+ private native void setBackgroundThreads(
+ final long handle, final int number, final byte priority);
+ private native int getBackgroundThreads(final long handle,
+ final byte priority);
+ private native int getThreadPoolQueueLen(final long handle,
+ final byte priority);
+ private native void incBackgroundThreadsIfNeeded(final long handle,
+ final int number, final byte priority);
+ private native void lowerThreadPoolIOPriority(final long handle,
+ final byte priority);
+ private native void lowerThreadPoolCPUPriority(final long handle,
+ final byte priority);
+ private native ThreadStatus[] getThreadList(final long handle)
+ throws RocksDBException;
+}
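A brief sketch of tuning the shared default environment; the Priority values
are assumed to map to the compaction (LOW) and flush (HIGH) pools, and closing
the singleton is a no-op as described above:

    import org.rocksdb.Env;
    import org.rocksdb.Options;
    import org.rocksdb.Priority;

    final Env env = Env.getDefault()
        .setBackgroundThreads(4, Priority.LOW)    // compaction pool
        .setBackgroundThreads(2, Priority.HIGH);  // flush pool
    final Options options = new Options().setEnv(env);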
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java
new file mode 100644
index 000000000..6baddb310
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java
@@ -0,0 +1,366 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Options while opening a file to read/write
+ */
+public class EnvOptions extends RocksObject {
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ /**
+ * Construct with default Options
+ */
+ public EnvOptions() {
+ super(newEnvOptions());
+ }
+
+ /**
+ * Construct from {@link DBOptions}.
+ *
+ * @param dbOptions the database options.
+ */
+ public EnvOptions(final DBOptions dbOptions) {
+ super(newEnvOptions(dbOptions.nativeHandle_));
+ }
+
+ /**
+ * Enable/Disable memory mapped reads.
+ *
+ * Default: false
+ *
+ * @param useMmapReads true to enable memory mapped reads, false to disable.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setUseMmapReads(final boolean useMmapReads) {
+ setUseMmapReads(nativeHandle_, useMmapReads);
+ return this;
+ }
+
+ /**
+ * Determine if memory mapped reads are in-use.
+ *
+ * @return true if memory mapped reads are in-use, false otherwise.
+ */
+ public boolean useMmapReads() {
+ assert(isOwningHandle());
+ return useMmapReads(nativeHandle_);
+ }
+
+ /**
+ * Enable/Disable memory mapped Writes.
+ *
+ * Default: true
+ *
+ * @param useMmapWrites true to enable memory mapped writes, false to disable.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setUseMmapWrites(final boolean useMmapWrites) {
+ setUseMmapWrites(nativeHandle_, useMmapWrites);
+ return this;
+ }
+
+ /**
+ * Determine if memory mapped writes are in-use.
+ *
+ * @return true if memory mapped writes are in-use, false otherwise.
+ */
+ public boolean useMmapWrites() {
+ assert(isOwningHandle());
+ return useMmapWrites(nativeHandle_);
+ }
+
+ /**
+ * Enable/Disable direct reads, i.e. {@code O_DIRECT}.
+ *
+ * Default: false
+ *
+ * @param useDirectReads true to enable direct reads, false to disable.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setUseDirectReads(final boolean useDirectReads) {
+ setUseDirectReads(nativeHandle_, useDirectReads);
+ return this;
+ }
+
+ /**
+ * Determine if direct reads are in-use.
+ *
+ * @return true if direct reads are in-use, false otherwise.
+ */
+ public boolean useDirectReads() {
+ assert(isOwningHandle());
+ return useDirectReads(nativeHandle_);
+ }
+
+ /**
+ * Enable/Disable direct writes, i.e. {@code O_DIRECT}.
+ *
+ * Default: false
+ *
+ * @param useDirectWrites true to enable direct writes, false to disable.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setUseDirectWrites(final boolean useDirectWrites) {
+ setUseDirectWrites(nativeHandle_, useDirectWrites);
+ return this;
+ }
+
+ /**
+ * Determine if direct writes are in-use.
+ *
+ * @return true if direct writes are in-use, false otherwise.
+ */
+ public boolean useDirectWrites() {
+ assert(isOwningHandle());
+ return useDirectWrites(nativeHandle_);
+ }
+
+ /**
+ * Enable/Disable fallocate calls.
+ *
+ * Default: true
+ *
+ * If false, {@code fallocate()} calls are bypassed.
+ *
+ * @param allowFallocate true to enable fallocate calls, false to disable.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setAllowFallocate(final boolean allowFallocate) {
+ setAllowFallocate(nativeHandle_, allowFallocate);
+ return this;
+ }
+
+ /**
+ * Determine if fallocate calls are used.
+ *
+ * @return true if fallocate calls are used, false otherwise.
+ */
+ public boolean allowFallocate() {
+ assert(isOwningHandle());
+ return allowFallocate(nativeHandle_);
+ }
+
+ /**
+ * Enable/Disable the {@code FD_CLOEXEC} bit when opening file descriptors.
+ *
+ * Default: true
+ *
+ * @param setFdCloexec true to enable the {@code FD_CLOEXEC} bit,
+ * false to disable.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setSetFdCloexec(final boolean setFdCloexec) {
+ setSetFdCloexec(nativeHandle_, setFdCloexec);
+ return this;
+ }
+
+ /**
+ * Determine if the {@code FD_CLOEXEC} bit is set when opening file
+ * descriptors.
+ *
+ * @return true if the {@code FD_CLOEXEC} bit is enabled, false otherwise.
+ */
+ public boolean setFdCloexec() {
+ assert(isOwningHandle());
+ return setFdCloexec(nativeHandle_);
+ }
+
+ /**
+ * Allows OS to incrementally sync files to disk while they are being
+ * written, in the background. Issue one request for every
+ * {@code bytesPerSync} written.
+ *
+ * Default: 0
+ *
+ * @param bytesPerSync 0 to disable, otherwise the number of bytes.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setBytesPerSync(final long bytesPerSync) {
+ setBytesPerSync(nativeHandle_, bytesPerSync);
+ return this;
+ }
+
+ /**
+ * Get the number of incremental bytes per sync written in the background.
+ *
+ * @return 0 if disabled, otherwise the number of bytes.
+ */
+ public long bytesPerSync() {
+ assert(isOwningHandle());
+ return bytesPerSync(nativeHandle_);
+ }
+
+ /**
+ * If true, we will preallocate the file with {@code FALLOC_FL_KEEP_SIZE}
+ * flag, which means that file size won't change as part of preallocation.
+ * If false, preallocation will also change the file size. This option will
+ * improve the performance in workloads where you sync the data on every
+ * write. By default, we set it to true for MANIFEST writes and false for
+ * WAL writes
+ *
+ * @param fallocateWithKeepSize true to preallocate, false otherwise.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setFallocateWithKeepSize(
+ final boolean fallocateWithKeepSize) {
+ setFallocateWithKeepSize(nativeHandle_, fallocateWithKeepSize);
+ return this;
+ }
+
+ /**
+ * Determine if file is preallocated.
+ *
+ * @return true if the file is preallocated, false otherwise.
+ */
+ public boolean fallocateWithKeepSize() {
+ assert(isOwningHandle());
+ return fallocateWithKeepSize(nativeHandle_);
+ }
+
+ /**
+ * See {@link DBOptions#setCompactionReadaheadSize(long)}.
+ *
+ * @param compactionReadaheadSize the compaction read-ahead size.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setCompactionReadaheadSize(
+ final long compactionReadaheadSize) {
+ setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize);
+ return this;
+ }
+
+ /**
+ * See {@link DBOptions#compactionReadaheadSize()}.
+ *
+ * @return the compaction read-ahead size.
+ */
+ public long compactionReadaheadSize() {
+ assert(isOwningHandle());
+ return compactionReadaheadSize(nativeHandle_);
+ }
+
+ /**
+ * See {@link DBOptions#setRandomAccessMaxBufferSize(long)}.
+ *
+ * @param randomAccessMaxBufferSize the max buffer size for random access.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setRandomAccessMaxBufferSize(
+ final long randomAccessMaxBufferSize) {
+ setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize);
+ return this;
+ }
+
+ /**
+ * See {@link DBOptions#randomAccessMaxBufferSize()}.
+ *
+ * @return the max buffer size for random access.
+ */
+ public long randomAccessMaxBufferSize() {
+ assert(isOwningHandle());
+ return randomAccessMaxBufferSize(nativeHandle_);
+ }
+
+ /**
+ * See {@link DBOptions#setWritableFileMaxBufferSize(long)}.
+ *
+ * @param writableFileMaxBufferSize the max buffer size.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setWritableFileMaxBufferSize(
+ final long writableFileMaxBufferSize) {
+ setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize);
+ return this;
+ }
+
+ /**
+ * See {@link DBOptions#writableFileMaxBufferSize()}.
+ *
+ * @return the max buffer size.
+ */
+ public long writableFileMaxBufferSize() {
+ assert(isOwningHandle());
+ return writableFileMaxBufferSize(nativeHandle_);
+ }
+
+ /**
+ * Set the write rate limiter for flush and compaction.
+ *
+ * @param rateLimiter the rate limiter.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setRateLimiter(final RateLimiter rateLimiter) {
+ this.rateLimiter = rateLimiter;
+ setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_);
+ return this;
+ }
+
+ /**
+ * Get the write rate limiter for flush and compaction.
+ *
+ * @return the rate limiter.
+ */
+ public RateLimiter rateLimiter() {
+ assert(isOwningHandle());
+ return rateLimiter;
+ }
+
+ private native static long newEnvOptions();
+ private native static long newEnvOptions(final long dboptions_handle);
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native void setUseMmapReads(final long handle,
+ final boolean useMmapReads);
+ private native boolean useMmapReads(final long handle);
+ private native void setUseMmapWrites(final long handle,
+ final boolean useMmapWrites);
+ private native boolean useMmapWrites(final long handle);
+ private native void setUseDirectReads(final long handle,
+ final boolean useDirectReads);
+ private native boolean useDirectReads(final long handle);
+ private native void setUseDirectWrites(final long handle,
+ final boolean useDirectWrites);
+ private native boolean useDirectWrites(final long handle);
+ private native void setAllowFallocate(final long handle,
+ final boolean allowFallocate);
+ private native boolean allowFallocate(final long handle);
+ private native void setSetFdCloexec(final long handle,
+ final boolean setFdCloexec);
+ private native boolean setFdCloexec(final long handle);
+ private native void setBytesPerSync(final long handle,
+ final long bytesPerSync);
+ private native long bytesPerSync(final long handle);
+ private native void setFallocateWithKeepSize(
+ final long handle, final boolean fallocateWithKeepSize);
+ private native boolean fallocateWithKeepSize(final long handle);
+ private native void setCompactionReadaheadSize(
+ final long handle, final long compactionReadaheadSize);
+ private native long compactionReadaheadSize(final long handle);
+ private native void setRandomAccessMaxBufferSize(
+ final long handle, final long randomAccessMaxBufferSize);
+ private native long randomAccessMaxBufferSize(final long handle);
+ private native void setWritableFileMaxBufferSize(
+ final long handle, final long writableFileMaxBufferSize);
+ private native long writableFileMaxBufferSize(final long handle);
+ private native void setRateLimiter(final long handle,
+ final long rateLimiterHandle);
+ private RateLimiter rateLimiter;
+}
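A minimal sketch of EnvOptions in use; it assumes the
SstFileWriter(EnvOptions, Options) constructor, a surrounding method that
declares throws RocksDBException, and an illustrative output path:

    import org.rocksdb.EnvOptions;
    import org.rocksdb.Options;
    import org.rocksdb.SstFileWriter;

    try (final EnvOptions envOptions = new EnvOptions()
             .setUseDirectWrites(true)        // O_DIRECT while writing
             .setBytesPerSync(1024 * 1024);   // background sync every 1 MB
         final Options options = new Options();
         final SstFileWriter writer = new SstFileWriter(envOptions, options)) {
      writer.open("/tmp/example.sst");
      writer.put("key1".getBytes(), "value1".getBytes());
      writer.finish();
    }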
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Experimental.java b/src/rocksdb/java/src/main/java/org/rocksdb/Experimental.java
new file mode 100644
index 000000000..64b404d6f
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Experimental.java
@@ -0,0 +1,23 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Marks a feature as experimental, meaning that it is likely
+ * to change or even be removed/re-engineered in the future
+ */
+@Documented
+@Retention(RetentionPolicy.SOURCE)
+@Target({ElementType.TYPE, ElementType.METHOD})
+public @interface Experimental {
+ String value();
+}
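A tiny illustration of applying the annotation; the interface and value
string below are invented for the example:

    // Marks an unstable API surface; the value documents why it may change.
    @Experimental("May be re-engineered in a future release")
    public interface FastScan {
      byte[] scanUnsafe(final byte[] fromKey);
    }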
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Filter.java b/src/rocksdb/java/src/main/java/org/rocksdb/Filter.java
new file mode 100644
index 000000000..7f490cf59
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Filter.java
@@ -0,0 +1,36 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Filters are stored in rocksdb and are consulted automatically
+ * by rocksdb to decide whether or not to read some
+ * information from disk. In many cases, a filter can cut down the
+ * number of disk seeks form a handful to a single disk seek per
+ * DB::Get() call.
+ */
+//TODO(AR) should be renamed FilterPolicy
+public abstract class Filter extends RocksObject {
+
+ protected Filter(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Deletes underlying C++ filter pointer.
+ *
+ * Note that this function should be called only after all
+ * RocksDB instances referencing the filter are closed.
+ * Otherwise an undefined behavior will occur.
+ */
+ @Override
+ protected void disposeInternal() {
+ disposeInternal(nativeHandle_);
+ }
+
+ @Override
+ protected final native void disposeInternal(final long handle);
+}
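A short sketch of plugging a concrete Filter into the table options; it
assumes the BloomFilter subclass and the
BlockBasedTableConfig#setFilterPolicy(Filter) setter. Per the disposal note
above, the filter must outlive every RocksDB instance that references it:

    import org.rocksdb.BlockBasedTableConfig;
    import org.rocksdb.BloomFilter;
    import org.rocksdb.Options;

    // ~10 bits per key gives roughly a 1% false-positive rate.
    final BloomFilter bloomFilter = new BloomFilter(10);
    final Options options = new Options().setTableFormatConfig(
        new BlockBasedTableConfig().setFilterPolicy(bloomFilter));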
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java
new file mode 100644
index 000000000..760b515fd
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java
@@ -0,0 +1,90 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * FlushOptions to be passed to flush operations of
+ * {@link org.rocksdb.RocksDB}.
+ */
+public class FlushOptions extends RocksObject {
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ /**
+ * Construct a new instance of FlushOptions.
+ */
+ public FlushOptions(){
+ super(newFlushOptions());
+ }
+
+ /**
+ * Set if the flush operation shall block until it terminates.
+ *
+ * @param waitForFlush boolean value indicating if the flush
+ * operations waits for termination of the flush process.
+ *
+ * @return instance of current FlushOptions.
+ */
+ public FlushOptions setWaitForFlush(final boolean waitForFlush) {
+ assert(isOwningHandle());
+ setWaitForFlush(nativeHandle_, waitForFlush);
+ return this;
+ }
+
+ /**
+ * Wait for flush to finish.
+ *
+ * @return boolean value indicating if the flush operation
+ * waits for termination of the flush process.
+ */
+ public boolean waitForFlush() {
+ assert(isOwningHandle());
+ return waitForFlush(nativeHandle_);
+ }
+
+ /**
+ * Set to true so that the flush proceeds immediately even if it means
+ * writes will stall for the duration of the flush.
+ *
+ * Set to false so that the operation will wait until it's possible to do
+ * the flush without causing stall or until required flush is performed by
+ * someone else (foreground call or background thread).
+ *
+ * Default: false
+ *
+ * @param allowWriteStall true to allow writes to stall for flush, false
+ * otherwise.
+ *
+ * @return instance of current FlushOptions.
+ */
+ public FlushOptions setAllowWriteStall(final boolean allowWriteStall) {
+ assert(isOwningHandle());
+ setAllowWriteStall(nativeHandle_, allowWriteStall);
+ return this;
+ }
+
+ /**
+ * Returns true if writes are allowed to stall for flushes to complete, false
+ * otherwise.
+ *
+ * @return true if writes are allowed to stall for flushes
+ */
+ public boolean allowWriteStall() {
+ assert(isOwningHandle());
+ return allowWriteStall(nativeHandle_);
+ }
+
+ private native static long newFlushOptions();
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native void setWaitForFlush(final long handle,
+ final boolean wait);
+ private native boolean waitForFlush(final long handle);
+ private native void setAllowWriteStall(final long handle,
+ final boolean allowWriteStall);
+ private native boolean allowWriteStall(final long handle);
+}
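A minimal sketch of a blocking manual flush; it assumes `db` is an open
RocksDB instance and that the caller handles RocksDBException:

    import org.rocksdb.FlushOptions;

    try (final FlushOptions flushOptions = new FlushOptions()
             .setWaitForFlush(true)          // block until the flush completes
             .setAllowWriteStall(false)) {   // wait rather than stall writers
      db.flush(flushOptions);
    }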
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
new file mode 100644
index 000000000..b943cd996
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
@@ -0,0 +1,174 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+/**
+ * The config for the hash linked-list memtable representation.
+ * Such a memtable contains a fixed-size array of buckets, where
+ * each bucket points to a sorted singly-linked
+ * list (or null if the bucket is empty).
+ *
+ * Note that since this mem-table representation relies on the
+ * key prefix, it is required to invoke one of the usePrefixExtractor
+ * functions to specify how to extract the key prefix from a key.
+ * If a proper prefix extractor is not set, then RocksDB will
+ * use the default memtable representation (SkipList) instead
+ * and post a warning in the LOG.
+ */
+public class HashLinkedListMemTableConfig extends MemTableConfig {
+ public static final long DEFAULT_BUCKET_COUNT = 50000;
+ public static final long DEFAULT_HUGE_PAGE_TLB_SIZE = 0;
+ public static final int DEFAULT_BUCKET_ENTRIES_LOG_THRES = 4096;
+ public static final boolean
+ DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true;
+ public static final int DEFAUL_THRESHOLD_USE_SKIPLIST = 256;
+
+ /**
+ * HashLinkedListMemTableConfig constructor
+ */
+ public HashLinkedListMemTableConfig() {
+ bucketCount_ = DEFAULT_BUCKET_COUNT;
+ hugePageTlbSize_ = DEFAULT_HUGE_PAGE_TLB_SIZE;
+ bucketEntriesLoggingThreshold_ = DEFAULT_BUCKET_ENTRIES_LOG_THRES;
+ ifLogBucketDistWhenFlush_ = DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH;
+ thresholdUseSkiplist_ = DEFAUL_THRESHOLD_USE_SKIPLIST;
+ }
+
+ /**
+ * Set the number of buckets in the fixed-size array used
+ * in the hash linked-list mem-table.
+ *
+ * @param count the number of hash buckets.
+ * @return the reference to the current HashLinkedListMemTableConfig.
+ */
+ public HashLinkedListMemTableConfig setBucketCount(
+ final long count) {
+ bucketCount_ = count;
+ return this;
+ }
+
+ /**
+ * Returns the number of buckets that will be used in the memtable
+ * created based on this config.
+ *
+ * @return the number of buckets
+ */
+ public long bucketCount() {
+ return bucketCount_;
+ }
+
+ /**
+   * <p>Set the size of the huge TLB page, or allocate the hashtable
+   * bytes from malloc if {@code size <= 0}.</p>
+ *
+ * <p>The user needs to reserve huge pages for it to be allocated,
+ * like: {@code sysctl -w vm.nr_hugepages=20}</p>
+ *
+ * <p>See linux documentation/vm/hugetlbpage.txt</p>
+ *
+   * @param size the huge TLB page size; if {@code <= 0}, the hashtable
+   *     bytes are allocated from malloc
+ * @return the reference to the current HashLinkedListMemTableConfig.
+ */
+ public HashLinkedListMemTableConfig setHugePageTlbSize(
+ final long size) {
+ hugePageTlbSize_ = size;
+ return this;
+ }
+
+ /**
+ * Returns the size value of hugePageTlbSize.
+ *
+ * @return the hugePageTlbSize.
+ */
+ public long hugePageTlbSize() {
+ return hugePageTlbSize_;
+ }
+
+ /**
+   * If the number of entries in one bucket exceeds this threshold,
+   * log it.
+ *
+ * @param threshold - number of entries in a single bucket before
+ * logging starts.
+ * @return the reference to the current HashLinkedListMemTableConfig.
+ */
+ public HashLinkedListMemTableConfig
+ setBucketEntriesLoggingThreshold(final int threshold) {
+ bucketEntriesLoggingThreshold_ = threshold;
+ return this;
+ }
+
+ /**
+ * Returns the maximum number of entries in one bucket before
+ * logging starts.
+ *
+ * @return maximum number of entries in one bucket before logging
+ * starts.
+ */
+ public int bucketEntriesLoggingThreshold() {
+ return bucketEntriesLoggingThreshold_;
+ }
+
+ /**
+   * If true the distribution of the number of entries will be logged.
+ *
+ * @param logDistribution - boolean parameter indicating if number
+ * of entry distribution shall be logged.
+ * @return the reference to the current HashLinkedListMemTableConfig.
+ */
+ public HashLinkedListMemTableConfig
+ setIfLogBucketDistWhenFlush(final boolean logDistribution) {
+ ifLogBucketDistWhenFlush_ = logDistribution;
+ return this;
+ }
+
+ /**
+ * Returns information about logging the distribution of
+ * number of entries on flush.
+ *
+   * @return whether the distribution of the number of entries shall be logged.
+ */
+ public boolean ifLogBucketDistWhenFlush() {
+ return ifLogBucketDistWhenFlush_;
+ }
+
+ /**
+   * Set the maximum number of entries in one bucket. Exceeding this value
+   * leads to a switch from LinkedList to SkipList.
+ *
+ * @param threshold maximum number of entries before SkipList is
+ * used.
+ * @return the reference to the current HashLinkedListMemTableConfig.
+ */
+ public HashLinkedListMemTableConfig
+ setThresholdUseSkiplist(final int threshold) {
+ thresholdUseSkiplist_ = threshold;
+ return this;
+ }
+
+ /**
+ * Returns entries per bucket threshold before LinkedList is
+ * replaced by SkipList usage for that bucket.
+ *
+ * @return entries per bucket threshold before SkipList is used.
+ */
+ public int thresholdUseSkiplist() {
+ return thresholdUseSkiplist_;
+ }
+
+ @Override protected long newMemTableFactoryHandle() {
+ return newMemTableFactoryHandle(bucketCount_, hugePageTlbSize_,
+ bucketEntriesLoggingThreshold_, ifLogBucketDistWhenFlush_,
+ thresholdUseSkiplist_);
+ }
+
+ private native long newMemTableFactoryHandle(long bucketCount,
+ long hugePageTlbSize, int bucketEntriesLoggingThreshold,
+ boolean ifLogBucketDistWhenFlush, int thresholdUseSkiplist)
+ throws IllegalArgumentException;
+
+ private long bucketCount_;
+ private long hugePageTlbSize_;
+ private int bucketEntriesLoggingThreshold_;
+ private boolean ifLogBucketDistWhenFlush_;
+ private int thresholdUseSkiplist_;
+}
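
A sketch of wiring this memtable into Options (illustrative; the 8-byte
prefix length and bucket count are arbitrary example values). Note the
prefix extractor, without which RocksDB falls back to SkipList as
described above:

    import org.rocksdb.HashLinkedListMemTableConfig;
    import org.rocksdb.Options;

    static Options hashLinkedListOptions() {
      return new Options()
          .setCreateIfMissing(true)
          .useFixedLengthPrefixExtractor(8) // required for hash memtables
          .setMemTableConfig(new HashLinkedListMemTableConfig()
              .setBucketCount(100_000));
    }
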
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
new file mode 100644
index 000000000..efc78b14e
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
@@ -0,0 +1,106 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+/**
+ * The config for hash skip-list mem-table representation.
+ * Such a mem-table representation contains a fixed-size array of
+ * buckets, where each bucket points to a skiplist (or null if the
+ * bucket is empty).
+ *
+ * Note that since this mem-table representation relies on the
+ * key prefix, it is required to invoke one of the usePrefixExtractor
+ * functions to specify how to extract the key prefix from a key.
+ * If a proper prefix extractor is not set, then RocksDB will
+ * use the default memtable representation (SkipList) instead
+ * and post a warning in the LOG.
+ */
+public class HashSkipListMemTableConfig extends MemTableConfig {
+ public static final int DEFAULT_BUCKET_COUNT = 1000000;
+ public static final int DEFAULT_BRANCHING_FACTOR = 4;
+ public static final int DEFAULT_HEIGHT = 4;
+
+ /**
+ * HashSkipListMemTableConfig constructor
+ */
+ public HashSkipListMemTableConfig() {
+ bucketCount_ = DEFAULT_BUCKET_COUNT;
+ branchingFactor_ = DEFAULT_BRANCHING_FACTOR;
+ height_ = DEFAULT_HEIGHT;
+ }
+
+ /**
+ * Set the number of hash buckets used in the hash skiplist memtable.
+ * Default = 1000000.
+ *
+ * @param count the number of hash buckets used in the hash
+ * skiplist memtable.
+ * @return the reference to the current HashSkipListMemTableConfig.
+ */
+ public HashSkipListMemTableConfig setBucketCount(
+ final long count) {
+ bucketCount_ = count;
+ return this;
+ }
+
+ /**
+ * @return the number of hash buckets
+ */
+ public long bucketCount() {
+ return bucketCount_;
+ }
+
+ /**
+ * Set the height of the skip list. Default = 4.
+ *
+ * @param height height to set.
+ *
+ * @return the reference to the current HashSkipListMemTableConfig.
+ */
+ public HashSkipListMemTableConfig setHeight(final int height) {
+ height_ = height;
+ return this;
+ }
+
+ /**
+ * @return the height of the skip list.
+ */
+ public int height() {
+ return height_;
+ }
+
+ /**
+ * Set the branching factor used in the hash skip-list memtable.
+ * This factor controls the probabilistic size ratio between adjacent
+ * links in the skip list.
+ *
+ * @param bf the probabilistic size ratio between adjacent link
+ * lists in the skip list.
+ * @return the reference to the current HashSkipListMemTableConfig.
+ */
+ public HashSkipListMemTableConfig setBranchingFactor(
+ final int bf) {
+ branchingFactor_ = bf;
+ return this;
+ }
+
+ /**
+ * @return branching factor, the probabilistic size ratio between
+ * adjacent links in the skip list.
+ */
+ public int branchingFactor() {
+ return branchingFactor_;
+ }
+
+ @Override protected long newMemTableFactoryHandle() {
+ return newMemTableFactoryHandle(
+ bucketCount_, height_, branchingFactor_);
+ }
+
+ private native long newMemTableFactoryHandle(
+ long bucketCount, int height, int branchingFactor)
+ throws IllegalArgumentException;
+
+ private long bucketCount_;
+ private int branchingFactor_;
+ private int height_;
+}
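
The hash skip-list variant is configured the same way (sketch; the
prefix-extractor requirement noted above applies here as well):

    import org.rocksdb.HashSkipListMemTableConfig;
    import org.rocksdb.Options;

    static Options hashSkipListOptions() {
      return new Options()
          .useFixedLengthPrefixExtractor(8)
          .setMemTableConfig(new HashSkipListMemTableConfig()
              .setBucketCount(1_000_000)
              .setHeight(4)
              .setBranchingFactor(4));
    }
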
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/HdfsEnv.java b/src/rocksdb/java/src/main/java/org/rocksdb/HdfsEnv.java
new file mode 100644
index 000000000..4d8d3bff6
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/HdfsEnv.java
@@ -0,0 +1,27 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * HDFS environment.
+ */
+public class HdfsEnv extends Env {
+
+ /**
+   * <p>Creates a new environment backed by HDFS.</p>
+ *
+ * <p>The caller must delete the result when it is
+ * no longer needed.</p>
+ *
+   * @param fsName the HDFS URI as a string, in the form "hdfs://hostname:port/"
+ */
+ public HdfsEnv(final String fsName) {
+ super(createHdfsEnv(fsName));
+ }
+
+ private static native long createHdfsEnv(final String fsName);
+ @Override protected final native void disposeInternal(final long handle);
+}
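
A sketch of pointing a DB at HDFS (illustrative; "hdfs://namenode:8020/"
is a placeholder address, and this only works with a RocksDB build that
includes HDFS support):

    import org.rocksdb.HdfsEnv;
    import org.rocksdb.Options;

    static Options hdfsOptions() {
      // the Env must stay alive for as long as any DB opened with it
      final HdfsEnv hdfsEnv = new HdfsEnv("hdfs://namenode:8020/");
      return new Options().setCreateIfMissing(true).setEnv(hdfsEnv);
    }
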
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java b/src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java
new file mode 100644
index 000000000..81d890883
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java
@@ -0,0 +1,75 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class HistogramData {
+ private final double median_;
+ private final double percentile95_;
+ private final double percentile99_;
+ private final double average_;
+ private final double standardDeviation_;
+ private final double max_;
+ private final long count_;
+ private final long sum_;
+ private final double min_;
+
+ public HistogramData(final double median, final double percentile95,
+ final double percentile99, final double average,
+ final double standardDeviation) {
+ this(median, percentile95, percentile99, average, standardDeviation, 0.0, 0, 0, 0.0);
+ }
+
+ public HistogramData(final double median, final double percentile95,
+ final double percentile99, final double average,
+ final double standardDeviation, final double max, final long count,
+ final long sum, final double min) {
+ median_ = median;
+ percentile95_ = percentile95;
+ percentile99_ = percentile99;
+ average_ = average;
+ standardDeviation_ = standardDeviation;
+ min_ = min;
+ max_ = max;
+ count_ = count;
+ sum_ = sum;
+ }
+
+ public double getMedian() {
+ return median_;
+ }
+
+ public double getPercentile95() {
+ return percentile95_;
+ }
+
+ public double getPercentile99() {
+ return percentile99_;
+ }
+
+ public double getAverage() {
+ return average_;
+ }
+
+ public double getStandardDeviation() {
+ return standardDeviation_;
+ }
+
+ public double getMax() {
+ return max_;
+ }
+
+ public long getCount() {
+ return count_;
+ }
+
+ public long getSum() {
+ return sum_;
+ }
+
+ public double getMin() {
+ return min_;
+ }
+}
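
A sketch of reading one histogram (illustrative; assumes `stats` was
registered via Options.setStatistics() on the DB and some reads have
already been served):

    import org.rocksdb.HistogramData;
    import org.rocksdb.HistogramType;
    import org.rocksdb.Statistics;

    static void printGetLatency(final Statistics stats) {
      final HistogramData h = stats.getHistogramData(HistogramType.DB_GET);
      System.out.printf("get: median=%.1f p95=%.1f p99=%.1f max=%.1f (n=%d)%n",
          h.getMedian(), h.getPercentile95(), h.getPercentile99(),
          h.getMax(), h.getCount());
    }
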
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java b/src/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java
new file mode 100644
index 000000000..696ee75f2
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java
@@ -0,0 +1,198 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public enum HistogramType {
+
+ DB_GET((byte) 0x0),
+
+ DB_WRITE((byte) 0x1),
+
+ COMPACTION_TIME((byte) 0x2),
+
+ SUBCOMPACTION_SETUP_TIME((byte) 0x3),
+
+ TABLE_SYNC_MICROS((byte) 0x4),
+
+ COMPACTION_OUTFILE_SYNC_MICROS((byte) 0x5),
+
+ WAL_FILE_SYNC_MICROS((byte) 0x6),
+
+ MANIFEST_FILE_SYNC_MICROS((byte) 0x7),
+
+ /**
+   * Time spent in IO during table open.
+ */
+ TABLE_OPEN_IO_MICROS((byte) 0x8),
+
+ DB_MULTIGET((byte) 0x9),
+
+ READ_BLOCK_COMPACTION_MICROS((byte) 0xA),
+
+ READ_BLOCK_GET_MICROS((byte) 0xB),
+
+ WRITE_RAW_BLOCK_MICROS((byte) 0xC),
+
+ STALL_L0_SLOWDOWN_COUNT((byte) 0xD),
+
+ STALL_MEMTABLE_COMPACTION_COUNT((byte) 0xE),
+
+ STALL_L0_NUM_FILES_COUNT((byte) 0xF),
+
+ HARD_RATE_LIMIT_DELAY_COUNT((byte) 0x10),
+
+ SOFT_RATE_LIMIT_DELAY_COUNT((byte) 0x11),
+
+ NUM_FILES_IN_SINGLE_COMPACTION((byte) 0x12),
+
+ DB_SEEK((byte) 0x13),
+
+ WRITE_STALL((byte) 0x14),
+
+ SST_READ_MICROS((byte) 0x15),
+
+ /**
+ * The number of subcompactions actually scheduled during a compaction.
+ */
+ NUM_SUBCOMPACTIONS_SCHEDULED((byte) 0x16),
+
+ /**
+ * Value size distribution in each operation.
+ */
+ BYTES_PER_READ((byte) 0x17),
+ BYTES_PER_WRITE((byte) 0x18),
+ BYTES_PER_MULTIGET((byte) 0x19),
+
+ /**
+ * number of bytes compressed.
+ */
+ BYTES_COMPRESSED((byte) 0x1A),
+
+ /**
+ * number of bytes decompressed.
+ *
+   * The byte count refers to the uncompressed data, i.e. before
+   * compression and after decompression, respectively.
+ */
+ BYTES_DECOMPRESSED((byte) 0x1B),
+
+ COMPRESSION_TIMES_NANOS((byte) 0x1C),
+
+ DECOMPRESSION_TIMES_NANOS((byte) 0x1D),
+
+ READ_NUM_MERGE_OPERANDS((byte) 0x1E),
+
+ /**
+ * Time spent flushing memtable to disk.
+ */
+ FLUSH_TIME((byte) 0x20),
+
+ /**
+ * Size of keys written to BlobDB.
+ */
+ BLOB_DB_KEY_SIZE((byte) 0x21),
+
+ /**
+ * Size of values written to BlobDB.
+ */
+ BLOB_DB_VALUE_SIZE((byte) 0x22),
+
+ /**
+ * BlobDB Put/PutWithTTL/PutUntil/Write latency.
+ */
+ BLOB_DB_WRITE_MICROS((byte) 0x23),
+
+ /**
+   * BlobDB Get latency.
+ */
+ BLOB_DB_GET_MICROS((byte) 0x24),
+
+ /**
+ * BlobDB MultiGet latency.
+ */
+ BLOB_DB_MULTIGET_MICROS((byte) 0x25),
+
+ /**
+ * BlobDB Seek/SeekToFirst/SeekToLast/SeekForPrev latency.
+ */
+ BLOB_DB_SEEK_MICROS((byte) 0x26),
+
+ /**
+ * BlobDB Next latency.
+ */
+ BLOB_DB_NEXT_MICROS((byte) 0x27),
+
+ /**
+ * BlobDB Prev latency.
+ */
+ BLOB_DB_PREV_MICROS((byte) 0x28),
+
+ /**
+ * Blob file write latency.
+ */
+ BLOB_DB_BLOB_FILE_WRITE_MICROS((byte) 0x29),
+
+ /**
+ * Blob file read latency.
+ */
+ BLOB_DB_BLOB_FILE_READ_MICROS((byte) 0x2A),
+
+ /**
+ * Blob file sync latency.
+ */
+ BLOB_DB_BLOB_FILE_SYNC_MICROS((byte) 0x2B),
+
+ /**
+ * BlobDB garbage collection time.
+ */
+ BLOB_DB_GC_MICROS((byte) 0x2C),
+
+ /**
+ * BlobDB compression time.
+ */
+ BLOB_DB_COMPRESSION_MICROS((byte) 0x2D),
+
+ /**
+ * BlobDB decompression time.
+ */
+ BLOB_DB_DECOMPRESSION_MICROS((byte) 0x2E),
+
+ // 0x1F for backwards compatibility on current minor version.
+ HISTOGRAM_ENUM_MAX((byte) 0x1F);
+
+ private final byte value;
+
+ HistogramType(final byte value) {
+ this.value = value;
+ }
+
+ /**
+   * Returns the byte value of the enumeration's value.
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get Histogram type by byte value.
+ *
+ * @param value byte representation of HistogramType.
+ *
+ * @return {@link org.rocksdb.HistogramType} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ public static HistogramType getHistogramType(final byte value) {
+ for (final HistogramType histogramType : HistogramType.values()) {
+ if (histogramType.getValue() == value) {
+ return histogramType;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for HistogramType.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Holder.java b/src/rocksdb/java/src/main/java/org/rocksdb/Holder.java
new file mode 100644
index 000000000..716a0bda0
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Holder.java
@@ -0,0 +1,46 @@
+// Copyright (c) 2016, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Simple instance reference wrapper.
+ */
+public class Holder<T> {
+ private /* @Nullable */ T value;
+
+ /**
+ * Constructs a new Holder with null instance.
+ */
+ public Holder() {
+ }
+
+ /**
+ * Constructs a new Holder.
+ *
+ * @param value the instance or null
+ */
+ public Holder(/* @Nullable */ final T value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the instance reference.
+ *
+ * @return value the instance reference or null
+ */
+ public /* @Nullable */ T getValue() {
+ return value;
+ }
+
+ /**
+ * Set the instance reference.
+ *
+ * @param value the instance reference or null
+ */
+ public void setValue(/* @Nullable */ final T value) {
+ this.value = value;
+ }
+}
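
A sketch of the typical out-parameter use of Holder, here with
RocksDB#keyMayExist (illustrative; assumes an open `db` and a RocksDB
version that exposes the Holder-based overload):

    import org.rocksdb.Holder;
    import org.rocksdb.RocksDB;

    static byte[] fastPathGet(final RocksDB db, final byte[] key) {
      final Holder<byte[]> value = new Holder<>();
      // if the value was found in memory it is handed back via the Holder
      if (db.keyMayExist(key, value)) {
        return value.getValue(); // may be null if only existence is known
      }
      return null;
    }
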
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/IndexType.java b/src/rocksdb/java/src/main/java/org/rocksdb/IndexType.java
new file mode 100644
index 000000000..04e481465
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/IndexType.java
@@ -0,0 +1,41 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * IndexType used in conjunction with BlockBasedTable.
+ */
+public enum IndexType {
+ /**
+ * A space efficient index block that is optimized for
+ * binary-search-based index.
+ */
+ kBinarySearch((byte) 0),
+ /**
+ * The hash index, if enabled, will do the hash lookup when
+ * {@code Options.prefix_extractor} is provided.
+ */
+ kHashSearch((byte) 1),
+ /**
+ * A two-level index implementation. Both levels are binary search indexes.
+ */
+ kTwoLevelIndexSearch((byte) 2);
+
+ /**
+   * Returns the byte value of the enumeration's value.
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value_;
+ }
+
+ IndexType(byte value) {
+ value_ = value;
+ }
+
+ private final byte value_;
+}
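
A sketch of selecting an index type through BlockBasedTableConfig
(illustrative; kHashSearch additionally requires a prefix extractor on
the Options, hence the example extractor below):

    import org.rocksdb.BlockBasedTableConfig;
    import org.rocksdb.IndexType;
    import org.rocksdb.Options;

    static Options hashIndexOptions() {
      return new Options()
          .useFixedLengthPrefixExtractor(8) // needed by kHashSearch
          .setTableFormatConfig(new BlockBasedTableConfig()
              .setIndexType(IndexType.kHashSearch));
    }
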
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java b/src/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java
new file mode 100644
index 000000000..b7c0f0700
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java
@@ -0,0 +1,49 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+/**
+ * RocksDB log levels.
+ */
+public enum InfoLogLevel {
+ DEBUG_LEVEL((byte)0),
+ INFO_LEVEL((byte)1),
+ WARN_LEVEL((byte)2),
+ ERROR_LEVEL((byte)3),
+ FATAL_LEVEL((byte)4),
+ HEADER_LEVEL((byte)5),
+ NUM_INFO_LOG_LEVELS((byte)6);
+
+ private final byte value_;
+
+ private InfoLogLevel(final byte value) {
+ value_ = value;
+ }
+
+ /**
+   * Returns the byte value of the enumeration's value.
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value_;
+ }
+
+ /**
+ * Get InfoLogLevel by byte value.
+ *
+ * @param value byte representation of InfoLogLevel.
+ *
+ * @return {@link org.rocksdb.InfoLogLevel} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ public static InfoLogLevel getInfoLogLevel(final byte value) {
+ for (final InfoLogLevel infoLogLevel : InfoLogLevel.values()) {
+ if (infoLogLevel.getValue() == value) {
+ return infoLogLevel;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for InfoLogLevel.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
new file mode 100644
index 000000000..a6a308daa
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
@@ -0,0 +1,227 @@
+package org.rocksdb;
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+import java.util.List;
+
+/**
+ * IngestExternalFileOptions is used by
+ * {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}.
+ */
+public class IngestExternalFileOptions extends RocksObject {
+
+ public IngestExternalFileOptions() {
+ super(newIngestExternalFileOptions());
+ }
+
+ /**
+ * @param moveFiles {@link #setMoveFiles(boolean)}
+ * @param snapshotConsistency {@link #setSnapshotConsistency(boolean)}
+ * @param allowGlobalSeqNo {@link #setAllowGlobalSeqNo(boolean)}
+ * @param allowBlockingFlush {@link #setAllowBlockingFlush(boolean)}
+ */
+ public IngestExternalFileOptions(final boolean moveFiles,
+ final boolean snapshotConsistency, final boolean allowGlobalSeqNo,
+ final boolean allowBlockingFlush) {
+ super(newIngestExternalFileOptions(moveFiles, snapshotConsistency,
+ allowGlobalSeqNo, allowBlockingFlush));
+ }
+
+ /**
+ * Can be set to true to move the files instead of copying them.
+ *
+ * @return true if files will be moved
+ */
+ public boolean moveFiles() {
+ return moveFiles(nativeHandle_);
+ }
+
+ /**
+ * Can be set to true to move the files instead of copying them.
+ *
+ * @param moveFiles true if files should be moved instead of copied
+ *
+ * @return the reference to the current IngestExternalFileOptions.
+ */
+ public IngestExternalFileOptions setMoveFiles(final boolean moveFiles) {
+ setMoveFiles(nativeHandle_, moveFiles);
+ return this;
+ }
+
+ /**
+   * If set to false, keys from an ingested file could appear in existing
+   * snapshots that were created before the file was ingested.
+ *
+ * @return true if snapshot consistency is assured
+ */
+ public boolean snapshotConsistency() {
+ return snapshotConsistency(nativeHandle_);
+ }
+
+ /**
+   * If set to false, keys from an ingested file could appear in existing
+   * snapshots that were created before the file was ingested.
+ *
+ * @param snapshotConsistency true if snapshot consistency is required
+ *
+ * @return the reference to the current IngestExternalFileOptions.
+ */
+ public IngestExternalFileOptions setSnapshotConsistency(
+ final boolean snapshotConsistency) {
+ setSnapshotConsistency(nativeHandle_, snapshotConsistency);
+ return this;
+ }
+
+ /**
+ * If set to false, {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}
+ * will fail if the file key range overlaps with existing keys or tombstones in the DB.
+ *
+ * @return true if global seq numbers are assured
+ */
+ public boolean allowGlobalSeqNo() {
+ return allowGlobalSeqNo(nativeHandle_);
+ }
+
+ /**
+ * If set to false, {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}
+ * will fail if the file key range overlaps with existing keys or tombstones in the DB.
+ *
+ * @param allowGlobalSeqNo true if global seq numbers are required
+ *
+ * @return the reference to the current IngestExternalFileOptions.
+ */
+ public IngestExternalFileOptions setAllowGlobalSeqNo(
+ final boolean allowGlobalSeqNo) {
+ setAllowGlobalSeqNo(nativeHandle_, allowGlobalSeqNo);
+ return this;
+ }
+
+ /**
+ * If set to false and the file key range overlaps with the memtable key range
+ * (memtable flush required), IngestExternalFile will fail.
+ *
+ * @return true if blocking flushes may occur
+ */
+ public boolean allowBlockingFlush() {
+ return allowBlockingFlush(nativeHandle_);
+ }
+
+ /**
+ * If set to false and the file key range overlaps with the memtable key range
+ * (memtable flush required), IngestExternalFile will fail.
+ *
+ * @param allowBlockingFlush true if blocking flushes are allowed
+ *
+ * @return the reference to the current IngestExternalFileOptions.
+ */
+ public IngestExternalFileOptions setAllowBlockingFlush(
+ final boolean allowBlockingFlush) {
+ setAllowBlockingFlush(nativeHandle_, allowBlockingFlush);
+ return this;
+ }
+
+ /**
+ * Returns true if duplicate keys in the file being ingested are
+ * to be skipped rather than overwriting existing data under that key.
+ *
+ * @return true if duplicate keys in the file being ingested are to be
+ * skipped, false otherwise.
+ */
+ public boolean ingestBehind() {
+ return ingestBehind(nativeHandle_);
+ }
+
+ /**
+ * Set to true if you would like duplicate keys in the file being ingested
+ * to be skipped rather than overwriting existing data under that key.
+ *
+   * Use case: back-fill of some historical data in the database without
+   * overwriting the existing newer version of the data.
+ *
+ * This option could only be used if the DB has been running
+ * with DBOptions#allowIngestBehind() == true since the dawn of time.
+ *
+ * All files will be ingested at the bottommost level with seqno=0.
+ *
+ * Default: false
+ *
+ * @param ingestBehind true if you would like duplicate keys in the file being
+ * ingested to be skipped.
+ *
+ * @return the reference to the current IngestExternalFileOptions.
+ */
+ public IngestExternalFileOptions setIngestBehind(final boolean ingestBehind) {
+ setIngestBehind(nativeHandle_, ingestBehind);
+ return this;
+ }
+
+ /**
+   * Returns true if the global_seqno is written to a given offset
+ * in the external SST file for backward compatibility.
+ *
+ * See {@link #setWriteGlobalSeqno(boolean)}.
+ *
+ * @return true if the global_seqno is written to a given offset,
+ * false otherwise.
+ */
+ public boolean writeGlobalSeqno() {
+ return writeGlobalSeqno(nativeHandle_);
+ }
+
+ /**
+ * Set to true if you would like to write the global_seqno to a given offset
+ * in the external SST file for backward compatibility.
+ *
+ * Older versions of RocksDB write the global_seqno to a given offset within
+ * the ingested SST files, and new versions of RocksDB do not.
+ *
+ * If you ingest an external SST using new version of RocksDB and would like
+ * to be able to downgrade to an older version of RocksDB, you should set
+ * {@link #writeGlobalSeqno()} to true.
+ *
+ * If your service is just starting to use the new RocksDB, we recommend that
+ * you set this option to false, which brings two benefits:
+ * 1. No extra random write for global_seqno during ingestion.
+   * 2. Since the external SST file is not modified, its checksum can
+   *    still be verified.
+ *
+ * We have a plan to set this option to false by default in the future.
+ *
+ * Default: true
+ *
+   * @param writeGlobalSeqno true to write the global_seqno to a given offset,
+ * false otherwise
+ *
+ * @return the reference to the current IngestExternalFileOptions.
+ */
+ public IngestExternalFileOptions setWriteGlobalSeqno(
+ final boolean writeGlobalSeqno) {
+ setWriteGlobalSeqno(nativeHandle_, writeGlobalSeqno);
+ return this;
+ }
+
+ private native static long newIngestExternalFileOptions();
+ private native static long newIngestExternalFileOptions(
+ final boolean moveFiles, final boolean snapshotConsistency,
+ final boolean allowGlobalSeqNo, final boolean allowBlockingFlush);
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native boolean moveFiles(final long handle);
+ private native void setMoveFiles(final long handle, final boolean move_files);
+ private native boolean snapshotConsistency(final long handle);
+ private native void setSnapshotConsistency(final long handle,
+ final boolean snapshotConsistency);
+ private native boolean allowGlobalSeqNo(final long handle);
+ private native void setAllowGlobalSeqNo(final long handle,
+      final boolean allowGlobalSeqNo);
+ private native boolean allowBlockingFlush(final long handle);
+ private native void setAllowBlockingFlush(final long handle,
+ final boolean allowBlockingFlush);
+ private native boolean ingestBehind(final long handle);
+ private native void setIngestBehind(final long handle,
+ final boolean ingestBehind);
+ private native boolean writeGlobalSeqno(final long handle);
+ private native void setWriteGlobalSeqno(final long handle,
+ final boolean writeGlobalSeqNo);
+}
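
A sketch of an ingestion call (illustrative; assumes an open `db`, and
"/tmp/bulk.sst" stands in for a file produced e.g. by SstFileWriter):

    import java.util.Arrays;
    import org.rocksdb.IngestExternalFileOptions;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    static void ingest(final RocksDB db) throws RocksDBException {
      try (final IngestExternalFileOptions ingestOptions =
               new IngestExternalFileOptions().setMoveFiles(true)) {
        db.ingestExternalFile(Arrays.asList("/tmp/bulk.sst"), ingestOptions);
      }
    }
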
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java b/src/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java
new file mode 100644
index 000000000..5e5bdeea2
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java
@@ -0,0 +1,82 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Least Recently Used Cache
+ */
+public class LRUCache extends Cache {
+
+ /**
+ * Create a new cache with a fixed size capacity
+ *
+ * @param capacity The fixed size capacity of the cache
+ */
+ public LRUCache(final long capacity) {
+ this(capacity, -1, false, 0.0);
+ }
+
+ /**
+ * Create a new cache with a fixed size capacity. The cache is sharded
+ * to 2^numShardBits shards, by hash of the key. The total capacity
+ * is divided and evenly assigned to each shard.
+ * numShardBits = -1 means it is automatically determined: every shard
+ * will be at least 512KB and number of shard bits will not exceed 6.
+ *
+ * @param capacity The fixed size capacity of the cache
+ * @param numShardBits The cache is sharded to 2^numShardBits shards,
+ * by hash of the key
+ */
+ public LRUCache(final long capacity, final int numShardBits) {
+    super(newLRUCache(capacity, numShardBits, false, 0.0));
+ }
+
+ /**
+ * Create a new cache with a fixed size capacity. The cache is sharded
+ * to 2^numShardBits shards, by hash of the key. The total capacity
+ * is divided and evenly assigned to each shard. If strictCapacityLimit
+ * is set, inserting into the cache will fail when the cache is full.
+ * numShardBits = -1 means it is automatically determined: every shard
+ * will be at least 512KB and number of shard bits will not exceed 6.
+ *
+ * @param capacity The fixed size capacity of the cache
+ * @param numShardBits The cache is sharded to 2^numShardBits shards,
+ * by hash of the key
+   * @param strictCapacityLimit if true, inserting into the cache fails when the cache is full
+ */
+ public LRUCache(final long capacity, final int numShardBits,
+ final boolean strictCapacityLimit) {
+    super(newLRUCache(capacity, numShardBits, strictCapacityLimit, 0.0));
+ }
+
+ /**
+ * Create a new cache with a fixed size capacity. The cache is sharded
+ * to 2^numShardBits shards, by hash of the key. The total capacity
+ * is divided and evenly assigned to each shard. If strictCapacityLimit
+ * is set, inserting into the cache will fail when the cache is full. The
+ * user can also set the percentage of the cache reserved for high-priority
+ * entries via highPriPoolRatio.
+ * numShardBits = -1 means it is automatically determined: every shard
+ * will be at least 512KB and number of shard bits will not exceed 6.
+ *
+ * @param capacity The fixed size capacity of the cache
+ * @param numShardBits The cache is sharded to 2^numShardBits shards,
+ * by hash of the key
+   * @param strictCapacityLimit if true, inserting into the cache fails when the cache is full
+   * @param highPriPoolRatio percentage of the cache reserved for high-priority
+   *     entries
+ */
+ public LRUCache(final long capacity, final int numShardBits,
+ final boolean strictCapacityLimit, final double highPriPoolRatio) {
+ super(newLRUCache(capacity, numShardBits, strictCapacityLimit,
+ highPriPoolRatio));
+ }
+
+ private native static long newLRUCache(final long capacity,
+ final int numShardBits, final boolean strictCapacityLimit,
+ final double highPriPoolRatio);
+ @Override protected final native void disposeInternal(final long handle);
+}
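
A sketch of using an LRUCache as the block cache (illustrative; the
sizes and ratios are arbitrary example values):

    import org.rocksdb.BlockBasedTableConfig;
    import org.rocksdb.LRUCache;
    import org.rocksdb.Options;

    static Options cachedOptions() {
      // 256 MiB capacity, 2^6 = 64 shards, no strict limit, 10% high-pri pool
      final LRUCache cache = new LRUCache(256L << 20, 6, false, 0.1);
      return new Options().setTableFormatConfig(
          new BlockBasedTableConfig().setBlockCache(cache));
    }
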
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/LevelMetaData.java b/src/rocksdb/java/src/main/java/org/rocksdb/LevelMetaData.java
new file mode 100644
index 000000000..c5685098b
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/LevelMetaData.java
@@ -0,0 +1,56 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * The metadata that describes a level.
+ */
+public class LevelMetaData {
+ private final int level;
+ private final long size;
+ private final SstFileMetaData[] files;
+
+ /**
+ * Called from JNI C++
+ */
+ private LevelMetaData(final int level, final long size,
+ final SstFileMetaData[] files) {
+ this.level = level;
+ this.size = size;
+ this.files = files;
+ }
+
+ /**
+ * The level which this meta data describes.
+ *
+ * @return the level
+ */
+ public int level() {
+ return level;
+ }
+
+ /**
+   * The size of this level in bytes, which is equal to the sum of
+   * the file sizes of its {@link #files()}.
+ *
+ * @return the size
+ */
+ public long size() {
+ return size;
+ }
+
+ /**
+ * The metadata of all sst files in this level.
+ *
+ * @return the metadata of the files
+ */
+ public List<SstFileMetaData> files() {
+ return Arrays.asList(files);
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/LiveFileMetaData.java b/src/rocksdb/java/src/main/java/org/rocksdb/LiveFileMetaData.java
new file mode 100644
index 000000000..35d883e18
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/LiveFileMetaData.java
@@ -0,0 +1,55 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The full set of metadata associated with each SST file.
+ */
+public class LiveFileMetaData extends SstFileMetaData {
+ private final byte[] columnFamilyName;
+ private final int level;
+
+ /**
+ * Called from JNI C++
+ */
+ private LiveFileMetaData(
+ final byte[] columnFamilyName,
+ final int level,
+ final String fileName,
+ final String path,
+ final long size,
+ final long smallestSeqno,
+ final long largestSeqno,
+ final byte[] smallestKey,
+ final byte[] largestKey,
+ final long numReadsSampled,
+ final boolean beingCompacted,
+ final long numEntries,
+ final long numDeletions) {
+ super(fileName, path, size, smallestSeqno, largestSeqno, smallestKey,
+ largestKey, numReadsSampled, beingCompacted, numEntries, numDeletions);
+ this.columnFamilyName = columnFamilyName;
+ this.level = level;
+ }
+
+ /**
+ * Get the name of the column family.
+ *
+ * @return the name of the column family
+ */
+ public byte[] columnFamilyName() {
+ return columnFamilyName;
+ }
+
+ /**
+ * Get the level at which this file resides.
+ *
+ * @return the level at which the file resides.
+ */
+ public int level() {
+ return level;
+ }
+}
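
A sketch of listing live SST files (illustrative; assumes an open `db`):

    import java.nio.charset.StandardCharsets;
    import org.rocksdb.LiveFileMetaData;
    import org.rocksdb.RocksDB;

    static void listLiveFiles(final RocksDB db) {
      for (final LiveFileMetaData meta : db.getLiveFilesMetaData()) {
        System.out.printf("cf=%s level=%d file=%s size=%d%n",
            new String(meta.columnFamilyName(), StandardCharsets.UTF_8),
            meta.level(), meta.fileName(), meta.size());
      }
    }
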
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/LogFile.java b/src/rocksdb/java/src/main/java/org/rocksdb/LogFile.java
new file mode 100644
index 000000000..ef24a6427
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/LogFile.java
@@ -0,0 +1,75 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class LogFile {
+ private final String pathName;
+ private final long logNumber;
+ private final WalFileType type;
+ private final long startSequence;
+ private final long sizeFileBytes;
+
+ /**
+ * Called from JNI C++
+ */
+ private LogFile(final String pathName, final long logNumber,
+ final byte walFileTypeValue, final long startSequence,
+ final long sizeFileBytes) {
+ this.pathName = pathName;
+ this.logNumber = logNumber;
+ this.type = WalFileType.fromValue(walFileTypeValue);
+ this.startSequence = startSequence;
+ this.sizeFileBytes = sizeFileBytes;
+ }
+
+ /**
+   * Returns the log file's pathname relative to the main db dir.
+   * E.g. for a live log file: /000003.log;
+   * for an archived log file: /archive/000003.log
+ *
+ * @return log file's pathname
+ */
+ public String pathName() {
+ return pathName;
+ }
+
+ /**
+ * Primary identifier for log file.
+   * This is directly proportional to the creation time of the log file.
+ *
+ * @return the log number
+ */
+ public long logNumber() {
+ return logNumber;
+ }
+
+ /**
+ * Log file can be either alive or archived.
+ *
+ * @return the type of the log file.
+ */
+ public WalFileType type() {
+ return type;
+ }
+
+ /**
+ * Starting sequence number of writebatch written in this log file.
+ *
+   * @return the starting sequence number
+ */
+ public long startSequence() {
+ return startSequence;
+ }
+
+ /**
+   * Size of the log file on disk, in bytes.
+ *
+ * @return size of log file
+ */
+ public long sizeFileBytes() {
+ return sizeFileBytes;
+ }
+}
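
A sketch of walking the WAL files (illustrative; assumes an open `db`):

    import org.rocksdb.LogFile;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    static void listWalFiles(final RocksDB db) throws RocksDBException {
      for (final LogFile wal : db.getSortedWalFiles()) {
        System.out.printf("%s #%d %s startSeq=%d %d bytes%n",
            wal.pathName(), wal.logNumber(), wal.type(),
            wal.startSequence(), wal.sizeFileBytes());
      }
    }
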
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Logger.java b/src/rocksdb/java/src/main/java/org/rocksdb/Logger.java
new file mode 100644
index 000000000..00a5d5674
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Logger.java
@@ -0,0 +1,122 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * <p>This class provides a custom logger functionality
+ * in Java which wraps {@code RocksDB} logging facilities.
+ * </p>
+ *
+ * <p>Using this class RocksDB can log with common
+ * Java logging APIs like Log4j or Slf4j without keeping
+ * database logs in the filesystem.</p>
+ *
+ * <strong>Performance</strong>
+ * <p>There are certain performance penalties when using a Java
+ * {@code Logger} implementation within production code.
+ * </p>
+ *
+ * <p>
+ * A log level can be set using {@link org.rocksdb.Options} or
+ * {@link Logger#setInfoLogLevel(InfoLogLevel)}. The set log level
+ * influences the underlying native code. Each log message is
+ * checked against the configured log level; only if the message passes
+ * that check are native allocations made and data structures populated.
+ * </p>
+ *
+ * <p>Every log message which will be emitted by native code will
+ * trigger expensive native to Java transitions. So the preferred
+ * setting for production use is either
+ * {@link org.rocksdb.InfoLogLevel#ERROR_LEVEL} or
+ * {@link org.rocksdb.InfoLogLevel#FATAL_LEVEL}.
+ * </p>
+ */
+public abstract class Logger extends RocksCallbackObject {
+
+ private final static long WITH_OPTIONS = 0;
+ private final static long WITH_DBOPTIONS = 1;
+
+ /**
+ * <p>AbstractLogger constructor.</p>
+ *
+ * <p><strong>Important:</strong> the log level set within
+ * the {@link org.rocksdb.Options} instance will be used as
+ * maximum log level of RocksDB.</p>
+ *
+ * @param options {@link org.rocksdb.Options} instance.
+ */
+ public Logger(final Options options) {
+    super(options.nativeHandle_, WITH_OPTIONS);
+  }
+
+ /**
+ * <p>AbstractLogger constructor.</p>
+ *
+ * <p><strong>Important:</strong> the log level set within
+ * the {@link org.rocksdb.DBOptions} instance will be used
+ * as maximum log level of RocksDB.</p>
+ *
+ * @param dboptions {@link org.rocksdb.DBOptions} instance.
+ */
+ public Logger(final DBOptions dboptions) {
+ super(dboptions.nativeHandle_, WITH_DBOPTIONS);
+ }
+
+ @Override
+ protected long initializeNative(long... nativeParameterHandles) {
+ if(nativeParameterHandles[1] == WITH_OPTIONS) {
+ return createNewLoggerOptions(nativeParameterHandles[0]);
+ } else if(nativeParameterHandles[1] == WITH_DBOPTIONS) {
+ return createNewLoggerDbOptions(nativeParameterHandles[0]);
+ } else {
+ throw new IllegalArgumentException();
+ }
+ }
+
+ /**
+   * Set the {@link org.rocksdb.InfoLogLevel} of the AbstractLogger.
+ *
+ * @param infoLogLevel {@link org.rocksdb.InfoLogLevel} instance.
+ */
+ public void setInfoLogLevel(final InfoLogLevel infoLogLevel) {
+ setInfoLogLevel(nativeHandle_, infoLogLevel.getValue());
+ }
+
+ /**
+   * Return the logger's log level.
+ *
+ * @return {@link org.rocksdb.InfoLogLevel} instance.
+ */
+ public InfoLogLevel infoLogLevel() {
+ return InfoLogLevel.getInfoLogLevel(
+ infoLogLevel(nativeHandle_));
+ }
+
+ protected abstract void log(InfoLogLevel infoLogLevel,
+ String logMsg);
+
+ protected native long createNewLoggerOptions(
+ long options);
+ protected native long createNewLoggerDbOptions(
+ long dbOptions);
+ protected native void setInfoLogLevel(long handle,
+ byte infoLogLevel);
+ protected native byte infoLogLevel(long handle);
+
+ /**
+ * We override {@link RocksCallbackObject#disposeInternal()}
+   * because disposing of a rocksdb::LoggerJniCallback requires
+   * a slightly different approach, as it is held as a std::shared_ptr.
+ */
+ @Override
+ protected void disposeInternal() {
+ disposeInternal(nativeHandle_);
+ }
+
+ private native void disposeInternal(final long handle);
+}
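
A sketch of a custom Logger (illustrative; a real deployment would
forward to Slf4j/Log4j rather than System.err):

    import org.rocksdb.InfoLogLevel;
    import org.rocksdb.Logger;
    import org.rocksdb.Options;

    static Logger stderrLogger(final Options options) {
      // the log level already set on `options` caps what reaches log()
      return new Logger(options) {
        @Override
        protected void log(final InfoLogLevel level, final String msg) {
          System.err.println("[rocksdb " + level + "] " + msg);
        }
      };
    }
    // usage: options.setLogger(stderrLogger(options)) before opening the DB
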
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java
new file mode 100644
index 000000000..83cee974a
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java
@@ -0,0 +1,29 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * MemTableConfig is used to configure the internal mem-table of a RocksDB.
+ * Each memtable representation is required to have such a sub-class so
+ * that Java developers can use it.
+ *
+ * To make a RocksDB use a specific MemTable format, its associated
+ * MemTableConfig should be properly set and passed into Options via
+ * Options.setMemTableConfig(), and the db opened using those Options.
+ *
+ * @see Options
+ */
+public abstract class MemTableConfig {
+ /**
+ * This function should only be called by Options.setMemTableConfig(),
+ * which will create a c++ shared-pointer to the c++ MemTableRepFactory
+   * that is associated with the Java MemTableConfig.
+ *
+ * @see Options#setMemTableConfig(MemTableConfig)
+ *
+ * @return native handle address to native memory table instance.
+ */
+ abstract protected long newMemTableFactoryHandle();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUsageType.java b/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUsageType.java
new file mode 100644
index 000000000..6010ce7af
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUsageType.java
@@ -0,0 +1,72 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * MemoryUsageType
+ *
+ * <p>The value will be used as a key to indicate the type of memory usage
+ * described</p>
+ */
+public enum MemoryUsageType {
+ /**
+ * Memory usage of all the mem-tables.
+ */
+ kMemTableTotal((byte) 0),
+ /**
+ * Memory usage of those un-flushed mem-tables.
+ */
+ kMemTableUnFlushed((byte) 1),
+ /**
+ * Memory usage of all the table readers.
+ */
+ kTableReadersTotal((byte) 2),
+ /**
+ * Memory usage by Cache.
+ */
+ kCacheTotal((byte) 3),
+ /**
+ * Max usage types - copied to keep 1:1 with native.
+ */
+ kNumUsageTypes((byte) 4);
+
+ /**
+   * Returns the byte value of the enumeration's value.
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value_;
+ }
+
+ /**
+ * <p>Get the MemoryUsageType enumeration value by
+ * passing the byte identifier to this method.</p>
+ *
+ * @param byteIdentifier of MemoryUsageType.
+ *
+ * @return MemoryUsageType instance.
+ *
+ * @throws IllegalArgumentException if the usage type for the byteIdentifier
+ * cannot be found
+ */
+ public static MemoryUsageType getMemoryUsageType(final byte byteIdentifier) {
+ for (final MemoryUsageType memoryUsageType : MemoryUsageType.values()) {
+ if (memoryUsageType.getValue() == byteIdentifier) {
+ return memoryUsageType;
+ }
+ }
+
+ throw new IllegalArgumentException(
+ "Illegal value provided for MemoryUsageType.");
+ }
+
+ MemoryUsageType(byte value) {
+ value_ = value;
+ }
+
+ private final byte value_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUtil.java b/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUtil.java
new file mode 100644
index 000000000..52b2175e6
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUtil.java
@@ -0,0 +1,60 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.*;
+
+/**
+ * JNI passthrough for MemoryUtil.
+ */
+public class MemoryUtil {
+
+ /**
+ * <p>Returns the approximate memory usage of different types in the input
+ * list of DBs and Cache set. For instance, in the output map the key
+ * kMemTableTotal will be associated with the memory
+ * usage of all the mem-tables from all the input rocksdb instances.</p>
+ *
+ * <p>Note that for memory usage inside the Cache class, we will
+ * only report the usage of the input "caches" set, without
+ * including the Cache usage of the DBs in the input "dbs" list.</p>
+ *
+ * @param dbs List of dbs to collect memory usage for.
+ * @param caches Set of caches to collect memory usage for.
+ * @return Map from {@link MemoryUsageType} to memory usage as a {@link Long}.
+ */
+ public static Map<MemoryUsageType, Long> getApproximateMemoryUsageByType(final List<RocksDB> dbs, final Set<Cache> caches) {
+ int dbCount = (dbs == null) ? 0 : dbs.size();
+ int cacheCount = (caches == null) ? 0 : caches.size();
+ long[] dbHandles = new long[dbCount];
+ long[] cacheHandles = new long[cacheCount];
+ if (dbCount > 0) {
+ ListIterator<RocksDB> dbIter = dbs.listIterator();
+ while (dbIter.hasNext()) {
+ dbHandles[dbIter.nextIndex()] = dbIter.next().nativeHandle_;
+ }
+ }
+ if (cacheCount > 0) {
+      // NOTE: a Set exposes no index, so track the array position
+      // manually while iterating.
+ int i = 0;
+ for (Cache cache : caches) {
+ cacheHandles[i] = cache.nativeHandle_;
+ i++;
+ }
+ }
+ Map<Byte, Long> byteOutput = getApproximateMemoryUsageByType(dbHandles, cacheHandles);
+ Map<MemoryUsageType, Long> output = new HashMap<>();
+ for(Map.Entry<Byte, Long> longEntry : byteOutput.entrySet()) {
+ output.put(MemoryUsageType.getMemoryUsageType(longEntry.getKey()), longEntry.getValue());
+ }
+ return output;
+ }
+
+ private native static Map<Byte, Long> getApproximateMemoryUsageByType(final long[] dbHandles,
+ final long[] cacheHandles);
+}
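
A sketch of querying memory usage (illustrative; assumes an open `db`
and the block cache `cache` it was configured with):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.Map;
    import org.rocksdb.Cache;
    import org.rocksdb.MemoryUsageType;
    import org.rocksdb.MemoryUtil;
    import org.rocksdb.RocksDB;

    static void printMemoryUsage(final RocksDB db, final Cache cache) {
      final Map<MemoryUsageType, Long> usage =
          MemoryUtil.getApproximateMemoryUsageByType(
              Arrays.asList(db), Collections.singleton(cache));
      System.out.println("memtables: " + usage.get(MemoryUsageType.kMemTableTotal));
      System.out.println("cache:     " + usage.get(MemoryUsageType.kCacheTotal));
    }
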
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java b/src/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java
new file mode 100644
index 000000000..c299f6221
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java
@@ -0,0 +1,18 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * MergeOperator holds an operator to be applied when compacting
+ * two merge operands held under the same key in order to obtain a single
+ * value.
+ */
+public abstract class MergeOperator extends RocksObject {
+ protected MergeOperator(final long nativeHandle) {
+ super(nativeHandle);
+ }
+}
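
A sketch using the built-in StringAppendOperator subclass (illustrative;
assumes the DB is subsequently opened with these options):

    import org.rocksdb.Options;
    import org.rocksdb.StringAppendOperator;

    static Options mergeOptions() {
      return new Options()
          .setCreateIfMissing(true)
          // joins merge operands with ',' on compaction/read
          .setMergeOperator(new StringAppendOperator());
    }
    // after opening: db.merge(k, "a"); db.merge(k, "b"); get(k) -> "a,b"
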
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
new file mode 100644
index 000000000..1d9ca0817
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
@@ -0,0 +1,469 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.*;
+
+public class MutableColumnFamilyOptions
+ extends AbstractMutableOptions {
+
+ /**
+   * Users must use the builder pattern or the parser.
+ *
+ * @param keys the keys
+ * @param values the values
+ *
+ * See {@link #builder()} and {@link #parse(String)}.
+ */
+ private MutableColumnFamilyOptions(final String[] keys,
+ final String[] values) {
+ super(keys, values);
+ }
+
+ /**
+ * Creates a builder which allows you
+ * to set MutableColumnFamilyOptions in a fluent
+ * manner
+ *
+ * @return A builder for MutableColumnFamilyOptions
+ */
+ public static MutableColumnFamilyOptionsBuilder builder() {
+ return new MutableColumnFamilyOptionsBuilder();
+ }
+
+ /**
+ * Parses a String representation of MutableColumnFamilyOptions
+ *
+   * The format is: key1=value1;key2=value2;key3=value3 etc.
+ *
+ * For int[] values, each int should be separated by a comma, e.g.
+ *
+ * key1=value1;intArrayKey1=1,2,3
+ *
+ * @param str The string representation of the mutable column family options
+ *
+ * @return A builder for the mutable column family options
+ */
+ public static MutableColumnFamilyOptionsBuilder parse(final String str) {
+ Objects.requireNonNull(str);
+
+ final MutableColumnFamilyOptionsBuilder builder =
+ new MutableColumnFamilyOptionsBuilder();
+
+ final String[] options = str.trim().split(KEY_VALUE_PAIR_SEPARATOR);
+ for(final String option : options) {
+ final int equalsOffset = option.indexOf(KEY_VALUE_SEPARATOR);
+ if(equalsOffset <= 0) {
+ throw new IllegalArgumentException(
+ "options string has an invalid key=value pair");
+ }
+
+ final String key = option.substring(0, equalsOffset);
+ if(key.isEmpty()) {
+ throw new IllegalArgumentException("options string is invalid");
+ }
+
+ final String value = option.substring(equalsOffset + 1);
+ if(value.isEmpty()) {
+ throw new IllegalArgumentException("options string is invalid");
+ }
+
+ builder.fromString(key, value);
+ }
+
+ return builder;
+ }
+
+ private interface MutableColumnFamilyOptionKey extends MutableOptionKey {}
+
+ public enum MemtableOption implements MutableColumnFamilyOptionKey {
+ write_buffer_size(ValueType.LONG),
+ arena_block_size(ValueType.LONG),
+ memtable_prefix_bloom_size_ratio(ValueType.DOUBLE),
+ @Deprecated memtable_prefix_bloom_bits(ValueType.INT),
+ @Deprecated memtable_prefix_bloom_probes(ValueType.INT),
+ memtable_huge_page_size(ValueType.LONG),
+ max_successive_merges(ValueType.LONG),
+ @Deprecated filter_deletes(ValueType.BOOLEAN),
+ max_write_buffer_number(ValueType.INT),
+ inplace_update_num_locks(ValueType.LONG);
+
+ private final ValueType valueType;
+ MemtableOption(final ValueType valueType) {
+ this.valueType = valueType;
+ }
+
+ @Override
+ public ValueType getValueType() {
+ return valueType;
+ }
+ }
+
+ public enum CompactionOption implements MutableColumnFamilyOptionKey {
+ disable_auto_compactions(ValueType.BOOLEAN),
+ @Deprecated soft_rate_limit(ValueType.DOUBLE),
+ soft_pending_compaction_bytes_limit(ValueType.LONG),
+ @Deprecated hard_rate_limit(ValueType.DOUBLE),
+ hard_pending_compaction_bytes_limit(ValueType.LONG),
+ level0_file_num_compaction_trigger(ValueType.INT),
+ level0_slowdown_writes_trigger(ValueType.INT),
+ level0_stop_writes_trigger(ValueType.INT),
+ max_compaction_bytes(ValueType.LONG),
+ target_file_size_base(ValueType.LONG),
+ target_file_size_multiplier(ValueType.INT),
+ max_bytes_for_level_base(ValueType.LONG),
+ max_bytes_for_level_multiplier(ValueType.INT),
+ max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY),
+ ttl(ValueType.LONG);
+
+ private final ValueType valueType;
+ CompactionOption(final ValueType valueType) {
+ this.valueType = valueType;
+ }
+
+ @Override
+ public ValueType getValueType() {
+ return valueType;
+ }
+ }
+
+ public enum MiscOption implements MutableColumnFamilyOptionKey {
+ max_sequential_skip_in_iterations(ValueType.LONG),
+ paranoid_file_checks(ValueType.BOOLEAN),
+ report_bg_io_stats(ValueType.BOOLEAN),
+ compression_type(ValueType.ENUM);
+
+ private final ValueType valueType;
+ MiscOption(final ValueType valueType) {
+ this.valueType = valueType;
+ }
+
+ @Override
+ public ValueType getValueType() {
+ return valueType;
+ }
+ }
+
+ public static class MutableColumnFamilyOptionsBuilder
+ extends AbstractMutableOptionsBuilder<MutableColumnFamilyOptions, MutableColumnFamilyOptionsBuilder, MutableColumnFamilyOptionKey>
+ implements MutableColumnFamilyOptionsInterface<MutableColumnFamilyOptionsBuilder> {
+
+ private final static Map<String, MutableColumnFamilyOptionKey> ALL_KEYS_LOOKUP = new HashMap<>();
+ static {
+ for(final MutableColumnFamilyOptionKey key : MemtableOption.values()) {
+ ALL_KEYS_LOOKUP.put(key.name(), key);
+ }
+
+ for(final MutableColumnFamilyOptionKey key : CompactionOption.values()) {
+ ALL_KEYS_LOOKUP.put(key.name(), key);
+ }
+
+ for(final MutableColumnFamilyOptionKey key : MiscOption.values()) {
+ ALL_KEYS_LOOKUP.put(key.name(), key);
+ }
+ }
+
+ private MutableColumnFamilyOptionsBuilder() {
+ super();
+ }
+
+ @Override
+ protected MutableColumnFamilyOptionsBuilder self() {
+ return this;
+ }
+
+ @Override
+ protected Map<String, MutableColumnFamilyOptionKey> allKeys() {
+ return ALL_KEYS_LOOKUP;
+ }
+
+ @Override
+ protected MutableColumnFamilyOptions build(final String[] keys,
+ final String[] values) {
+ return new MutableColumnFamilyOptions(keys, values);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setWriteBufferSize(
+ final long writeBufferSize) {
+ return setLong(MemtableOption.write_buffer_size, writeBufferSize);
+ }
+
+ @Override
+ public long writeBufferSize() {
+ return getLong(MemtableOption.write_buffer_size);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setArenaBlockSize(
+ final long arenaBlockSize) {
+ return setLong(MemtableOption.arena_block_size, arenaBlockSize);
+ }
+
+ @Override
+ public long arenaBlockSize() {
+ return getLong(MemtableOption.arena_block_size);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setMemtablePrefixBloomSizeRatio(
+ final double memtablePrefixBloomSizeRatio) {
+ return setDouble(MemtableOption.memtable_prefix_bloom_size_ratio,
+ memtablePrefixBloomSizeRatio);
+ }
+
+ @Override
+ public double memtablePrefixBloomSizeRatio() {
+ return getDouble(MemtableOption.memtable_prefix_bloom_size_ratio);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setMemtableHugePageSize(
+ final long memtableHugePageSize) {
+ return setLong(MemtableOption.memtable_huge_page_size,
+ memtableHugePageSize);
+ }
+
+ @Override
+ public long memtableHugePageSize() {
+ return getLong(MemtableOption.memtable_huge_page_size);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setMaxSuccessiveMerges(
+ final long maxSuccessiveMerges) {
+ return setLong(MemtableOption.max_successive_merges, maxSuccessiveMerges);
+ }
+
+ @Override
+ public long maxSuccessiveMerges() {
+ return getLong(MemtableOption.max_successive_merges);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setMaxWriteBufferNumber(
+ final int maxWriteBufferNumber) {
+ return setInt(MemtableOption.max_write_buffer_number,
+ maxWriteBufferNumber);
+ }
+
+ @Override
+ public int maxWriteBufferNumber() {
+ return getInt(MemtableOption.max_write_buffer_number);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setInplaceUpdateNumLocks(
+ final long inplaceUpdateNumLocks) {
+ return setLong(MemtableOption.inplace_update_num_locks,
+ inplaceUpdateNumLocks);
+ }
+
+ @Override
+ public long inplaceUpdateNumLocks() {
+ return getLong(MemtableOption.inplace_update_num_locks);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setDisableAutoCompactions(
+ final boolean disableAutoCompactions) {
+ return setBoolean(CompactionOption.disable_auto_compactions,
+ disableAutoCompactions);
+ }
+
+ @Override
+ public boolean disableAutoCompactions() {
+ return getBoolean(CompactionOption.disable_auto_compactions);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setSoftPendingCompactionBytesLimit(
+ final long softPendingCompactionBytesLimit) {
+ return setLong(CompactionOption.soft_pending_compaction_bytes_limit,
+ softPendingCompactionBytesLimit);
+ }
+
+ @Override
+ public long softPendingCompactionBytesLimit() {
+ return getLong(CompactionOption.soft_pending_compaction_bytes_limit);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setHardPendingCompactionBytesLimit(
+ final long hardPendingCompactionBytesLimit) {
+ return setLong(CompactionOption.hard_pending_compaction_bytes_limit,
+ hardPendingCompactionBytesLimit);
+ }
+
+ @Override
+ public long hardPendingCompactionBytesLimit() {
+ return getLong(CompactionOption.hard_pending_compaction_bytes_limit);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setLevel0FileNumCompactionTrigger(
+ final int level0FileNumCompactionTrigger) {
+ return setInt(CompactionOption.level0_file_num_compaction_trigger,
+ level0FileNumCompactionTrigger);
+ }
+
+ @Override
+ public int level0FileNumCompactionTrigger() {
+ return getInt(CompactionOption.level0_file_num_compaction_trigger);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setLevel0SlowdownWritesTrigger(
+ final int level0SlowdownWritesTrigger) {
+ return setInt(CompactionOption.level0_slowdown_writes_trigger,
+ level0SlowdownWritesTrigger);
+ }
+
+ @Override
+ public int level0SlowdownWritesTrigger() {
+ return getInt(CompactionOption.level0_slowdown_writes_trigger);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setLevel0StopWritesTrigger(
+ final int level0StopWritesTrigger) {
+ return setInt(CompactionOption.level0_stop_writes_trigger,
+ level0StopWritesTrigger);
+ }
+
+ @Override
+ public int level0StopWritesTrigger() {
+ return getInt(CompactionOption.level0_stop_writes_trigger);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setMaxCompactionBytes(final long maxCompactionBytes) {
+ return setLong(CompactionOption.max_compaction_bytes, maxCompactionBytes);
+ }
+
+ @Override
+ public long maxCompactionBytes() {
+ return getLong(CompactionOption.max_compaction_bytes);
+ }
+
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setTargetFileSizeBase(
+ final long targetFileSizeBase) {
+ return setLong(CompactionOption.target_file_size_base,
+ targetFileSizeBase);
+ }
+
+ @Override
+ public long targetFileSizeBase() {
+ return getLong(CompactionOption.target_file_size_base);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setTargetFileSizeMultiplier(
+ final int targetFileSizeMultiplier) {
+ return setInt(CompactionOption.target_file_size_multiplier,
+ targetFileSizeMultiplier);
+ }
+
+ @Override
+ public int targetFileSizeMultiplier() {
+ return getInt(CompactionOption.target_file_size_multiplier);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelBase(
+ final long maxBytesForLevelBase) {
+ return setLong(CompactionOption.max_bytes_for_level_base,
+ maxBytesForLevelBase);
+ }
+
+ @Override
+ public long maxBytesForLevelBase() {
+ return getLong(CompactionOption.max_bytes_for_level_base);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelMultiplier(
+ final double maxBytesForLevelMultiplier) {
+ return setDouble(CompactionOption.max_bytes_for_level_multiplier, maxBytesForLevelMultiplier);
+ }
+
+ @Override
+ public double maxBytesForLevelMultiplier() {
+ return getDouble(CompactionOption.max_bytes_for_level_multiplier);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelMultiplierAdditional(
+ final int[] maxBytesForLevelMultiplierAdditional) {
+ return setIntArray(
+ CompactionOption.max_bytes_for_level_multiplier_additional,
+ maxBytesForLevelMultiplierAdditional);
+ }
+
+ @Override
+ public int[] maxBytesForLevelMultiplierAdditional() {
+ return getIntArray(
+ CompactionOption.max_bytes_for_level_multiplier_additional);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setMaxSequentialSkipInIterations(
+ final long maxSequentialSkipInIterations) {
+ return setLong(MiscOption.max_sequential_skip_in_iterations,
+ maxSequentialSkipInIterations);
+ }
+
+ @Override
+ public long maxSequentialSkipInIterations() {
+ return getLong(MiscOption.max_sequential_skip_in_iterations);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setParanoidFileChecks(
+ final boolean paranoidFileChecks) {
+ return setBoolean(MiscOption.paranoid_file_checks, paranoidFileChecks);
+ }
+
+ @Override
+ public boolean paranoidFileChecks() {
+ return getBoolean(MiscOption.paranoid_file_checks);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setCompressionType(
+ final CompressionType compressionType) {
+ return setEnum(MiscOption.compression_type, compressionType);
+ }
+
+ @Override
+ public CompressionType compressionType() {
+ return (CompressionType)getEnum(MiscOption.compression_type);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setReportBgIoStats(
+ final boolean reportBgIoStats) {
+ return setBoolean(MiscOption.report_bg_io_stats, reportBgIoStats);
+ }
+
+ @Override
+ public boolean reportBgIoStats() {
+ return getBoolean(MiscOption.report_bg_io_stats);
+ }
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setTtl(final long ttl) {
+ return setLong(CompactionOption.ttl, ttl);
+ }
+
+ @Override
+ public long ttl() {
+ return getLong(CompactionOption.ttl);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
new file mode 100644
index 000000000..5f51acb89
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
@@ -0,0 +1,158 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public interface MutableColumnFamilyOptionsInterface<
+ T extends MutableColumnFamilyOptionsInterface<T>>
+ extends AdvancedMutableColumnFamilyOptionsInterface<T> {
+ /**
+ * Amount of data to build up in memory (backed by an unsorted log
+ * on disk) before converting to a sorted on-disk file.
+ *
+ * Larger values increase performance, especially during bulk loads.
+ * Up to {@code max_write_buffer_number} write buffers may be held in memory
+ * at the same time, so you may wish to adjust this parameter
+ * to control memory usage.
+ *
+ * Also, a larger write buffer will result in a longer recovery time
+ * the next time the database is opened.
+ *
+ * Default: 64MB
+ * @param writeBufferSize the size of write buffer.
+ * @return the instance of the current object.
+ * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms
+ *     when the value overflows the underlying platform-specific limit.
+ */
+ T setWriteBufferSize(long writeBufferSize);
+
+ /**
+ * Return the size of the write buffer.
+ *
+ * @return the size of the write buffer.
+ * @see #setWriteBufferSize(long)
+ */
+ long writeBufferSize();
+
+ /**
+ * Disable automatic compactions. Manual compactions can still
+ * be issued on this column family.
+ *
+ * @param disableAutoCompactions true if auto-compactions are disabled.
+ * @return the reference to the current option.
+ */
+ T setDisableAutoCompactions(
+ boolean disableAutoCompactions);
+
+ /**
+ * Whether automatic compactions are disabled.
+ * Manual compactions can still be issued on this column family.
+ *
+ * @return true if auto-compactions are disabled.
+ */
+ boolean disableAutoCompactions();
+
+ /**
+ * Number of files to trigger level-0 compaction. A value &lt; 0 means that
+ * level-0 compaction will not be triggered by number of files at all.
+ *
+ * Default: 4
+ *
+ * @param level0FileNumCompactionTrigger The number of files to trigger
+ * level-0 compaction
+ * @return the reference to the current option.
+ */
+ T setLevel0FileNumCompactionTrigger(
+ int level0FileNumCompactionTrigger);
+
+ /**
+ * Number of files to trigger level-0 compaction. A value &lt; 0 means that
+ * level-0 compaction will not be triggered by number of files at all.
+ *
+ * Default: 4
+ *
+ * @return The number of files to trigger level-0 compaction.
+ */
+ int level0FileNumCompactionTrigger();
+
+ /**
+ * We try to limit the number of bytes in one compaction to be lower than
+ * this threshold, but this is not guaranteed.
+ * A value of 0 will be sanitized.
+ *
+ * @param maxCompactionBytes max bytes in a compaction
+ * @return the reference to the current option.
+ * @see #maxCompactionBytes()
+ */
+ T setMaxCompactionBytes(final long maxCompactionBytes);
+
+ /**
+ * We try to limit the number of bytes in one compaction to be lower than
+ * this threshold, but this is not guaranteed.
+ * A value of 0 will be sanitized.
+ *
+ * @return the maximum number of bytes for a compaction.
+ * @see #setMaxCompactionBytes(long)
+ */
+ long maxCompactionBytes();
+
+ /**
+ * The upper-bound of the total size of level-1 files in bytes.
+ * Maximum number of bytes for level L can be calculated as
+ * (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1))
+ * For example, if maxBytesForLevelBase is 200MB, and if
+ * max_bytes_for_level_multiplier is 10, total data size for level-1
+ * will be 200MB, total file size for level-2 will be 2GB,
+ * and total file size for level-3 will be 20GB.
+ * By default 'maxBytesForLevelBase' is 256MB.
+ *
+ * @param maxBytesForLevelBase maximum bytes for level base.
+ *
+ * @return the reference to the current option.
+ *
+ * See {@link AdvancedMutableColumnFamilyOptionsInterface#setMaxBytesForLevelMultiplier(double)}
+ */
+ T setMaxBytesForLevelBase(
+ long maxBytesForLevelBase);
+
+ /**
+ * The upper-bound of the total size of level-1 files in bytes.
+ * Maximum number of bytes for level L can be calculated as
+ * (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1))
+ * For example, if maxBytesForLevelBase is 200MB, and if
+ * max_bytes_for_level_multiplier is 10, total data size for level-1
+ * will be 200MB, total file size for level-2 will be 2GB,
+ * and total file size for level-3 will be 20GB.
+ * By default 'maxBytesForLevelBase' is 256MB.
+ *
+ * @return the upper-bound of the total size of level-1 files
+ * in bytes.
+ *
+ * See {@link AdvancedMutableColumnFamilyOptionsInterface#maxBytesForLevelMultiplier()}
+ */
+ long maxBytesForLevelBase();
+
+ /**
+ * Compress blocks using the specified compression algorithm. This
+ * parameter can be changed dynamically.
+ *
+ * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
+ *
+ * @param compressionType Compression Type.
+ * @return the reference to the current option.
+ */
+ T setCompressionType(
+ CompressionType compressionType);
+
+ /**
+ * Compress blocks using the specified compression algorithm. This
+ * parameter can be changed dynamically.
+ *
+ * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
+ *
+ * @return Compression type.
+ */
+ CompressionType compressionType();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptions.java
new file mode 100644
index 000000000..aca33b136
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptions.java
@@ -0,0 +1,325 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+public class MutableDBOptions extends AbstractMutableOptions {
+
+ /**
+ * Users must use the builder pattern or the parser.
+ *
+ * @param keys the keys
+ * @param values the values
+ *
+ * See {@link #builder()} and {@link #parse(String)}.
+ */
+ private MutableDBOptions(final String[] keys, final String[] values) {
+ super(keys, values);
+ }
+
+ /**
+ * Creates a builder which allows you
+ * to set MutableDBOptions in a fluent
+ * manner.
+ *
+ * @return A builder for MutableDBOptions
+ */
+ public static MutableDBOptionsBuilder builder() {
+ return new MutableDBOptionsBuilder();
+ }
+
+ /**
+ * Parses a String representation of MutableDBOptions
+ *
+ * The format is: key1=value1;key2=value2;key3=value3, etc.
+ *
+ * For int[] values, each int should be separated by a comma, e.g.
+ *
+ * key1=value1;intArrayKey1=1,2,3
+ *
+ * @param str The string representation of the mutable db options
+ *
+ * @return A builder for the mutable db options
+ */
+ public static MutableDBOptionsBuilder parse(final String str) {
+ Objects.requireNonNull(str);
+
+ final MutableDBOptionsBuilder builder =
+ new MutableDBOptionsBuilder();
+
+ final String[] options = str.trim().split(KEY_VALUE_PAIR_SEPARATOR);
+ for(final String option : options) {
+ final int equalsOffset = option.indexOf(KEY_VALUE_SEPARATOR);
+ if(equalsOffset <= 0) {
+ throw new IllegalArgumentException(
+ "options string has an invalid key=value pair");
+ }
+
+ final String key = option.substring(0, equalsOffset);
+ if(key.isEmpty()) {
+ throw new IllegalArgumentException("options string is invalid");
+ }
+
+ final String value = option.substring(equalsOffset + 1);
+ if(value.isEmpty()) {
+ throw new IllegalArgumentException("options string is invalid");
+ }
+
+ builder.fromString(key, value);
+ }
+
+ return builder;
+ }
+
+ private interface MutableDBOptionKey extends MutableOptionKey {}
+
+ public enum DBOption implements MutableDBOptionKey {
+ max_background_jobs(ValueType.INT),
+ base_background_compactions(ValueType.INT),
+ max_background_compactions(ValueType.INT),
+ avoid_flush_during_shutdown(ValueType.BOOLEAN),
+ writable_file_max_buffer_size(ValueType.LONG),
+ delayed_write_rate(ValueType.LONG),
+ max_total_wal_size(ValueType.LONG),
+ delete_obsolete_files_period_micros(ValueType.LONG),
+ stats_dump_period_sec(ValueType.INT),
+ stats_persist_period_sec(ValueType.INT),
+ stats_history_buffer_size(ValueType.LONG),
+ max_open_files(ValueType.INT),
+ bytes_per_sync(ValueType.LONG),
+ wal_bytes_per_sync(ValueType.LONG),
+ strict_bytes_per_sync(ValueType.BOOLEAN),
+ compaction_readahead_size(ValueType.LONG);
+
+ private final ValueType valueType;
+ DBOption(final ValueType valueType) {
+ this.valueType = valueType;
+ }
+
+ @Override
+ public ValueType getValueType() {
+ return valueType;
+ }
+ }
+
+ public static class MutableDBOptionsBuilder
+ extends AbstractMutableOptionsBuilder<MutableDBOptions, MutableDBOptionsBuilder, MutableDBOptionKey>
+ implements MutableDBOptionsInterface<MutableDBOptionsBuilder> {
+
+ private final static Map<String, MutableDBOptionKey> ALL_KEYS_LOOKUP = new HashMap<>();
+ static {
+ for(final MutableDBOptionKey key : DBOption.values()) {
+ ALL_KEYS_LOOKUP.put(key.name(), key);
+ }
+ }
+
+ private MutableDBOptionsBuilder() {
+ super();
+ }
+
+ @Override
+ protected MutableDBOptionsBuilder self() {
+ return this;
+ }
+
+ @Override
+ protected Map<String, MutableDBOptionKey> allKeys() {
+ return ALL_KEYS_LOOKUP;
+ }
+
+ @Override
+ protected MutableDBOptions build(final String[] keys,
+ final String[] values) {
+ return new MutableDBOptions(keys, values);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setMaxBackgroundJobs(
+ final int maxBackgroundJobs) {
+ return setInt(DBOption.max_background_jobs, maxBackgroundJobs);
+ }
+
+ @Override
+ public int maxBackgroundJobs() {
+ return getInt(DBOption.max_background_jobs);
+ }
+
+ @Override
+ @Deprecated
+ public void setBaseBackgroundCompactions(
+ final int baseBackgroundCompactions) {
+ setInt(DBOption.base_background_compactions,
+ baseBackgroundCompactions);
+ }
+
+ @Override
+ public int baseBackgroundCompactions() {
+ return getInt(DBOption.base_background_compactions);
+ }
+
+ @Override
+ @Deprecated
+ public MutableDBOptionsBuilder setMaxBackgroundCompactions(
+ final int maxBackgroundCompactions) {
+ return setInt(DBOption.max_background_compactions,
+ maxBackgroundCompactions);
+ }
+
+ @Override
+ @Deprecated
+ public int maxBackgroundCompactions() {
+ return getInt(DBOption.max_background_compactions);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setAvoidFlushDuringShutdown(
+ final boolean avoidFlushDuringShutdown) {
+ return setBoolean(DBOption.avoid_flush_during_shutdown,
+ avoidFlushDuringShutdown);
+ }
+
+ @Override
+ public boolean avoidFlushDuringShutdown() {
+ return getBoolean(DBOption.avoid_flush_during_shutdown);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setWritableFileMaxBufferSize(
+ final long writableFileMaxBufferSize) {
+ return setLong(DBOption.writable_file_max_buffer_size,
+ writableFileMaxBufferSize);
+ }
+
+ @Override
+ public long writableFileMaxBufferSize() {
+ return getLong(DBOption.writable_file_max_buffer_size);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setDelayedWriteRate(
+ final long delayedWriteRate) {
+ return setLong(DBOption.delayed_write_rate,
+ delayedWriteRate);
+ }
+
+ @Override
+ public long delayedWriteRate() {
+ return getLong(DBOption.delayed_write_rate);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setMaxTotalWalSize(
+ final long maxTotalWalSize) {
+ return setLong(DBOption.max_total_wal_size, maxTotalWalSize);
+ }
+
+ @Override
+ public long maxTotalWalSize() {
+ return getLong(DBOption.max_total_wal_size);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setDeleteObsoleteFilesPeriodMicros(
+ final long micros) {
+ return setLong(DBOption.delete_obsolete_files_period_micros, micros);
+ }
+
+ @Override
+ public long deleteObsoleteFilesPeriodMicros() {
+ return getLong(DBOption.delete_obsolete_files_period_micros);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setStatsDumpPeriodSec(
+ final int statsDumpPeriodSec) {
+ return setInt(DBOption.stats_dump_period_sec, statsDumpPeriodSec);
+ }
+
+ @Override
+ public int statsDumpPeriodSec() {
+ return getInt(DBOption.stats_dump_period_sec);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setStatsPersistPeriodSec(
+ final int statsPersistPeriodSec) {
+ return setInt(DBOption.stats_persist_period_sec, statsPersistPeriodSec);
+ }
+
+ @Override
+ public int statsPersistPeriodSec() {
+ return getInt(DBOption.stats_persist_period_sec);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setStatsHistoryBufferSize(
+ final long statsHistoryBufferSize) {
+ return setLong(DBOption.stats_history_buffer_size, statsHistoryBufferSize);
+ }
+
+ @Override
+ public long statsHistoryBufferSize() {
+ return getLong(DBOption.stats_history_buffer_size);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setMaxOpenFiles(final int maxOpenFiles) {
+ return setInt(DBOption.max_open_files, maxOpenFiles);
+ }
+
+ @Override
+ public int maxOpenFiles() {
+ return getInt(DBOption.max_open_files);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setBytesPerSync(final long bytesPerSync) {
+ return setLong(DBOption.bytes_per_sync, bytesPerSync);
+ }
+
+ @Override
+ public long bytesPerSync() {
+ return getLong(DBOption.bytes_per_sync);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setWalBytesPerSync(
+ final long walBytesPerSync) {
+ return setLong(DBOption.wal_bytes_per_sync, walBytesPerSync);
+ }
+
+ @Override
+ public long walBytesPerSync() {
+ return getLong(DBOption.wal_bytes_per_sync);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setStrictBytesPerSync(
+ final boolean strictBytesPerSync) {
+ return setBoolean(DBOption.strict_bytes_per_sync, strictBytesPerSync);
+ }
+
+ @Override
+ public boolean strictBytesPerSync() {
+ return getBoolean(DBOption.strict_bytes_per_sync);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setCompactionReadaheadSize(
+ final long compactionReadaheadSize) {
+ return setLong(DBOption.compaction_readahead_size,
+ compactionReadaheadSize);
+ }
+
+ @Override
+ public long compactionReadaheadSize() {
+ return getLong(DBOption.compaction_readahead_size);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java
new file mode 100644
index 000000000..a3e23d4b3
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java
@@ -0,0 +1,443 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T>> {
+ /**
+ * Specifies the maximum number of concurrent background jobs (both flushes
+ * and compactions combined).
+ * Default: 2
+ *
+ * @param maxBackgroundJobs number of max concurrent background jobs
+ * @return the instance of the current object.
+ */
+ T setMaxBackgroundJobs(int maxBackgroundJobs);
+
+ /**
+ * Returns the maximum number of concurrent background jobs (both flushes
+ * and compactions combined).
+ * Default: 2
+ *
+ * @return the maximum number of concurrent background jobs.
+ */
+ int maxBackgroundJobs();
+
+ /**
+ * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
+ * value of max_background_jobs. This option is ignored.
+ *
+ * Suggested number of concurrent background compaction jobs, submitted to
+ * the default LOW priority thread pool.
+ * Default: -1
+ *
+ * @param baseBackgroundCompactions Suggested number of background compaction
+ * jobs
+ *
+ * @deprecated Use {@link #setMaxBackgroundJobs(int)}
+ */
+ @Deprecated
+ void setBaseBackgroundCompactions(int baseBackgroundCompactions);
+
+ /**
+ * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
+ * value of max_background_jobs. This option is ignored.
+ *
+ * Suggested number of concurrent background compaction jobs, submitted to
+ * the default LOW priority thread pool.
+ * Default: -1
+ *
+ * @return Suggested number of background compaction jobs
+ */
+ int baseBackgroundCompactions();
+
+ /**
+ * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
+ * value of max_background_jobs. For backwards compatibility we will set
+ * `max_background_jobs = max_background_compactions + max_background_flushes`
+ * in the case where user sets at least one of `max_background_compactions` or
+ * `max_background_flushes` (we replace -1 by 1 in case one option is unset).
+ *
+ * Specifies the maximum number of concurrent background compaction jobs,
+ * submitted to the default LOW priority thread pool.
+ * If you're increasing this, also consider increasing the number of
+ * threads in the LOW priority thread pool.
+ * Default: -1
+ *
+ * @param maxBackgroundCompactions the maximum number of background
+ * compaction jobs.
+ * @return the instance of the current object.
+ *
+ * @see RocksEnv#setBackgroundThreads(int)
+ * @see RocksEnv#setBackgroundThreads(int, Priority)
+ * @see DBOptionsInterface#maxBackgroundFlushes()
+ * @deprecated Use {@link #setMaxBackgroundJobs(int)}
+ */
+ @Deprecated
+ T setMaxBackgroundCompactions(int maxBackgroundCompactions);
+
+ /**
+ * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
+ * value of max_background_jobs. For backwards compatibility we will set
+ * `max_background_jobs = max_background_compactions + max_background_flushes`
+ * in the case where user sets at least one of `max_background_compactions` or
+ * `max_background_flushes` (we replace -1 by 1 in case one option is unset).
+ *
+ * Returns the maximum number of concurrent background compaction jobs,
+ * submitted to the default LOW priority thread pool.
+ * When increasing this number, we may also want to consider increasing
+ * number of threads in LOW priority thread pool.
+ * Default: -1
+ *
+ * @return the maximum number of concurrent background compaction jobs.
+ * @see RocksEnv#setBackgroundThreads(int)
+ * @see RocksEnv#setBackgroundThreads(int, Priority)
+ *
+ * @deprecated Use {@link #setMaxBackgroundJobs(int)}
+ */
+ @Deprecated
+ int maxBackgroundCompactions();
+
+ /**
+ * By default RocksDB will flush all memtables on DB close if there is
+ * unpersisted data (i.e. with WAL disabled). The flush can be skipped to
+ * speed up DB close. Unpersisted data WILL BE LOST.
+ *
+ * DEFAULT: false
+ *
+ * Dynamically changeable through
+ * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
+ * API.
+ *
+ * @param avoidFlushDuringShutdown true if we should avoid flush during
+ * shutdown
+ *
+ * @return the reference to the current options.
+ */
+ T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown);
+
+ /**
+ * By default RocksDB will flush all memtables on DB close if there is
+ * unpersisted data (i.e. with WAL disabled). The flush can be skipped to
+ * speed up DB close. Unpersisted data WILL BE LOST.
+ *
+ * DEFAULT: false
+ *
+ * Dynamically changeable through
+ * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
+ * API.
+ *
+ * @return true if we should avoid flush during shutdown
+ */
+ boolean avoidFlushDuringShutdown();
+
+ /**
+ * This is the maximum buffer size that is used by WritableFileWriter.
+ * On Windows, we need to maintain an aligned buffer for writes.
+ * We allow the buffer to grow until its size hits the limit.
+ *
+ * Default: 1024 * 1024 (1 MB)
+ *
+ * @param writableFileMaxBufferSize the maximum buffer size
+ *
+ * @return the reference to the current options.
+ */
+ T setWritableFileMaxBufferSize(long writableFileMaxBufferSize);
+
+ /**
+ * This is the maximum buffer size that is used by WritableFileWriter.
+ * On Windows, we need to maintain an aligned buffer for writes.
+ * We allow the buffer to grow until its size hits the limit.
+ *
+ * Default: 1024 * 1024 (1 MB)
+ *
+ * @return the maximum buffer size
+ */
+ long writableFileMaxBufferSize();
+
+ /**
+ * The limited write rate to DB if
+ * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
+ * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
+ * or we are writing to the last mem table allowed and we allow more than 3
+ * mem tables. It is calculated using size of user write requests before
+ * compression. RocksDB may decide to slow down more if the compaction still
+ * gets behind further.
+ * If the value is 0, we will infer a value from the `rate_limiter` value
+ * if it is not empty, or 16MB if `rate_limiter` is empty. Note that
+ * if users change the rate in `rate_limiter` after DB is opened,
+ * `delayed_write_rate` won't be adjusted.
+ *
+ * Unit: bytes per second.
+ *
+ * Default: 0
+ *
+ * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}.
+ *
+ * @param delayedWriteRate the rate in bytes per second
+ *
+ * @return the reference to the current options.
+ */
+ T setDelayedWriteRate(long delayedWriteRate);
+
+ /**
+ * The limited write rate to DB if
+ * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
+ * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
+ * or we are writing to the last mem table allowed and we allow more than 3
+ * mem tables. It is calculated using size of user write requests before
+ * compression. RocksDB may decide to slow down more if the compaction still
+ * gets behind further.
+ * If the value is 0, we will infer a value from the `rate_limiter` value
+ * if it is not empty, or 16MB if `rate_limiter` is empty. Note that
+ * if users change the rate in `rate_limiter` after DB is opened,
+ * `delayed_write_rate` won't be adjusted.
+ *
+ * Unit: bytes per second.
+ *
+ * Default: 0
+ *
+ * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}.
+ *
+ * @return the rate in bytes per second
+ */
+ long delayedWriteRate();
+
+ /**
+ * <p>Once write-ahead logs exceed this size, we will start forcing the
+ * flush of column families whose memtables are backed by the oldest live
+ * WAL file (i.e. the ones that are causing all the space amplification).
+ * </p>
+ * <p>If set to 0 (default), we will dynamically choose the WAL size limit to
+ * be [sum of all write_buffer_size * max_write_buffer_number] * 2</p>
+ * <p>This option takes effect only when there is more than one column
+ * family, as otherwise the WAL size is dictated by the write_buffer_size.</p>
+ * <p>Default: 0</p>
+ *
+ * @param maxTotalWalSize max total wal size.
+ * @return the instance of the current object.
+ */
+ T setMaxTotalWalSize(long maxTotalWalSize);
+
+ /**
+ * <p>Returns the max total wal size. Once write-ahead logs exceed this size,
+ * we will start forcing the flush of column families whose memtables are
+ * backed by the oldest live WAL file (i.e. the ones that are causing all
+ * the space amplification).</p>
+ *
+ * <p>If set to 0 (default), we will dynamically choose the WAL size limit
+ * to be [sum of all write_buffer_size * max_write_buffer_number] * 2
+ * </p>
+ *
+ * @return max total wal size
+ */
+ long maxTotalWalSize();
+
+ /**
+ * The periodicity when obsolete files get deleted. The default
+ * value is 6 hours. Files that fall out of scope during compaction
+ * will still be deleted automatically on every compaction,
+ * regardless of this setting.
+ *
+ * @param micros the time interval in micros
+ * @return the instance of the current object.
+ */
+ T setDeleteObsoleteFilesPeriodMicros(long micros);
+
+ /**
+ * The periodicity when obsolete files get deleted. The default
+ * value is 6 hours. Files that fall out of scope during compaction
+ * will still be deleted automatically on every compaction,
+ * regardless of this setting.
+ *
+ * @return the time interval in micros when obsolete files will be deleted.
+ */
+ long deleteObsoleteFilesPeriodMicros();
+
+ /**
+ * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec.
+ * Default: 600 (10 minutes)
+ *
+ * @param statsDumpPeriodSec time interval in seconds.
+ * @return the instance of the current object.
+ */
+ T setStatsDumpPeriodSec(int statsDumpPeriodSec);
+
+ /**
+ * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
+ * Default: 600 (10 minutes)
+ *
+ * @return time interval in seconds.
+ */
+ int statsDumpPeriodSec();
+
+ /**
+ * If not zero, dump rocksdb.stats to RocksDB every
+ * {@code statsPersistPeriodSec} seconds.
+ *
+ * Default: 600
+ *
+ * @param statsPersistPeriodSec time interval in seconds.
+ * @return the instance of the current object.
+ */
+ T setStatsPersistPeriodSec(int statsPersistPeriodSec);
+
+ /**
+ * If not zero, dump rocksdb.stats to RocksDB every
+ * {@code statsPersistPeriodSec} seconds.
+ *
+ * @return time interval in seconds.
+ */
+ int statsPersistPeriodSec();
+
+ /**
+ * If not zero, periodically take stats snapshots and store them in memory;
+ * the memory size for stats snapshots is capped at {@code statsHistoryBufferSize}.
+ *
+ * Default: 1MB
+ *
+ * @param statsHistoryBufferSize the size of the buffer.
+ * @return the instance of the current object.
+ */
+ T setStatsHistoryBufferSize(long statsHistoryBufferSize);
+
+ /**
+ * If not zero, periodically take stats snapshots and store them in memory;
+ * the memory size for stats snapshots is capped at {@code statsHistoryBufferSize}.
+ *
+ * @return the size of the buffer.
+ */
+ long statsHistoryBufferSize();
+
+ /**
+ * Number of open files that can be used by the DB. You may need to
+ * increase this if your database has a large working set. Value -1 means
+ * files opened are always kept open. You can estimate number of files based
+ * on {@code target_file_size_base} and {@code target_file_size_multiplier}
+ * for level-based compaction. For universal-style compaction, you can usually
+ * set it to -1.
+ * Default: -1
+ *
+ * @param maxOpenFiles the maximum number of open files.
+ * @return the instance of the current object.
+ */
+ T setMaxOpenFiles(int maxOpenFiles);
+
+ /**
+ * Number of open files that can be used by the DB. You may need to
+ * increase this if your database has a large working set. Value -1 means
+ * files opened are always kept open. You can estimate number of files based
+ * on {@code target_file_size_base} and {@code target_file_size_multiplier}
+ * for level-based compaction. For universal-style compaction, you can usually
+ * set it to -1.
+ * Default: -1
+ *
+ * @return the maximum number of open files.
+ */
+ int maxOpenFiles();
+
+ /**
+ * Allows OS to incrementally sync files to disk while they are being
+ * written, asynchronously, in the background.
+ * Issue one request for every bytes_per_sync written. 0 turns it off.
+ * Default: 0
+ *
+ * @param bytesPerSync size in bytes
+ * @return the instance of the current object.
+ */
+ T setBytesPerSync(long bytesPerSync);
+
+ /**
+ * Allows OS to incrementally sync files to disk while they are being
+ * written, asynchronously, in the background.
+ * Issue one request for every bytes_per_sync written. 0 turns it off.
+ * Default: 0
+ *
+ * @return size in bytes
+ */
+ long bytesPerSync();
+
+ /**
+ * Same as {@link #setBytesPerSync(long)}, but applies to WAL files.
+ *
+ * Default: 0, turned off
+ *
+ * @param walBytesPerSync size in bytes
+ * @return the instance of the current object.
+ */
+ T setWalBytesPerSync(long walBytesPerSync);
+
+ /**
+ * Same as {@link #bytesPerSync()}, but applies to WAL files.
+ *
+ * Default: 0, turned off
+ *
+ * @return size in bytes
+ */
+ long walBytesPerSync();
+
+ /**
+ * When true, guarantees WAL files have at most {@link #walBytesPerSync()}
+ * bytes submitted for writeback at any given time, and SST files have at most
+ * {@link #bytesPerSync()} bytes pending writeback at any given time. This
+ * can be used to handle cases where processing speed exceeds I/O speed
+ * during file generation, which can lead to a huge sync when the file is
+ * finished, even with {@link #bytesPerSync()} / {@link #walBytesPerSync()}
+ * properly configured.
+ *
+ * - If `sync_file_range` is supported it achieves this by waiting for any
+ * prior `sync_file_range`s to finish before proceeding. In this way,
+ * processing (compression, etc.) can proceed uninhibited in the gap
+ * between `sync_file_range`s, and we block only when I/O falls
+ * behind.
+ * - Otherwise the `WritableFile::Sync` method is used. Note this mechanism
+ * always blocks, thus preventing the interleaving of I/O and processing.
+ *
+ * Note: Enabling this option does not provide any additional persistence
+ * guarantees, as it may use `sync_file_range`, which does not write out
+ * metadata.
+ *
+ * Default: false
+ *
+ * @param strictBytesPerSync true to strictly enforce the pending-writeback limits
+ * @return the instance of the current object.
+ */
+ T setStrictBytesPerSync(boolean strictBytesPerSync);
+
+ /**
+ * Return whether the limits on bytes pending writeback per sync
+ * are strictly enforced.
+ * See {@link #setStrictBytesPerSync(boolean)}
+ *
+ * @return true if strict bytes-per-sync is enabled.
+ */
+ boolean strictBytesPerSync();
+
+ /**
+ * If non-zero, we perform bigger reads when doing compaction. If you're
+ * running RocksDB on spinning disks, you should set this to at least 2MB.
+ *
+ * That way, RocksDB's compaction does sequential instead of random reads.
+ * When non-zero, we also force
+ * {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true.
+ *
+ * Default: 0
+ *
+ * @param compactionReadaheadSize The compaction read-ahead size
+ *
+ * @return the reference to the current options.
+ */
+ T setCompactionReadaheadSize(final long compactionReadaheadSize);
+
+ /**
+ * If non-zero, we perform bigger reads when doing compaction. If you're
+ * running RocksDB on spinning disks, you should set this to at least 2MB.
+ *
+ * That way, RocksDB's compaction does sequential instead of random reads.
+ * When non-zero, we also force
+ * {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true.
+ *
+ * Default: 0
+ *
+ * @return The compaction read-ahead size
+ */
+ long compactionReadaheadSize();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionKey.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionKey.java
new file mode 100644
index 000000000..ec1b9ff3b
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionKey.java
@@ -0,0 +1,16 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+public interface MutableOptionKey {
+ enum ValueType {
+ DOUBLE,
+ LONG,
+ INT,
+ BOOLEAN,
+ INT_ARRAY,
+ ENUM
+ }
+
+ String name();
+ ValueType getValueType();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionValue.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionValue.java
new file mode 100644
index 000000000..8ec63269f
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionValue.java
@@ -0,0 +1,376 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+import static org.rocksdb.AbstractMutableOptions.INT_ARRAY_INT_SEPARATOR;
+
+public abstract class MutableOptionValue<T> {
+
+ abstract double asDouble() throws NumberFormatException;
+ abstract long asLong() throws NumberFormatException;
+ abstract int asInt() throws NumberFormatException;
+ abstract boolean asBoolean() throws IllegalStateException;
+ abstract int[] asIntArray() throws IllegalStateException;
+ abstract String asString();
+ abstract T asObject();
+
+ private static abstract class MutableOptionValueObject<T>
+ extends MutableOptionValue<T> {
+ protected final T value;
+
+ private MutableOptionValueObject(final T value) {
+ this.value = value;
+ }
+
+ @Override T asObject() {
+ return value;
+ }
+ }
+
+ static MutableOptionValue<String> fromString(final String s) {
+ return new MutableOptionStringValue(s);
+ }
+
+ static MutableOptionValue<Double> fromDouble(final double d) {
+ return new MutableOptionDoubleValue(d);
+ }
+
+ static MutableOptionValue<Long> fromLong(final long d) {
+ return new MutableOptionLongValue(d);
+ }
+
+ static MutableOptionValue<Integer> fromInt(final int i) {
+ return new MutableOptionIntValue(i);
+ }
+
+ static MutableOptionValue<Boolean> fromBoolean(final boolean b) {
+ return new MutableOptionBooleanValue(b);
+ }
+
+ static MutableOptionValue<int[]> fromIntArray(final int[] ix) {
+ return new MutableOptionIntArrayValue(ix);
+ }
+
+ static <N extends Enum<N>> MutableOptionValue<N> fromEnum(final N value) {
+ return new MutableOptionEnumValue<>(value);
+ }
+
+ static class MutableOptionStringValue
+ extends MutableOptionValueObject<String> {
+ MutableOptionStringValue(final String value) {
+ super(value);
+ }
+
+ @Override
+ double asDouble() throws NumberFormatException {
+ return Double.parseDouble(value);
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ return Long.parseLong(value);
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ return Integer.parseInt(value);
+ }
+
+ @Override
+ boolean asBoolean() throws IllegalStateException {
+ return Boolean.parseBoolean(value);
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ throw new IllegalStateException("String is not applicable as int[]");
+ }
+
+ @Override
+ String asString() {
+ return value;
+ }
+ }
+
+ static class MutableOptionDoubleValue
+ extends MutableOptionValue<Double> {
+ private final double value;
+ MutableOptionDoubleValue(final double value) {
+ this.value = value;
+ }
+
+ @Override
+ double asDouble() {
+ return value;
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ return Double.valueOf(value).longValue();
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
+ throw new NumberFormatException(
+ "double value lies outside the bounds of int");
+ }
+ return Double.valueOf(value).intValue();
+ }
+
+ @Override
+ boolean asBoolean() throws IllegalStateException {
+ throw new IllegalStateException(
+ "double is not applicable as boolean");
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
+ throw new NumberFormatException(
+ "double value lies outside the bounds of int");
+ }
+ return new int[] { Double.valueOf(value).intValue() };
+ }
+
+ @Override
+ String asString() {
+ return String.valueOf(value);
+ }
+
+ @Override
+ Double asObject() {
+ return value;
+ }
+ }
+
+ static class MutableOptionLongValue
+ extends MutableOptionValue<Long> {
+ private final long value;
+
+ MutableOptionLongValue(final long value) {
+ this.value = value;
+ }
+
+ @Override
+ double asDouble() {
+ if(value > Double.MAX_VALUE || value < -Double.MAX_VALUE) {
+ throw new NumberFormatException(
+ "long value lies outside the bounds of double");
+ }
+ return Long.valueOf(value).doubleValue();
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ return value;
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
+ throw new NumberFormatException(
+ "long value lies outside the bounds of int");
+ }
+ return Long.valueOf(value).intValue();
+ }
+
+ @Override
+ boolean asBoolean() throws IllegalStateException {
+ throw new IllegalStateException(
+ "long is not applicable as boolean");
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
+ throw new NumberFormatException(
+ "long value lies outside the bounds of int");
+ }
+ return new int[] { Long.valueOf(value).intValue() };
+ }
+
+ @Override
+ String asString() {
+ return String.valueOf(value);
+ }
+
+ @Override
+ Long asObject() {
+ return value;
+ }
+ }
+
+ static class MutableOptionIntValue
+ extends MutableOptionValue<Integer> {
+ private final int value;
+
+ MutableOptionIntValue(final int value) {
+ this.value = value;
+ }
+
+ @Override
+ double asDouble() {
+ if(value > Double.MAX_VALUE || value < -Double.MAX_VALUE) {
+ throw new NumberFormatException("int value lies outside the bounds of double");
+ }
+ return Integer.valueOf(value).doubleValue();
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ return value;
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ return value;
+ }
+
+ @Override
+ boolean asBoolean() throws IllegalStateException {
+ throw new IllegalStateException("int is not applicable as boolean");
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ return new int[] { value };
+ }
+
+ @Override
+ String asString() {
+ return String.valueOf(value);
+ }
+
+ @Override
+ Integer asObject() {
+ return value;
+ }
+ }
+
+ static class MutableOptionBooleanValue
+ extends MutableOptionValue<Boolean> {
+ private final boolean value;
+
+ MutableOptionBooleanValue(final boolean value) {
+ this.value = value;
+ }
+
+ @Override
+ double asDouble() {
+ throw new NumberFormatException("boolean is not applicable as double");
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ throw new NumberFormatException("boolean is not applicable as Long");
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ throw new NumberFormatException("boolean is not applicable as int");
+ }
+
+ @Override
+ boolean asBoolean() {
+ return value;
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ throw new IllegalStateException("boolean is not applicable as int[]");
+ }
+
+ @Override
+ String asString() {
+ return String.valueOf(value);
+ }
+
+ @Override
+ Boolean asObject() {
+ return value;
+ }
+ }
+
+ static class MutableOptionIntArrayValue
+ extends MutableOptionValueObject<int[]> {
+ MutableOptionIntArrayValue(final int[] value) {
+ super(value);
+ }
+
+ @Override
+ double asDouble() {
+ throw new NumberFormatException("int[] is not applicable as double");
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ throw new NumberFormatException("int[] is not applicable as Long");
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ throw new NumberFormatException("int[] is not applicable as int");
+ }
+
+ @Override
+ boolean asBoolean() {
+ throw new NumberFormatException("int[] is not applicable as boolean");
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ return value;
+ }
+
+ @Override
+ String asString() {
+ final StringBuilder builder = new StringBuilder();
+ for(int i = 0; i < value.length; i++) {
+ builder.append(value[i]);  // append the element value, not the loop index
+ if(i + 1 < value.length) {
+ builder.append(INT_ARRAY_INT_SEPARATOR);
+ }
+ }
+ return builder.toString();
+ }
+ }
+
+ static class MutableOptionEnumValue<T extends Enum<T>>
+ extends MutableOptionValueObject<T> {
+
+ MutableOptionEnumValue(final T value) {
+ super(value);
+ }
+
+ @Override
+ double asDouble() throws NumberFormatException {
+ throw new NumberFormatException("Enum is not applicable as double");
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ throw new NumberFormatException("Enum is not applicable as long");
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ throw new NumberFormatException("Enum is not applicable as int");
+ }
+
+ @Override
+ boolean asBoolean() throws IllegalStateException {
+ throw new NumberFormatException("Enum is not applicable as boolean");
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ throw new NumberFormatException("Enum is not applicable as int[]");
+ }
+
+ @Override
+ String asString() {
+ return value.name();
+ }
+ }
+
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java b/src/rocksdb/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java
new file mode 100644
index 000000000..6acc146f7
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java
@@ -0,0 +1,59 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A simple abstraction to allow a Java class to wrap a custom comparator
+ * implemented in C++.
+ *
+ * The native comparator must directly extend rocksdb::Comparator.
+ */
+public abstract class NativeComparatorWrapper
+ extends AbstractComparator {
+
+ @Override
+ final ComparatorType getComparatorType() {
+ return ComparatorType.JAVA_NATIVE_COMPARATOR_WRAPPER;
+ }
+
+ @Override
+ public final String name() {
+ throw new IllegalStateException("This should not be called. " +
+ "Implementation is in Native code");
+ }
+
+ @Override
+ public final int compare(final ByteBuffer s1, final ByteBuffer s2) {
+ throw new IllegalStateException("This should not be called. " +
+ "Implementation is in Native code");
+ }
+
+ @Override
+ public final void findShortestSeparator(final ByteBuffer start, final ByteBuffer limit) {
+ throw new IllegalStateException("This should not be called. " +
+ "Implementation is in Native code");
+ }
+
+ @Override
+ public final void findShortSuccessor(final ByteBuffer key) {
+ throw new IllegalStateException("This should not be called. " +
+ "Implementation is in Native code");
+ }
+
+ /**
+ * We override {@link RocksCallbackObject#disposeInternal()}
+ * because disposing of a native rocksdb::Comparator extension requires
+ * a slightly different approach, as it is not really a RocksCallbackObject.
+ */
+ @Override
+ protected void disposeInternal() {
+ disposeInternal(nativeHandle_);
+ }
+
+ private native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java b/src/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java
new file mode 100644
index 000000000..6e37e8cf2
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java
@@ -0,0 +1,125 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+import java.io.*;
+import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
+
+import org.rocksdb.util.Environment;
+
+/**
+ * This class is used to load the RocksDB shared library from within the jar.
+ * The shared library is extracted to a temp folder and loaded from there.
+ */
+public class NativeLibraryLoader {
+ //singleton
+ private static final NativeLibraryLoader instance = new NativeLibraryLoader();
+ private static boolean initialized = false;
+
+ private static final String sharedLibraryName = Environment.getSharedLibraryName("rocksdb");
+ private static final String jniLibraryName = Environment.getJniLibraryName("rocksdb");
+ private static final String jniLibraryFileName = Environment.getJniLibraryFileName("rocksdb");
+ private static final String tempFilePrefix = "librocksdbjni";
+ private static final String tempFileSuffix = Environment.getJniLibraryExtension();
+
+ /**
+ * Get a reference to the NativeLibraryLoader
+ *
+ * @return The NativeLibraryLoader
+ */
+ public static NativeLibraryLoader getInstance() {
+ return instance;
+ }
+
+ /**
+ * First attempts to load the library from <i>java.library.path</i>;
+ * if that fails, it falls back to extracting
+ * the library from the classpath via
+ * {@link org.rocksdb.NativeLibraryLoader#loadLibraryFromJar(java.lang.String)}
+ *
+ * @param tmpDir A temporary directory to use
+ * to copy the native library to when loading from the classpath.
+ * If null, or the empty string, we rely on Java's
+ * {@link java.io.File#createTempFile(String, String)}
+ * function to provide a temporary location.
+ * The temporary file will be registered for deletion
+ * on exit.
+ *
+ * @throws java.io.IOException if a filesystem operation fails.
+ */
+ public synchronized void loadLibrary(final String tmpDir) throws IOException {
+ try {
+ System.loadLibrary(sharedLibraryName);
+ } catch(final UnsatisfiedLinkError ule1) {
+ try {
+ System.loadLibrary(jniLibraryName);
+ } catch(final UnsatisfiedLinkError ule2) {
+ loadLibraryFromJar(tmpDir);
+ }
+ }
+ }
+
+ /**
+ * Attempts to extract the native RocksDB library
+ * from the classpath and load it
+ *
+ * @param tmpDir A temporary directory to use
+ * to copy the native library to. If null,
+ * or the empty string, we rely on Java's
+ * {@link java.io.File#createTempFile(String, String)}
+ * function to provide a temporary location.
+ * The temporary file will be registered for deletion
+ * on exit.
+ *
+ * @throws java.io.IOException if a filesystem operation fails.
+ */
+ void loadLibraryFromJar(final String tmpDir)
+ throws IOException {
+ if (!initialized) {
+ System.load(loadLibraryFromJarToTemp(tmpDir).getAbsolutePath());
+ initialized = true;
+ }
+ }
+
+ File loadLibraryFromJarToTemp(final String tmpDir)
+ throws IOException {
+ final File temp;
+ if (tmpDir == null || tmpDir.isEmpty()) {
+ temp = File.createTempFile(tempFilePrefix, tempFileSuffix);
+ } else {
+ temp = new File(tmpDir, jniLibraryFileName);
+ if (temp.exists() && !temp.delete()) {
+ throw new RuntimeException("File: " + temp.getAbsolutePath()
+ + " already exists and cannot be removed.");
+ }
+ if (!temp.createNewFile()) {
+ throw new RuntimeException("File: " + temp.getAbsolutePath()
+ + " could not be created.");
+ }
+ }
+
+ if (!temp.exists()) {
+ throw new RuntimeException("File " + temp.getAbsolutePath() + " does not exist.");
+ } else {
+ temp.deleteOnExit();
+ }
+
+ // attempt to copy the library from the Jar file to the temp destination
+ try (final InputStream is = getClass().getClassLoader().
+ getResourceAsStream(jniLibraryFileName)) {
+ if (is == null) {
+ throw new RuntimeException(jniLibraryFileName + " was not found inside JAR.");
+ } else {
+ Files.copy(is, temp.toPath(), StandardCopyOption.REPLACE_EXISTING);
+ }
+ }
+
+ return temp;
+ }
+
+ /**
+ * Private constructor to disallow instantiation
+ */
+ private NativeLibraryLoader() {
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OperationStage.java b/src/rocksdb/java/src/main/java/org/rocksdb/OperationStage.java
new file mode 100644
index 000000000..6ac0a15a2
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/OperationStage.java
@@ -0,0 +1,59 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The operation stage.
+ */
+public enum OperationStage {
+ STAGE_UNKNOWN((byte)0x0),
+ STAGE_FLUSH_RUN((byte)0x1),
+ STAGE_FLUSH_WRITE_L0((byte)0x2),
+ STAGE_COMPACTION_PREPARE((byte)0x3),
+ STAGE_COMPACTION_RUN((byte)0x4),
+ STAGE_COMPACTION_PROCESS_KV((byte)0x5),
+ STAGE_COMPACTION_INSTALL((byte)0x6),
+ STAGE_COMPACTION_SYNC_FILE((byte)0x7),
+ STAGE_PICK_MEMTABLES_TO_FLUSH((byte)0x8),
+ STAGE_MEMTABLE_ROLLBACK((byte)0x9),
+ STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS((byte)0xA);
+
+ private final byte value;
+
+ OperationStage(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal representation value.
+ *
+ * @return the internal representation value.
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get the Operation stage from the internal representation value.
+ *
+ * @param value the internal representation value.
+ *
+ * @return the operation stage
+ *
+ * @throws IllegalArgumentException if the value does not match
+ * an OperationStage
+ */
+ static OperationStage fromValue(final byte value)
+ throws IllegalArgumentException {
+ for (final OperationStage threadType : OperationStage.values()) {
+ if (threadType.value == value) {
+ return threadType;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Unknown value for OperationStage: " + value);
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OperationType.java b/src/rocksdb/java/src/main/java/org/rocksdb/OperationType.java
new file mode 100644
index 000000000..7cc9b65cd
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/OperationType.java
@@ -0,0 +1,54 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The type used to refer to a thread operation.
+ *
+ * A thread operation describes high-level action of a thread,
+ * examples include compaction and flush.
+ */
+public enum OperationType {
+ OP_UNKNOWN((byte)0x0),
+ OP_COMPACTION((byte)0x1),
+ OP_FLUSH((byte)0x2);
+
+ private final byte value;
+
+ OperationType(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal representation value.
+ *
+ * @return the internal representation value.
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get the Operation type from the internal representation value.
+ *
+ * @param value the internal representation value.
+ *
+ * @return the operation type
+ *
+ * @throws IllegalArgumentException if the value does not match
+ * an OperationType
+ */
+ static OperationType fromValue(final byte value)
+ throws IllegalArgumentException {
+ for (final OperationType threadType : OperationType.values()) {
+ if (threadType.value == value) {
+ return threadType;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Unknown value for OperationType: " + value);
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java
new file mode 100644
index 000000000..5a2e1f3ed
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java
@@ -0,0 +1,226 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * Database with Transaction support.
+ */
+public class OptimisticTransactionDB extends RocksDB
+ implements TransactionalDB<OptimisticTransactionOptions> {
+
+ /**
+ * Private constructor.
+ *
+ * @param nativeHandle The native handle of the C++ OptimisticTransactionDB
+ * object
+ */
+ private OptimisticTransactionDB(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Open an OptimisticTransactionDB similar to
+ * {@link RocksDB#open(Options, String)}.
+ *
+ * @param options {@link org.rocksdb.Options} instance.
+ * @param path the path to the rocksdb.
+ *
+ * @return an {@link OptimisticTransactionDB} instance on success; null if the
+ *     specified {@link OptimisticTransactionDB} cannot be opened.
+ *
+ * @throws RocksDBException if an error occurs whilst opening the database.
+ */
+ public static OptimisticTransactionDB open(final Options options,
+ final String path) throws RocksDBException {
+ final OptimisticTransactionDB otdb = new OptimisticTransactionDB(open(
+ options.nativeHandle_, path));
+
+    // when a non-default Options is used, keeping an Options reference
+    // in RocksDB prevents the Options from being garbage collected
+    // during the lifetime of the newly-created RocksDB.
+ otdb.storeOptionsInstance(options);
+
+ return otdb;
+ }
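+
+  // A minimal usage sketch (illustrative only; "/path/to/db" is a
+  // placeholder, and the enclosing method would declare
+  // `throws RocksDBException`):
+  //
+  //   try (final Options options = new Options().setCreateIfMissing(true);
+  //        final OptimisticTransactionDB txnDb =
+  //            OptimisticTransactionDB.open(options, "/path/to/db");
+  //        final WriteOptions writeOptions = new WriteOptions();
+  //        final Transaction txn = txnDb.beginTransaction(writeOptions)) {
+  //     txn.put("key".getBytes(), "value".getBytes());
+  //     txn.commit();
+  //   }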
+
+ /**
+ * Open an OptimisticTransactionDB similar to
+ * {@link RocksDB#open(DBOptions, String, List, List)}.
+ *
+ * @param dbOptions {@link org.rocksdb.DBOptions} instance.
+ * @param path the path to the rocksdb.
+ * @param columnFamilyDescriptors list of column family descriptors
+ * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
+ *
+ * @return an {@link OptimisticTransactionDB} instance on success; null if the
+ *     specified {@link OptimisticTransactionDB} cannot be opened.
+ *
+ * @throws RocksDBException if an error occurs whilst opening the database.
+ */
+ public static OptimisticTransactionDB open(final DBOptions dbOptions,
+ final String path,
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
+ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+
+ final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
+ final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
+ for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
+ final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
+ .get(i);
+ cfNames[i] = cfDescriptor.getName();
+ cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_;
+ }
+
+ final long[] handles = open(dbOptions.nativeHandle_, path, cfNames,
+ cfOptionHandles);
+ final OptimisticTransactionDB otdb =
+ new OptimisticTransactionDB(handles[0]);
+
+    // when a non-default Options is used, keeping an Options reference
+    // in RocksDB prevents the Options from being garbage collected
+    // during the lifetime of the newly-created RocksDB.
+ otdb.storeOptionsInstance(dbOptions);
+
+ for (int i = 1; i < handles.length; i++) {
+ columnFamilyHandles.add(new ColumnFamilyHandle(otdb, handles[i]));
+ }
+
+ return otdb;
+ }
+
+ /**
+ * This is similar to {@link #close()} except that it
+ * throws an exception if any error occurs.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ *
+ * @throws RocksDBException if an error occurs whilst closing.
+ */
+ public void closeE() throws RocksDBException {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
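+
+  // Sync-then-close sketch (illustrative), persisting the WAL before closing
+  // as described above:
+  //
+  //   try (final WriteOptions syncOptions = new WriteOptions().setSync(true);
+  //        final WriteBatch emptyBatch = new WriteBatch()) {
+  //     db.write(syncOptions, emptyBatch);  // fsyncs the WAL
+  //   }
+  //   db.closeE();  // throws RocksDBException on error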
+
+ /**
+ * This is similar to {@link #closeE()} except that it
+ * silently ignores any errors.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ */
+ @Override
+ public void close() {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } catch (final RocksDBException e) {
+ // silently ignore the error report
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions) {
+ return new Transaction(this, beginTransaction(nativeHandle_,
+ writeOptions.nativeHandle_));
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+ final OptimisticTransactionOptions optimisticTransactionOptions) {
+ return new Transaction(this, beginTransaction(nativeHandle_,
+ writeOptions.nativeHandle_,
+ optimisticTransactionOptions.nativeHandle_));
+ }
+
+  // TODO(AR) consider having beginTransaction(... oldTransaction) set a
+  // reference count inside Transaction, so that we can always call
+  // Transaction#close but the object is only disposed when there have been as
+  // many closes as calls to beginTransaction. This would make the
+  // try-with-resources paradigm easier for Java developers.
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+ final Transaction oldTransaction) {
+ final long jtxn_handle = beginTransaction_withOld(nativeHandle_,
+ writeOptions.nativeHandle_, oldTransaction.nativeHandle_);
+
+ // RocksJava relies on the assumption that
+ // we do not allocate a new Transaction object
+ // when providing an old_txn
+ assert(jtxn_handle == oldTransaction.nativeHandle_);
+
+ return oldTransaction;
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+ final OptimisticTransactionOptions optimisticTransactionOptions,
+ final Transaction oldTransaction) {
+ final long jtxn_handle = beginTransaction_withOld(nativeHandle_,
+ writeOptions.nativeHandle_, optimisticTransactionOptions.nativeHandle_,
+ oldTransaction.nativeHandle_);
+
+ // RocksJava relies on the assumption that
+ // we do not allocate a new Transaction object
+ // when providing an old_txn
+ assert(jtxn_handle == oldTransaction.nativeHandle_);
+
+ return oldTransaction;
+ }
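+
+  // Reuse sketch (illustrative): recycling one Transaction object across
+  // transactions instead of allocating a new one each time:
+  //
+  //   Transaction txn = txnDb.beginTransaction(writeOptions);
+  //   txn.put(key, value);
+  //   txn.commit();
+  //   txn = txnDb.beginTransaction(writeOptions, txn);  // same object back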
+
+ /**
+ * Get the underlying database that was opened.
+ *
+ * @return The underlying database that was opened.
+ */
+ public RocksDB getBaseDB() {
+ final RocksDB db = new RocksDB(getBaseDB(nativeHandle_));
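+    // the base DB handle is owned by this OptimisticTransactionDB, so the
+    // returned wrapper must not dispose of it when closed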
+ db.disOwnNativeHandle();
+ return db;
+ }
+
+ @Override protected final native void disposeInternal(final long handle);
+
+ protected static native long open(final long optionsHandle,
+ final String path) throws RocksDBException;
+ protected static native long[] open(final long handle, final String path,
+ final byte[][] columnFamilyNames, final long[] columnFamilyOptions);
+  private static native void closeDatabase(final long handle)
+      throws RocksDBException;
+ private native long beginTransaction(final long handle,
+ final long writeOptionsHandle);
+ private native long beginTransaction(final long handle,
+ final long writeOptionsHandle,
+ final long optimisticTransactionOptionsHandle);
+ private native long beginTransaction_withOld(final long handle,
+ final long writeOptionsHandle, final long oldTransactionHandle);
+ private native long beginTransaction_withOld(final long handle,
+ final long writeOptionsHandle,
+ final long optimisticTransactionOptionsHandle,
+ final long oldTransactionHandle);
+ private native long getBaseDB(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java
new file mode 100644
index 000000000..250edf806
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java
@@ -0,0 +1,53 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class OptimisticTransactionOptions extends RocksObject
+ implements TransactionalOptions<OptimisticTransactionOptions> {
+
+ public OptimisticTransactionOptions() {
+ super(newOptimisticTransactionOptions());
+ }
+
+ @Override
+ public boolean isSetSnapshot() {
+ assert(isOwningHandle());
+ return isSetSnapshot(nativeHandle_);
+ }
+
+ @Override
+ public OptimisticTransactionOptions setSetSnapshot(
+ final boolean setSnapshot) {
+ assert(isOwningHandle());
+ setSetSnapshot(nativeHandle_, setSnapshot);
+ return this;
+ }
+
+ /**
+ * Should be set if the DB has a non-default comparator.
+ * See comment in
+ * {@link WriteBatchWithIndex#WriteBatchWithIndex(AbstractComparator, int, boolean)}
+ * constructor.
+ *
+ * @param comparator The comparator to use for the transaction.
+ *
+ * @return this OptimisticTransactionOptions instance
+ */
+ public OptimisticTransactionOptions setComparator(
+ final AbstractComparator comparator) {
+ assert(isOwningHandle());
+ setComparator(nativeHandle_, comparator.nativeHandle_);
+ return this;
+ }
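+
+  // Illustrative sketch (assumes `myComparator` is an AbstractComparator
+  // implementation supplied by the caller):
+  //
+  //   try (final OptimisticTransactionOptions txnOptions =
+  //            new OptimisticTransactionOptions()
+  //                .setSetSnapshot(true)
+  //                .setComparator(myComparator)) {
+  //     final Transaction txn =
+  //         txnDb.beginTransaction(writeOptions, txnOptions);
+  //     ...
+  //   }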
+
+  private static native long newOptimisticTransactionOptions();
+ private native boolean isSetSnapshot(final long handle);
+ private native void setSetSnapshot(final long handle,
+ final boolean setSnapshot);
+ private native void setComparator(final long handle,
+ final long comparatorHandle);
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Options.java b/src/rocksdb/java/src/main/java/org/rocksdb/Options.java
new file mode 100644
index 000000000..8a534f503
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Options.java
@@ -0,0 +1,2183 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Options to control the behavior of a database. They are used during the
+ * creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
+ *
+ * If the {@link #dispose()} function is not called, the Options instance will
+ * be GC'd automatically and the native resources released as part of that
+ * process.
+ */
+public class Options extends RocksObject
+ implements DBOptionsInterface<Options>,
+ MutableDBOptionsInterface<Options>,
+ ColumnFamilyOptionsInterface<Options>,
+ MutableColumnFamilyOptionsInterface<Options> {
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ /**
+ * Construct options for opening a RocksDB.
+ *
+   * This constructor creates (by allocating a block of memory)
+   * a {@code rocksdb::Options} object on the C++ side.
+ */
+ public Options() {
+ super(newOptions());
+ env_ = Env.getDefault();
+ }
+
+ /**
+   * Construct options for opening a RocksDB, reusing existing
+   * database options and column family options.
+ *
+ * @param dbOptions {@link org.rocksdb.DBOptions} instance
+ * @param columnFamilyOptions {@link org.rocksdb.ColumnFamilyOptions}
+ * instance
+ */
+ public Options(final DBOptions dbOptions,
+ final ColumnFamilyOptions columnFamilyOptions) {
+ super(newOptions(dbOptions.nativeHandle_,
+ columnFamilyOptions.nativeHandle_));
+ env_ = Env.getDefault();
+ }
+
+ /**
+   * Copy constructor for Options.
+   *
+   * NOTE: This does a shallow copy, which means the comparator,
+   * merge_operator and other pointers are copied as-is, so the underlying
+   * objects are shared with {@code other}.
+ *
+ * @param other The Options to copy.
+ */
+ public Options(Options other) {
+ super(copyOptions(other.nativeHandle_));
+ this.env_ = other.env_;
+ this.memTableConfig_ = other.memTableConfig_;
+ this.tableFormatConfig_ = other.tableFormatConfig_;
+ this.rateLimiter_ = other.rateLimiter_;
+ this.comparator_ = other.comparator_;
+ this.compactionFilter_ = other.compactionFilter_;
+ this.compactionFilterFactory_ = other.compactionFilterFactory_;
+ this.compactionOptionsUniversal_ = other.compactionOptionsUniversal_;
+ this.compactionOptionsFIFO_ = other.compactionOptionsFIFO_;
+ this.compressionOptions_ = other.compressionOptions_;
+ this.rowCache_ = other.rowCache_;
+ this.writeBufferManager_ = other.writeBufferManager_;
+ }
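+
+  // Illustrative note: because the copy is shallow, both instances share the
+  // same underlying native objects, e.g.:
+  //
+  //   final Options copy = new Options(original);
+  //   // copy.comparator_ and original.comparator_ are the same object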
+
+ @Override
+ public Options setIncreaseParallelism(final int totalThreads) {
+ assert(isOwningHandle());
+ setIncreaseParallelism(nativeHandle_, totalThreads);
+ return this;
+ }
+
+ @Override
+ public Options setCreateIfMissing(final boolean flag) {
+ assert(isOwningHandle());
+ setCreateIfMissing(nativeHandle_, flag);
+ return this;
+ }
+
+ @Override
+ public Options setCreateMissingColumnFamilies(final boolean flag) {
+ assert(isOwningHandle());
+ setCreateMissingColumnFamilies(nativeHandle_, flag);
+ return this;
+ }
+
+ @Override
+ public Options setEnv(final Env env) {
+ assert(isOwningHandle());
+ setEnv(nativeHandle_, env.nativeHandle_);
+ env_ = env;
+ return this;
+ }
+
+ @Override
+ public Env getEnv() {
+ return env_;
+ }
+
+ /**
+   * <p>Set appropriate parameters for bulk loading.
+   * This is a method that returns {@code this} rather than a constructor,
+   * so that multiple similar calls can be chained.
+   * </p>
+   *
+   * <p>All data will be placed in level 0, without any automatic compaction.
+   * It is recommended to manually call {@link RocksDB#compactRange()} over
+   * the full key range before reading from the database, because otherwise
+   * reads can be very slow.</p>
+ *
+ * @return the instance of the current Options.
+ */
+ public Options prepareForBulkLoad() {
+ prepareForBulkLoad(nativeHandle_);
+ return this;
+ }
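+
+  // Bulk-load sketch (illustrative; "/path/to/db" is a placeholder):
+  //
+  //   try (final Options options = new Options()
+  //            .setCreateIfMissing(true)
+  //            .prepareForBulkLoad();
+  //        final RocksDB db = RocksDB.open(options, "/path/to/db")) {
+  //     // ... bulk-insert data ...
+  //     db.compactRange();  // full-range compaction, as recommended above
+  //   }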
+
+ @Override
+ public boolean createIfMissing() {
+ assert(isOwningHandle());
+ return createIfMissing(nativeHandle_);
+ }
+
+ @Override
+ public boolean createMissingColumnFamilies() {
+ assert(isOwningHandle());
+ return createMissingColumnFamilies(nativeHandle_);
+ }
+
+ @Override
+ public Options optimizeForSmallDb() {
+ optimizeForSmallDb(nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public Options optimizeForPointLookup(
+ long blockCacheSizeMb) {
+ optimizeForPointLookup(nativeHandle_,
+ blockCacheSizeMb);
+ return this;
+ }
+
+ @Override
+ public Options optimizeLevelStyleCompaction() {
+ optimizeLevelStyleCompaction(nativeHandle_,
+ DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET);
+ return this;
+ }
+
+ @Override
+ public Options optimizeLevelStyleCompaction(
+ long memtableMemoryBudget) {
+ optimizeLevelStyleCompaction(nativeHandle_,
+ memtableMemoryBudget);
+ return this;
+ }
+
+ @Override
+ public Options optimizeUniversalStyleCompaction() {
+ optimizeUniversalStyleCompaction(nativeHandle_,
+ DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET);
+ return this;
+ }
+
+ @Override
+ public Options optimizeUniversalStyleCompaction(
+ final long memtableMemoryBudget) {
+ optimizeUniversalStyleCompaction(nativeHandle_,
+ memtableMemoryBudget);
+ return this;
+ }
+
+ @Override
+ public Options setComparator(final BuiltinComparator builtinComparator) {
+ assert(isOwningHandle());
+ setComparatorHandle(nativeHandle_, builtinComparator.ordinal());
+ return this;
+ }
+
+ @Override
+ public Options setComparator(
+ final AbstractComparator comparator) {
+ assert(isOwningHandle());
+ setComparatorHandle(nativeHandle_, comparator.nativeHandle_,
+ comparator.getComparatorType().getValue());
+ comparator_ = comparator;
+ return this;
+ }
+
+ @Override
+ public Options setMergeOperatorName(final String name) {
+ assert(isOwningHandle());
+ if (name == null) {
+ throw new IllegalArgumentException(
+ "Merge operator name must not be null.");
+ }
+ setMergeOperatorName(nativeHandle_, name);
+ return this;
+ }
+
+ @Override
+ public Options setMergeOperator(final MergeOperator mergeOperator) {
+ setMergeOperator(nativeHandle_, mergeOperator.nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public Options setCompactionFilter(
+ final AbstractCompactionFilter<? extends AbstractSlice<?>>
+ compactionFilter) {
+ setCompactionFilterHandle(nativeHandle_, compactionFilter.nativeHandle_);
+ compactionFilter_ = compactionFilter;
+ return this;
+ }
+
+ @Override
+ public AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter() {
+ assert (isOwningHandle());
+ return compactionFilter_;
+ }
+
+ @Override
+  public Options setCompactionFilterFactory(
+      final AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>>
+          compactionFilterFactory) {
+    assert (isOwningHandle());
+    setCompactionFilterFactoryHandle(nativeHandle_,
+        compactionFilterFactory.nativeHandle_);
+ compactionFilterFactory_ = compactionFilterFactory;
+ return this;
+ }
+
+ @Override
+ public AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>> compactionFilterFactory() {
+ assert (isOwningHandle());
+ return compactionFilterFactory_;
+ }
+
+ @Override
+ public Options setWriteBufferSize(final long writeBufferSize) {
+ assert(isOwningHandle());
+ setWriteBufferSize(nativeHandle_, writeBufferSize);
+ return this;
+ }
+
+ @Override
+ public long writeBufferSize() {
+ assert(isOwningHandle());
+ return writeBufferSize(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxWriteBufferNumber(final int maxWriteBufferNumber) {
+ assert(isOwningHandle());
+ setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber);
+ return this;
+ }
+
+ @Override
+ public int maxWriteBufferNumber() {
+ assert(isOwningHandle());
+ return maxWriteBufferNumber(nativeHandle_);
+ }
+
+ @Override
+ public boolean errorIfExists() {
+ assert(isOwningHandle());
+ return errorIfExists(nativeHandle_);
+ }
+
+ @Override
+ public Options setErrorIfExists(final boolean errorIfExists) {
+ assert(isOwningHandle());
+ setErrorIfExists(nativeHandle_, errorIfExists);
+ return this;
+ }
+
+ @Override
+ public boolean paranoidChecks() {
+ assert(isOwningHandle());
+ return paranoidChecks(nativeHandle_);
+ }
+
+ @Override
+ public Options setParanoidChecks(final boolean paranoidChecks) {
+ assert(isOwningHandle());
+ setParanoidChecks(nativeHandle_, paranoidChecks);
+ return this;
+ }
+
+ @Override
+ public int maxOpenFiles() {
+ assert(isOwningHandle());
+ return maxOpenFiles(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxFileOpeningThreads(final int maxFileOpeningThreads) {
+ assert(isOwningHandle());
+ setMaxFileOpeningThreads(nativeHandle_, maxFileOpeningThreads);
+ return this;
+ }
+
+ @Override
+ public int maxFileOpeningThreads() {
+ assert(isOwningHandle());
+ return maxFileOpeningThreads(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxTotalWalSize(final long maxTotalWalSize) {
+ assert(isOwningHandle());
+ setMaxTotalWalSize(nativeHandle_, maxTotalWalSize);
+ return this;
+ }
+
+ @Override
+ public long maxTotalWalSize() {
+ assert(isOwningHandle());
+ return maxTotalWalSize(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxOpenFiles(final int maxOpenFiles) {
+ assert(isOwningHandle());
+ setMaxOpenFiles(nativeHandle_, maxOpenFiles);
+ return this;
+ }
+
+ @Override
+ public boolean useFsync() {
+ assert(isOwningHandle());
+ return useFsync(nativeHandle_);
+ }
+
+ @Override
+ public Options setUseFsync(final boolean useFsync) {
+ assert(isOwningHandle());
+ setUseFsync(nativeHandle_, useFsync);
+ return this;
+ }
+
+ @Override
+ public Options setDbPaths(final Collection<DbPath> dbPaths) {
+ assert(isOwningHandle());
+
+ final int len = dbPaths.size();
+    final String[] paths = new String[len];
+    final long[] targetSizes = new long[len];
+
+ int i = 0;
+ for(final DbPath dbPath : dbPaths) {
+ paths[i] = dbPath.path.toString();
+ targetSizes[i] = dbPath.targetSize;
+ i++;
+ }
+ setDbPaths(nativeHandle_, paths, targetSizes);
+ return this;
+ }
+
+ @Override
+ public List<DbPath> dbPaths() {
+ final int len = (int)dbPathsLen(nativeHandle_);
+ if(len == 0) {
+ return Collections.emptyList();
+ } else {
+      final String[] paths = new String[len];
+      final long[] targetSizes = new long[len];
+
+ dbPaths(nativeHandle_, paths, targetSizes);
+
+ final List<DbPath> dbPaths = new ArrayList<>();
+ for(int i = 0; i < len; i++) {
+ dbPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i]));
+ }
+ return dbPaths;
+ }
+ }
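+
+  // Illustrative sketch: spreading SST files across two paths (sizes are
+  // placeholder values):
+  //
+  //   options.setDbPaths(java.util.Arrays.asList(
+  //       new DbPath(Paths.get("/fast/ssd"), 10L * 1024 * 1024 * 1024),
+  //       new DbPath(Paths.get("/big/hdd"), 0L)));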
+
+ @Override
+ public String dbLogDir() {
+ assert(isOwningHandle());
+ return dbLogDir(nativeHandle_);
+ }
+
+ @Override
+ public Options setDbLogDir(final String dbLogDir) {
+ assert(isOwningHandle());
+ setDbLogDir(nativeHandle_, dbLogDir);
+ return this;
+ }
+
+ @Override
+ public String walDir() {
+ assert(isOwningHandle());
+ return walDir(nativeHandle_);
+ }
+
+ @Override
+ public Options setWalDir(final String walDir) {
+ assert(isOwningHandle());
+ setWalDir(nativeHandle_, walDir);
+ return this;
+ }
+
+ @Override
+ public long deleteObsoleteFilesPeriodMicros() {
+ assert(isOwningHandle());
+ return deleteObsoleteFilesPeriodMicros(nativeHandle_);
+ }
+
+ @Override
+ public Options setDeleteObsoleteFilesPeriodMicros(
+ final long micros) {
+ assert(isOwningHandle());
+ setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros);
+ return this;
+ }
+
+ @Override
+ @Deprecated
+ public int maxBackgroundCompactions() {
+ assert(isOwningHandle());
+ return maxBackgroundCompactions(nativeHandle_);
+ }
+
+ @Override
+ public Options setStatistics(final Statistics statistics) {
+ assert(isOwningHandle());
+ setStatistics(nativeHandle_, statistics.nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public Statistics statistics() {
+ assert(isOwningHandle());
+ final long statisticsNativeHandle = statistics(nativeHandle_);
+ if(statisticsNativeHandle == 0) {
+ return null;
+ } else {
+ return new Statistics(statisticsNativeHandle);
+ }
+ }
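+
+  // Illustrative note: statistics() returns null unless statistics were
+  // previously enabled via setStatistics(...), e.g.:
+  //
+  //   options.setStatistics(new Statistics());
+  //   final Statistics stats = options.statistics();  // now non-null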
+
+ @Override
+ @Deprecated
+ public void setBaseBackgroundCompactions(
+ final int baseBackgroundCompactions) {
+ assert(isOwningHandle());
+ setBaseBackgroundCompactions(nativeHandle_, baseBackgroundCompactions);
+ }
+
+ @Override
+ public int baseBackgroundCompactions() {
+ assert(isOwningHandle());
+ return baseBackgroundCompactions(nativeHandle_);
+ }
+
+ @Override
+ @Deprecated
+ public Options setMaxBackgroundCompactions(
+ final int maxBackgroundCompactions) {
+ assert(isOwningHandle());
+ setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions);
+ return this;
+ }
+
+ @Override
+ public Options setMaxSubcompactions(final int maxSubcompactions) {
+ assert(isOwningHandle());
+ setMaxSubcompactions(nativeHandle_, maxSubcompactions);
+ return this;
+ }
+
+ @Override
+ public int maxSubcompactions() {
+ assert(isOwningHandle());
+ return maxSubcompactions(nativeHandle_);
+ }
+
+ @Override
+ @Deprecated
+ public int maxBackgroundFlushes() {
+ assert(isOwningHandle());
+ return maxBackgroundFlushes(nativeHandle_);
+ }
+
+ @Override
+ @Deprecated
+ public Options setMaxBackgroundFlushes(
+ final int maxBackgroundFlushes) {
+ assert(isOwningHandle());
+ setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes);
+ return this;
+ }
+
+ @Override
+ public int maxBackgroundJobs() {
+ assert(isOwningHandle());
+ return maxBackgroundJobs(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxBackgroundJobs(final int maxBackgroundJobs) {
+ assert(isOwningHandle());
+ setMaxBackgroundJobs(nativeHandle_, maxBackgroundJobs);
+ return this;
+ }
+
+ @Override
+ public long maxLogFileSize() {
+ assert(isOwningHandle());
+ return maxLogFileSize(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxLogFileSize(final long maxLogFileSize) {
+ assert(isOwningHandle());
+ setMaxLogFileSize(nativeHandle_, maxLogFileSize);
+ return this;
+ }
+
+ @Override
+ public long logFileTimeToRoll() {
+ assert(isOwningHandle());
+ return logFileTimeToRoll(nativeHandle_);
+ }
+
+ @Override
+ public Options setLogFileTimeToRoll(final long logFileTimeToRoll) {
+ assert(isOwningHandle());
+ setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll);
+ return this;
+ }
+
+ @Override
+ public long keepLogFileNum() {
+ assert(isOwningHandle());
+ return keepLogFileNum(nativeHandle_);
+ }
+
+ @Override
+ public Options setKeepLogFileNum(final long keepLogFileNum) {
+ assert(isOwningHandle());
+ setKeepLogFileNum(nativeHandle_, keepLogFileNum);
+ return this;
+ }
+
+ @Override
+ public Options setRecycleLogFileNum(final long recycleLogFileNum) {
+ assert(isOwningHandle());
+ setRecycleLogFileNum(nativeHandle_, recycleLogFileNum);
+ return this;
+ }
+
+ @Override
+ public long recycleLogFileNum() {
+ assert(isOwningHandle());
+ return recycleLogFileNum(nativeHandle_);
+ }
+
+ @Override
+ public long maxManifestFileSize() {
+ assert(isOwningHandle());
+ return maxManifestFileSize(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxManifestFileSize(
+ final long maxManifestFileSize) {
+ assert(isOwningHandle());
+ setMaxManifestFileSize(nativeHandle_, maxManifestFileSize);
+ return this;
+ }
+
+ @Override
+ public Options setMaxTableFilesSizeFIFO(
+ final long maxTableFilesSize) {
+ assert(maxTableFilesSize > 0); // unsigned native type
+ assert(isOwningHandle());
+ setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize);
+ return this;
+ }
+
+ @Override
+ public long maxTableFilesSizeFIFO() {
+ return maxTableFilesSizeFIFO(nativeHandle_);
+ }
+
+ @Override
+ public int tableCacheNumshardbits() {
+ assert(isOwningHandle());
+ return tableCacheNumshardbits(nativeHandle_);
+ }
+
+ @Override
+ public Options setTableCacheNumshardbits(
+ final int tableCacheNumshardbits) {
+ assert(isOwningHandle());
+ setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits);
+ return this;
+ }
+
+ @Override
+ public long walTtlSeconds() {
+ assert(isOwningHandle());
+ return walTtlSeconds(nativeHandle_);
+ }
+
+ @Override
+ public Options setWalTtlSeconds(final long walTtlSeconds) {
+ assert(isOwningHandle());
+ setWalTtlSeconds(nativeHandle_, walTtlSeconds);
+ return this;
+ }
+
+ @Override
+ public long walSizeLimitMB() {
+ assert(isOwningHandle());
+ return walSizeLimitMB(nativeHandle_);
+ }
+
+ @Override
+ public Options setWalSizeLimitMB(final long sizeLimitMB) {
+ assert(isOwningHandle());
+ setWalSizeLimitMB(nativeHandle_, sizeLimitMB);
+ return this;
+ }
+
+ @Override
+ public long manifestPreallocationSize() {
+ assert(isOwningHandle());
+ return manifestPreallocationSize(nativeHandle_);
+ }
+
+ @Override
+ public Options setManifestPreallocationSize(final long size) {
+ assert(isOwningHandle());
+ setManifestPreallocationSize(nativeHandle_, size);
+ return this;
+ }
+
+ @Override
+ public Options setUseDirectReads(final boolean useDirectReads) {
+ assert(isOwningHandle());
+ setUseDirectReads(nativeHandle_, useDirectReads);
+ return this;
+ }
+
+ @Override
+ public boolean useDirectReads() {
+ assert(isOwningHandle());
+ return useDirectReads(nativeHandle_);
+ }
+
+ @Override
+ public Options setUseDirectIoForFlushAndCompaction(
+ final boolean useDirectIoForFlushAndCompaction) {
+ assert(isOwningHandle());
+ setUseDirectIoForFlushAndCompaction(nativeHandle_, useDirectIoForFlushAndCompaction);
+ return this;
+ }
+
+ @Override
+ public boolean useDirectIoForFlushAndCompaction() {
+ assert(isOwningHandle());
+ return useDirectIoForFlushAndCompaction(nativeHandle_);
+ }
+
+ @Override
+ public Options setAllowFAllocate(final boolean allowFAllocate) {
+ assert(isOwningHandle());
+ setAllowFAllocate(nativeHandle_, allowFAllocate);
+ return this;
+ }
+
+ @Override
+ public boolean allowFAllocate() {
+ assert(isOwningHandle());
+ return allowFAllocate(nativeHandle_);
+ }
+
+ @Override
+ public boolean allowMmapReads() {
+ assert(isOwningHandle());
+ return allowMmapReads(nativeHandle_);
+ }
+
+ @Override
+ public Options setAllowMmapReads(final boolean allowMmapReads) {
+ assert(isOwningHandle());
+ setAllowMmapReads(nativeHandle_, allowMmapReads);
+ return this;
+ }
+
+ @Override
+ public boolean allowMmapWrites() {
+ assert(isOwningHandle());
+ return allowMmapWrites(nativeHandle_);
+ }
+
+ @Override
+ public Options setAllowMmapWrites(final boolean allowMmapWrites) {
+ assert(isOwningHandle());
+ setAllowMmapWrites(nativeHandle_, allowMmapWrites);
+ return this;
+ }
+
+ @Override
+ public boolean isFdCloseOnExec() {
+ assert(isOwningHandle());
+ return isFdCloseOnExec(nativeHandle_);
+ }
+
+ @Override
+ public Options setIsFdCloseOnExec(final boolean isFdCloseOnExec) {
+ assert(isOwningHandle());
+ setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec);
+ return this;
+ }
+
+ @Override
+ public int statsDumpPeriodSec() {
+ assert(isOwningHandle());
+ return statsDumpPeriodSec(nativeHandle_);
+ }
+
+ @Override
+ public Options setStatsDumpPeriodSec(final int statsDumpPeriodSec) {
+ assert(isOwningHandle());
+ setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec);
+ return this;
+ }
+
+ @Override
+ public Options setStatsPersistPeriodSec(
+ final int statsPersistPeriodSec) {
+ assert(isOwningHandle());
+ setStatsPersistPeriodSec(nativeHandle_, statsPersistPeriodSec);
+ return this;
+ }
+
+ @Override
+ public int statsPersistPeriodSec() {
+ assert(isOwningHandle());
+ return statsPersistPeriodSec(nativeHandle_);
+ }
+
+ @Override
+ public Options setStatsHistoryBufferSize(
+ final long statsHistoryBufferSize) {
+ assert(isOwningHandle());
+ setStatsHistoryBufferSize(nativeHandle_, statsHistoryBufferSize);
+ return this;
+ }
+
+ @Override
+ public long statsHistoryBufferSize() {
+ assert(isOwningHandle());
+ return statsHistoryBufferSize(nativeHandle_);
+ }
+
+ @Override
+ public boolean adviseRandomOnOpen() {
+ return adviseRandomOnOpen(nativeHandle_);
+ }
+
+ @Override
+ public Options setAdviseRandomOnOpen(final boolean adviseRandomOnOpen) {
+ assert(isOwningHandle());
+ setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen);
+ return this;
+ }
+
+ @Override
+ public Options setDbWriteBufferSize(final long dbWriteBufferSize) {
+ assert(isOwningHandle());
+ setDbWriteBufferSize(nativeHandle_, dbWriteBufferSize);
+ return this;
+ }
+
+ @Override
+ public Options setWriteBufferManager(final WriteBufferManager writeBufferManager) {
+ assert(isOwningHandle());
+ setWriteBufferManager(nativeHandle_, writeBufferManager.nativeHandle_);
+ this.writeBufferManager_ = writeBufferManager;
+ return this;
+ }
+
+ @Override
+ public WriteBufferManager writeBufferManager() {
+ assert(isOwningHandle());
+ return this.writeBufferManager_;
+ }
+
+ @Override
+ public long dbWriteBufferSize() {
+ assert(isOwningHandle());
+ return dbWriteBufferSize(nativeHandle_);
+ }
+
+ @Override
+ public Options setAccessHintOnCompactionStart(final AccessHint accessHint) {
+ assert(isOwningHandle());
+ setAccessHintOnCompactionStart(nativeHandle_, accessHint.getValue());
+ return this;
+ }
+
+ @Override
+ public AccessHint accessHintOnCompactionStart() {
+ assert(isOwningHandle());
+ return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_));
+ }
+
+ @Override
+ public Options setNewTableReaderForCompactionInputs(
+ final boolean newTableReaderForCompactionInputs) {
+ assert(isOwningHandle());
+ setNewTableReaderForCompactionInputs(nativeHandle_,
+ newTableReaderForCompactionInputs);
+ return this;
+ }
+
+ @Override
+ public boolean newTableReaderForCompactionInputs() {
+ assert(isOwningHandle());
+ return newTableReaderForCompactionInputs(nativeHandle_);
+ }
+
+ @Override
+ public Options setCompactionReadaheadSize(final long compactionReadaheadSize) {
+ assert(isOwningHandle());
+ setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize);
+ return this;
+ }
+
+ @Override
+ public long compactionReadaheadSize() {
+ assert(isOwningHandle());
+ return compactionReadaheadSize(nativeHandle_);
+ }
+
+ @Override
+ public Options setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) {
+ assert(isOwningHandle());
+ setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize);
+ return this;
+ }
+
+ @Override
+ public long randomAccessMaxBufferSize() {
+ assert(isOwningHandle());
+ return randomAccessMaxBufferSize(nativeHandle_);
+ }
+
+ @Override
+ public Options setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) {
+ assert(isOwningHandle());
+ setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize);
+ return this;
+ }
+
+ @Override
+ public long writableFileMaxBufferSize() {
+ assert(isOwningHandle());
+ return writableFileMaxBufferSize(nativeHandle_);
+ }
+
+ @Override
+ public boolean useAdaptiveMutex() {
+ assert(isOwningHandle());
+ return useAdaptiveMutex(nativeHandle_);
+ }
+
+ @Override
+ public Options setUseAdaptiveMutex(final boolean useAdaptiveMutex) {
+ assert(isOwningHandle());
+ setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex);
+ return this;
+ }
+
+ @Override
+ public long bytesPerSync() {
+ return bytesPerSync(nativeHandle_);
+ }
+
+ @Override
+ public Options setBytesPerSync(final long bytesPerSync) {
+ assert(isOwningHandle());
+ setBytesPerSync(nativeHandle_, bytesPerSync);
+ return this;
+ }
+
+ @Override
+ public Options setWalBytesPerSync(final long walBytesPerSync) {
+ assert(isOwningHandle());
+ setWalBytesPerSync(nativeHandle_, walBytesPerSync);
+ return this;
+ }
+
+ @Override
+ public long walBytesPerSync() {
+ assert(isOwningHandle());
+ return walBytesPerSync(nativeHandle_);
+ }
+
+ @Override
+ public Options setStrictBytesPerSync(final boolean strictBytesPerSync) {
+ assert(isOwningHandle());
+ setStrictBytesPerSync(nativeHandle_, strictBytesPerSync);
+ return this;
+ }
+
+ @Override
+ public boolean strictBytesPerSync() {
+ assert(isOwningHandle());
+ return strictBytesPerSync(nativeHandle_);
+ }
+
+ @Override
+ public Options setEnableThreadTracking(final boolean enableThreadTracking) {
+ assert(isOwningHandle());
+ setEnableThreadTracking(nativeHandle_, enableThreadTracking);
+ return this;
+ }
+
+ @Override
+ public boolean enableThreadTracking() {
+ assert(isOwningHandle());
+ return enableThreadTracking(nativeHandle_);
+ }
+
+ @Override
+ public Options setDelayedWriteRate(final long delayedWriteRate) {
+ assert(isOwningHandle());
+ setDelayedWriteRate(nativeHandle_, delayedWriteRate);
+ return this;
+ }
+
+ @Override
+  public long delayedWriteRate() {
+ return delayedWriteRate(nativeHandle_);
+ }
+
+ @Override
+ public Options setEnablePipelinedWrite(final boolean enablePipelinedWrite) {
+ setEnablePipelinedWrite(nativeHandle_, enablePipelinedWrite);
+ return this;
+ }
+
+ @Override
+ public boolean enablePipelinedWrite() {
+ return enablePipelinedWrite(nativeHandle_);
+ }
+
+ @Override
+ public Options setUnorderedWrite(final boolean unorderedWrite) {
+ setUnorderedWrite(nativeHandle_, unorderedWrite);
+ return this;
+ }
+
+ @Override
+ public boolean unorderedWrite() {
+ return unorderedWrite(nativeHandle_);
+ }
+
+ @Override
+ public Options setAllowConcurrentMemtableWrite(
+ final boolean allowConcurrentMemtableWrite) {
+ setAllowConcurrentMemtableWrite(nativeHandle_,
+ allowConcurrentMemtableWrite);
+ return this;
+ }
+
+ @Override
+ public boolean allowConcurrentMemtableWrite() {
+ return allowConcurrentMemtableWrite(nativeHandle_);
+ }
+
+ @Override
+ public Options setEnableWriteThreadAdaptiveYield(
+ final boolean enableWriteThreadAdaptiveYield) {
+ setEnableWriteThreadAdaptiveYield(nativeHandle_,
+ enableWriteThreadAdaptiveYield);
+ return this;
+ }
+
+ @Override
+ public boolean enableWriteThreadAdaptiveYield() {
+ return enableWriteThreadAdaptiveYield(nativeHandle_);
+ }
+
+ @Override
+ public Options setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) {
+ setWriteThreadMaxYieldUsec(nativeHandle_, writeThreadMaxYieldUsec);
+ return this;
+ }
+
+ @Override
+ public long writeThreadMaxYieldUsec() {
+ return writeThreadMaxYieldUsec(nativeHandle_);
+ }
+
+ @Override
+ public Options setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) {
+ setWriteThreadSlowYieldUsec(nativeHandle_, writeThreadSlowYieldUsec);
+ return this;
+ }
+
+ @Override
+ public long writeThreadSlowYieldUsec() {
+ return writeThreadSlowYieldUsec(nativeHandle_);
+ }
+
+ @Override
+ public Options setSkipStatsUpdateOnDbOpen(final boolean skipStatsUpdateOnDbOpen) {
+ assert(isOwningHandle());
+ setSkipStatsUpdateOnDbOpen(nativeHandle_, skipStatsUpdateOnDbOpen);
+ return this;
+ }
+
+ @Override
+ public boolean skipStatsUpdateOnDbOpen() {
+ assert(isOwningHandle());
+ return skipStatsUpdateOnDbOpen(nativeHandle_);
+ }
+
+ @Override
+ public Options setWalRecoveryMode(final WALRecoveryMode walRecoveryMode) {
+ assert(isOwningHandle());
+ setWalRecoveryMode(nativeHandle_, walRecoveryMode.getValue());
+ return this;
+ }
+
+ @Override
+ public WALRecoveryMode walRecoveryMode() {
+ assert(isOwningHandle());
+ return WALRecoveryMode.getWALRecoveryMode(walRecoveryMode(nativeHandle_));
+ }
+
+ @Override
+ public Options setAllow2pc(final boolean allow2pc) {
+ assert(isOwningHandle());
+ setAllow2pc(nativeHandle_, allow2pc);
+ return this;
+ }
+
+ @Override
+ public boolean allow2pc() {
+ assert(isOwningHandle());
+ return allow2pc(nativeHandle_);
+ }
+
+ @Override
+ public Options setRowCache(final Cache rowCache) {
+ assert(isOwningHandle());
+ setRowCache(nativeHandle_, rowCache.nativeHandle_);
+ this.rowCache_ = rowCache;
+ return this;
+ }
+
+ @Override
+ public Cache rowCache() {
+ assert(isOwningHandle());
+ return this.rowCache_;
+ }
+
+ @Override
+ public Options setWalFilter(final AbstractWalFilter walFilter) {
+ assert(isOwningHandle());
+ setWalFilter(nativeHandle_, walFilter.nativeHandle_);
+ this.walFilter_ = walFilter;
+ return this;
+ }
+
+ @Override
+ public WalFilter walFilter() {
+ assert(isOwningHandle());
+ return this.walFilter_;
+ }
+
+ @Override
+ public Options setFailIfOptionsFileError(final boolean failIfOptionsFileError) {
+ assert(isOwningHandle());
+ setFailIfOptionsFileError(nativeHandle_, failIfOptionsFileError);
+ return this;
+ }
+
+ @Override
+ public boolean failIfOptionsFileError() {
+ assert(isOwningHandle());
+ return failIfOptionsFileError(nativeHandle_);
+ }
+
+ @Override
+ public Options setDumpMallocStats(final boolean dumpMallocStats) {
+ assert(isOwningHandle());
+ setDumpMallocStats(nativeHandle_, dumpMallocStats);
+ return this;
+ }
+
+ @Override
+ public boolean dumpMallocStats() {
+ assert(isOwningHandle());
+ return dumpMallocStats(nativeHandle_);
+ }
+
+ @Override
+ public Options setAvoidFlushDuringRecovery(final boolean avoidFlushDuringRecovery) {
+ assert(isOwningHandle());
+ setAvoidFlushDuringRecovery(nativeHandle_, avoidFlushDuringRecovery);
+ return this;
+ }
+
+ @Override
+ public boolean avoidFlushDuringRecovery() {
+ assert(isOwningHandle());
+ return avoidFlushDuringRecovery(nativeHandle_);
+ }
+
+ @Override
+ public Options setAvoidFlushDuringShutdown(final boolean avoidFlushDuringShutdown) {
+ assert(isOwningHandle());
+ setAvoidFlushDuringShutdown(nativeHandle_, avoidFlushDuringShutdown);
+ return this;
+ }
+
+ @Override
+ public boolean avoidFlushDuringShutdown() {
+ assert(isOwningHandle());
+ return avoidFlushDuringShutdown(nativeHandle_);
+ }
+
+ @Override
+ public Options setAllowIngestBehind(final boolean allowIngestBehind) {
+ assert(isOwningHandle());
+ setAllowIngestBehind(nativeHandle_, allowIngestBehind);
+ return this;
+ }
+
+ @Override
+ public boolean allowIngestBehind() {
+ assert(isOwningHandle());
+ return allowIngestBehind(nativeHandle_);
+ }
+
+ @Override
+ public Options setPreserveDeletes(final boolean preserveDeletes) {
+ assert(isOwningHandle());
+ setPreserveDeletes(nativeHandle_, preserveDeletes);
+ return this;
+ }
+
+ @Override
+ public boolean preserveDeletes() {
+ assert(isOwningHandle());
+ return preserveDeletes(nativeHandle_);
+ }
+
+ @Override
+ public Options setTwoWriteQueues(final boolean twoWriteQueues) {
+ assert(isOwningHandle());
+ setTwoWriteQueues(nativeHandle_, twoWriteQueues);
+ return this;
+ }
+
+ @Override
+ public boolean twoWriteQueues() {
+ assert(isOwningHandle());
+ return twoWriteQueues(nativeHandle_);
+ }
+
+ @Override
+ public Options setManualWalFlush(final boolean manualWalFlush) {
+ assert(isOwningHandle());
+ setManualWalFlush(nativeHandle_, manualWalFlush);
+ return this;
+ }
+
+ @Override
+ public boolean manualWalFlush() {
+ assert(isOwningHandle());
+ return manualWalFlush(nativeHandle_);
+ }
+
+ @Override
+ public MemTableConfig memTableConfig() {
+ return this.memTableConfig_;
+ }
+
+ @Override
+ public Options setMemTableConfig(final MemTableConfig config) {
+ memTableConfig_ = config;
+ setMemTableFactory(nativeHandle_, config.newMemTableFactoryHandle());
+ return this;
+ }
+
+ @Override
+ public Options setRateLimiter(final RateLimiter rateLimiter) {
+ assert(isOwningHandle());
+ rateLimiter_ = rateLimiter;
+ setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public Options setSstFileManager(final SstFileManager sstFileManager) {
+ assert(isOwningHandle());
+ setSstFileManager(nativeHandle_, sstFileManager.nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public Options setLogger(final Logger logger) {
+ assert(isOwningHandle());
+ setLogger(nativeHandle_, logger.nativeHandle_);
+ return this;
+ }
+
+ @Override
+ public Options setInfoLogLevel(final InfoLogLevel infoLogLevel) {
+ assert(isOwningHandle());
+ setInfoLogLevel(nativeHandle_, infoLogLevel.getValue());
+ return this;
+ }
+
+ @Override
+ public InfoLogLevel infoLogLevel() {
+ assert(isOwningHandle());
+ return InfoLogLevel.getInfoLogLevel(
+ infoLogLevel(nativeHandle_));
+ }
+
+ @Override
+ public String memTableFactoryName() {
+ assert(isOwningHandle());
+ return memTableFactoryName(nativeHandle_);
+ }
+
+ @Override
+ public TableFormatConfig tableFormatConfig() {
+ return this.tableFormatConfig_;
+ }
+
+ @Override
+ public Options setTableFormatConfig(final TableFormatConfig config) {
+ tableFormatConfig_ = config;
+ setTableFactory(nativeHandle_, config.newTableFactoryHandle());
+ return this;
+ }
+
+ @Override
+ public String tableFactoryName() {
+ assert(isOwningHandle());
+ return tableFactoryName(nativeHandle_);
+ }
+
+ @Override
+ public Options useFixedLengthPrefixExtractor(final int n) {
+ assert(isOwningHandle());
+ useFixedLengthPrefixExtractor(nativeHandle_, n);
+ return this;
+ }
+
+ @Override
+ public Options useCappedPrefixExtractor(final int n) {
+ assert(isOwningHandle());
+ useCappedPrefixExtractor(nativeHandle_, n);
+ return this;
+ }
+
+ @Override
+ public CompressionType compressionType() {
+ return CompressionType.getCompressionType(compressionType(nativeHandle_));
+ }
+
+ @Override
+ public Options setCompressionPerLevel(
+ final List<CompressionType> compressionLevels) {
+ final byte[] byteCompressionTypes = new byte[
+ compressionLevels.size()];
+ for (int i = 0; i < compressionLevels.size(); i++) {
+ byteCompressionTypes[i] = compressionLevels.get(i).getValue();
+ }
+ setCompressionPerLevel(nativeHandle_, byteCompressionTypes);
+ return this;
+ }
+
+ @Override
+ public List<CompressionType> compressionPerLevel() {
+ final byte[] byteCompressionTypes =
+ compressionPerLevel(nativeHandle_);
+ final List<CompressionType> compressionLevels = new ArrayList<>();
+    for (final byte byteCompressionType : byteCompressionTypes) {
+ compressionLevels.add(CompressionType.getCompressionType(
+ byteCompressionType));
+ }
+ return compressionLevels;
+ }
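+
+  // Illustrative sketch: leave the upper levels uncompressed and compress the
+  // lower ones with LZ4 (list index i configures level i):
+  //
+  //   options.setCompressionPerLevel(java.util.Arrays.asList(
+  //       CompressionType.NO_COMPRESSION,
+  //       CompressionType.NO_COMPRESSION,
+  //       CompressionType.LZ4_COMPRESSION,
+  //       CompressionType.LZ4_COMPRESSION));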
+
+ @Override
+ public Options setCompressionType(CompressionType compressionType) {
+ setCompressionType(nativeHandle_, compressionType.getValue());
+ return this;
+ }
+
+ @Override
+ public Options setBottommostCompressionType(
+ final CompressionType bottommostCompressionType) {
+ setBottommostCompressionType(nativeHandle_,
+ bottommostCompressionType.getValue());
+ return this;
+ }
+
+ @Override
+ public CompressionType bottommostCompressionType() {
+ return CompressionType.getCompressionType(
+ bottommostCompressionType(nativeHandle_));
+ }
+
+ @Override
+ public Options setBottommostCompressionOptions(
+ final CompressionOptions bottommostCompressionOptions) {
+ setBottommostCompressionOptions(nativeHandle_,
+ bottommostCompressionOptions.nativeHandle_);
+ this.bottommostCompressionOptions_ = bottommostCompressionOptions;
+ return this;
+ }
+
+ @Override
+ public CompressionOptions bottommostCompressionOptions() {
+ return this.bottommostCompressionOptions_;
+ }
+
+ @Override
+ public Options setCompressionOptions(
+ final CompressionOptions compressionOptions) {
+ setCompressionOptions(nativeHandle_, compressionOptions.nativeHandle_);
+ this.compressionOptions_ = compressionOptions;
+ return this;
+ }
+
+ @Override
+ public CompressionOptions compressionOptions() {
+ return this.compressionOptions_;
+ }
+
+ @Override
+ public CompactionStyle compactionStyle() {
+ return CompactionStyle.fromValue(compactionStyle(nativeHandle_));
+ }
+
+ @Override
+ public Options setCompactionStyle(
+ final CompactionStyle compactionStyle) {
+ setCompactionStyle(nativeHandle_, compactionStyle.getValue());
+ return this;
+ }
+
+ @Override
+ public int numLevels() {
+ return numLevels(nativeHandle_);
+ }
+
+ @Override
+ public Options setNumLevels(int numLevels) {
+ setNumLevels(nativeHandle_, numLevels);
+ return this;
+ }
+
+ @Override
+ public int levelZeroFileNumCompactionTrigger() {
+ return levelZeroFileNumCompactionTrigger(nativeHandle_);
+ }
+
+ @Override
+ public Options setLevelZeroFileNumCompactionTrigger(
+ final int numFiles) {
+ setLevelZeroFileNumCompactionTrigger(
+ nativeHandle_, numFiles);
+ return this;
+ }
+
+ @Override
+ public int levelZeroSlowdownWritesTrigger() {
+ return levelZeroSlowdownWritesTrigger(nativeHandle_);
+ }
+
+ @Override
+ public Options setLevelZeroSlowdownWritesTrigger(
+ final int numFiles) {
+ setLevelZeroSlowdownWritesTrigger(nativeHandle_, numFiles);
+ return this;
+ }
+
+ @Override
+ public int levelZeroStopWritesTrigger() {
+ return levelZeroStopWritesTrigger(nativeHandle_);
+ }
+
+ @Override
+ public Options setLevelZeroStopWritesTrigger(
+ final int numFiles) {
+ setLevelZeroStopWritesTrigger(nativeHandle_, numFiles);
+ return this;
+ }
+
+ @Override
+ public long targetFileSizeBase() {
+ return targetFileSizeBase(nativeHandle_);
+ }
+
+ @Override
+ public Options setTargetFileSizeBase(long targetFileSizeBase) {
+ setTargetFileSizeBase(nativeHandle_, targetFileSizeBase);
+ return this;
+ }
+
+ @Override
+ public int targetFileSizeMultiplier() {
+ return targetFileSizeMultiplier(nativeHandle_);
+ }
+
+ @Override
+ public Options setTargetFileSizeMultiplier(int multiplier) {
+ setTargetFileSizeMultiplier(nativeHandle_, multiplier);
+ return this;
+ }
+
+ @Override
+ public Options setMaxBytesForLevelBase(final long maxBytesForLevelBase) {
+ setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase);
+ return this;
+ }
+
+ @Override
+ public long maxBytesForLevelBase() {
+ return maxBytesForLevelBase(nativeHandle_);
+ }
+
+ @Override
+ public Options setLevelCompactionDynamicLevelBytes(
+ final boolean enableLevelCompactionDynamicLevelBytes) {
+ setLevelCompactionDynamicLevelBytes(nativeHandle_,
+ enableLevelCompactionDynamicLevelBytes);
+ return this;
+ }
+
+ @Override
+ public boolean levelCompactionDynamicLevelBytes() {
+ return levelCompactionDynamicLevelBytes(nativeHandle_);
+ }
+
+ @Override
+ public double maxBytesForLevelMultiplier() {
+ return maxBytesForLevelMultiplier(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxBytesForLevelMultiplier(final double multiplier) {
+ setMaxBytesForLevelMultiplier(nativeHandle_, multiplier);
+ return this;
+ }
+
+ @Override
+ public long maxCompactionBytes() {
+ return maxCompactionBytes(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxCompactionBytes(final long maxCompactionBytes) {
+ setMaxCompactionBytes(nativeHandle_, maxCompactionBytes);
+ return this;
+ }
+
+ @Override
+ public long arenaBlockSize() {
+ return arenaBlockSize(nativeHandle_);
+ }
+
+ @Override
+ public Options setArenaBlockSize(final long arenaBlockSize) {
+ setArenaBlockSize(nativeHandle_, arenaBlockSize);
+ return this;
+ }
+
+ @Override
+ public boolean disableAutoCompactions() {
+ return disableAutoCompactions(nativeHandle_);
+ }
+
+ @Override
+ public Options setDisableAutoCompactions(
+ final boolean disableAutoCompactions) {
+ setDisableAutoCompactions(nativeHandle_, disableAutoCompactions);
+ return this;
+ }
+
+ @Override
+ public long maxSequentialSkipInIterations() {
+ return maxSequentialSkipInIterations(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxSequentialSkipInIterations(
+ final long maxSequentialSkipInIterations) {
+ setMaxSequentialSkipInIterations(nativeHandle_,
+ maxSequentialSkipInIterations);
+ return this;
+ }
+
+ @Override
+ public boolean inplaceUpdateSupport() {
+ return inplaceUpdateSupport(nativeHandle_);
+ }
+
+ @Override
+ public Options setInplaceUpdateSupport(
+ final boolean inplaceUpdateSupport) {
+ setInplaceUpdateSupport(nativeHandle_, inplaceUpdateSupport);
+ return this;
+ }
+
+ @Override
+ public long inplaceUpdateNumLocks() {
+ return inplaceUpdateNumLocks(nativeHandle_);
+ }
+
+ @Override
+ public Options setInplaceUpdateNumLocks(
+ final long inplaceUpdateNumLocks) {
+ setInplaceUpdateNumLocks(nativeHandle_, inplaceUpdateNumLocks);
+ return this;
+ }
+
+ @Override
+ public double memtablePrefixBloomSizeRatio() {
+ return memtablePrefixBloomSizeRatio(nativeHandle_);
+ }
+
+ @Override
+ public Options setMemtablePrefixBloomSizeRatio(final double memtablePrefixBloomSizeRatio) {
+ setMemtablePrefixBloomSizeRatio(nativeHandle_, memtablePrefixBloomSizeRatio);
+ return this;
+ }
+
+ @Override
+ public int bloomLocality() {
+ return bloomLocality(nativeHandle_);
+ }
+
+ @Override
+ public Options setBloomLocality(final int bloomLocality) {
+ setBloomLocality(nativeHandle_, bloomLocality);
+ return this;
+ }
+
+ @Override
+ public long maxSuccessiveMerges() {
+ return maxSuccessiveMerges(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxSuccessiveMerges(long maxSuccessiveMerges) {
+ setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges);
+ return this;
+ }
+
+ @Override
+ public int minWriteBufferNumberToMerge() {
+ return minWriteBufferNumberToMerge(nativeHandle_);
+ }
+
+ @Override
+ public Options setMinWriteBufferNumberToMerge(
+ final int minWriteBufferNumberToMerge) {
+ setMinWriteBufferNumberToMerge(nativeHandle_, minWriteBufferNumberToMerge);
+ return this;
+ }
+
+ @Override
+ public Options setOptimizeFiltersForHits(
+ final boolean optimizeFiltersForHits) {
+ setOptimizeFiltersForHits(nativeHandle_, optimizeFiltersForHits);
+ return this;
+ }
+
+ @Override
+ public boolean optimizeFiltersForHits() {
+ return optimizeFiltersForHits(nativeHandle_);
+ }
+
+ @Override
+  public Options setMemtableHugePageSize(final long memtableHugePageSize) {
+    setMemtableHugePageSize(nativeHandle_, memtableHugePageSize);
+ return this;
+ }
+
+ @Override
+ public long memtableHugePageSize() {
+ return memtableHugePageSize(nativeHandle_);
+ }
+
+ @Override
+ public Options setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) {
+ setSoftPendingCompactionBytesLimit(nativeHandle_,
+ softPendingCompactionBytesLimit);
+ return this;
+ }
+
+ @Override
+ public long softPendingCompactionBytesLimit() {
+ return softPendingCompactionBytesLimit(nativeHandle_);
+ }
+
+ @Override
+ public Options setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) {
+ setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit);
+ return this;
+ }
+
+ @Override
+ public long hardPendingCompactionBytesLimit() {
+ return hardPendingCompactionBytesLimit(nativeHandle_);
+ }
+
+ @Override
+ public Options setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) {
+ setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger);
+ return this;
+ }
+
+ @Override
+ public int level0FileNumCompactionTrigger() {
+ return level0FileNumCompactionTrigger(nativeHandle_);
+ }
+
+ @Override
+ public Options setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) {
+ setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger);
+ return this;
+ }
+
+ @Override
+ public int level0SlowdownWritesTrigger() {
+ return level0SlowdownWritesTrigger(nativeHandle_);
+ }
+
+ @Override
+ public Options setLevel0StopWritesTrigger(int level0StopWritesTrigger) {
+ setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger);
+ return this;
+ }
+
+ @Override
+ public int level0StopWritesTrigger() {
+ return level0StopWritesTrigger(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) {
+ setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional);
+ return this;
+ }
+
+ @Override
+ public int[] maxBytesForLevelMultiplierAdditional() {
+ return maxBytesForLevelMultiplierAdditional(nativeHandle_);
+ }
+
+ @Override
+ public Options setParanoidFileChecks(boolean paranoidFileChecks) {
+ setParanoidFileChecks(nativeHandle_, paranoidFileChecks);
+ return this;
+ }
+
+ @Override
+ public boolean paranoidFileChecks() {
+ return paranoidFileChecks(nativeHandle_);
+ }
+
+ @Override
+ public Options setMaxWriteBufferNumberToMaintain(
+ final int maxWriteBufferNumberToMaintain) {
+ setMaxWriteBufferNumberToMaintain(
+ nativeHandle_, maxWriteBufferNumberToMaintain);
+ return this;
+ }
+
+ @Override
+ public int maxWriteBufferNumberToMaintain() {
+ return maxWriteBufferNumberToMaintain(nativeHandle_);
+ }
+
+ @Override
+ public Options setCompactionPriority(
+ final CompactionPriority compactionPriority) {
+ setCompactionPriority(nativeHandle_, compactionPriority.getValue());
+ return this;
+ }
+
+ @Override
+ public CompactionPriority compactionPriority() {
+ return CompactionPriority.getCompactionPriority(
+ compactionPriority(nativeHandle_));
+ }
+
+ @Override
+ public Options setReportBgIoStats(final boolean reportBgIoStats) {
+ setReportBgIoStats(nativeHandle_, reportBgIoStats);
+ return this;
+ }
+
+ @Override
+ public boolean reportBgIoStats() {
+ return reportBgIoStats(nativeHandle_);
+ }
+
+ @Override
+ public Options setTtl(final long ttl) {
+ setTtl(nativeHandle_, ttl);
+ return this;
+ }
+
+ @Override
+ public long ttl() {
+ return ttl(nativeHandle_);
+ }
+
+ @Override
+ public Options setCompactionOptionsUniversal(
+ final CompactionOptionsUniversal compactionOptionsUniversal) {
+ setCompactionOptionsUniversal(nativeHandle_,
+ compactionOptionsUniversal.nativeHandle_);
+ this.compactionOptionsUniversal_ = compactionOptionsUniversal;
+ return this;
+ }
+
+ @Override
+ public CompactionOptionsUniversal compactionOptionsUniversal() {
+ return this.compactionOptionsUniversal_;
+ }
+
+ @Override
+ public Options setCompactionOptionsFIFO(final CompactionOptionsFIFO compactionOptionsFIFO) {
+ setCompactionOptionsFIFO(nativeHandle_,
+ compactionOptionsFIFO.nativeHandle_);
+ this.compactionOptionsFIFO_ = compactionOptionsFIFO;
+ return this;
+ }
+
+ @Override
+ public CompactionOptionsFIFO compactionOptionsFIFO() {
+ return this.compactionOptionsFIFO_;
+ }
+
+ @Override
+ public Options setForceConsistencyChecks(final boolean forceConsistencyChecks) {
+ setForceConsistencyChecks(nativeHandle_, forceConsistencyChecks);
+ return this;
+ }
+
+ @Override
+ public boolean forceConsistencyChecks() {
+ return forceConsistencyChecks(nativeHandle_);
+ }
+
+ @Override
+ public Options setAtomicFlush(final boolean atomicFlush) {
+ setAtomicFlush(nativeHandle_, atomicFlush);
+ return this;
+ }
+
+ @Override
+ public boolean atomicFlush() {
+ return atomicFlush(nativeHandle_);
+ }
+
+  private static native long newOptions();
+  private static native long newOptions(long dbOptHandle,
+      long cfOptHandle);
+  private static native long copyOptions(long handle);
+ @Override protected final native void disposeInternal(final long handle);
+ private native void setEnv(long optHandle, long envHandle);
+ private native void prepareForBulkLoad(long handle);
+
+ // DB native handles
+ private native void setIncreaseParallelism(long handle, int totalThreads);
+ private native void setCreateIfMissing(long handle, boolean flag);
+ private native boolean createIfMissing(long handle);
+ private native void setCreateMissingColumnFamilies(
+ long handle, boolean flag);
+ private native boolean createMissingColumnFamilies(long handle);
+ private native void setErrorIfExists(long handle, boolean errorIfExists);
+ private native boolean errorIfExists(long handle);
+ private native void setParanoidChecks(
+ long handle, boolean paranoidChecks);
+ private native boolean paranoidChecks(long handle);
+ private native void setRateLimiter(long handle,
+ long rateLimiterHandle);
+ private native void setSstFileManager(final long handle,
+ final long sstFileManagerHandle);
+ private native void setLogger(long handle,
+ long loggerHandle);
+ private native void setInfoLogLevel(long handle, byte logLevel);
+ private native byte infoLogLevel(long handle);
+ private native void setMaxOpenFiles(long handle, int maxOpenFiles);
+ private native int maxOpenFiles(long handle);
+ private native void setMaxTotalWalSize(long handle,
+ long maxTotalWalSize);
+ private native void setMaxFileOpeningThreads(final long handle,
+ final int maxFileOpeningThreads);
+ private native int maxFileOpeningThreads(final long handle);
+ private native long maxTotalWalSize(long handle);
+ private native void setStatistics(final long handle, final long statisticsHandle);
+ private native long statistics(final long handle);
+ private native boolean useFsync(long handle);
+ private native void setUseFsync(long handle, boolean useFsync);
+ private native void setDbPaths(final long handle, final String[] paths,
+ final long[] targetSizes);
+ private native long dbPathsLen(final long handle);
+ private native void dbPaths(final long handle, final String[] paths,
+ final long[] targetSizes);
+ private native void setDbLogDir(long handle, String dbLogDir);
+ private native String dbLogDir(long handle);
+ private native void setWalDir(long handle, String walDir);
+ private native String walDir(long handle);
+ private native void setDeleteObsoleteFilesPeriodMicros(
+ long handle, long micros);
+ private native long deleteObsoleteFilesPeriodMicros(long handle);
+ private native void setBaseBackgroundCompactions(long handle,
+ int baseBackgroundCompactions);
+ private native int baseBackgroundCompactions(long handle);
+ private native void setMaxBackgroundCompactions(
+ long handle, int maxBackgroundCompactions);
+ private native int maxBackgroundCompactions(long handle);
+ private native void setMaxSubcompactions(long handle, int maxSubcompactions);
+ private native int maxSubcompactions(long handle);
+ private native void setMaxBackgroundFlushes(
+ long handle, int maxBackgroundFlushes);
+ private native int maxBackgroundFlushes(long handle);
+  private native void setMaxBackgroundJobs(long handle, int maxBackgroundJobs);
+ private native int maxBackgroundJobs(long handle);
+ private native void setMaxLogFileSize(long handle, long maxLogFileSize)
+ throws IllegalArgumentException;
+ private native long maxLogFileSize(long handle);
+ private native void setLogFileTimeToRoll(
+ long handle, long logFileTimeToRoll) throws IllegalArgumentException;
+ private native long logFileTimeToRoll(long handle);
+ private native void setKeepLogFileNum(long handle, long keepLogFileNum)
+ throws IllegalArgumentException;
+ private native long keepLogFileNum(long handle);
+ private native void setRecycleLogFileNum(long handle, long recycleLogFileNum);
+ private native long recycleLogFileNum(long handle);
+ private native void setMaxManifestFileSize(
+ long handle, long maxManifestFileSize);
+ private native long maxManifestFileSize(long handle);
+ private native void setMaxTableFilesSizeFIFO(
+ long handle, long maxTableFilesSize);
+ private native long maxTableFilesSizeFIFO(long handle);
+ private native void setTableCacheNumshardbits(
+ long handle, int tableCacheNumshardbits);
+ private native int tableCacheNumshardbits(long handle);
+ private native void setWalTtlSeconds(long handle, long walTtlSeconds);
+ private native long walTtlSeconds(long handle);
+ private native void setWalSizeLimitMB(long handle, long sizeLimitMB);
+ private native long walSizeLimitMB(long handle);
+ private native void setManifestPreallocationSize(
+ long handle, long size) throws IllegalArgumentException;
+ private native long manifestPreallocationSize(long handle);
+ private native void setUseDirectReads(long handle, boolean useDirectReads);
+ private native boolean useDirectReads(long handle);
+ private native void setUseDirectIoForFlushAndCompaction(
+ long handle, boolean useDirectIoForFlushAndCompaction);
+ private native boolean useDirectIoForFlushAndCompaction(long handle);
+ private native void setAllowFAllocate(final long handle,
+ final boolean allowFAllocate);
+ private native boolean allowFAllocate(final long handle);
+ private native void setAllowMmapReads(
+ long handle, boolean allowMmapReads);
+ private native boolean allowMmapReads(long handle);
+ private native void setAllowMmapWrites(
+ long handle, boolean allowMmapWrites);
+ private native boolean allowMmapWrites(long handle);
+ private native void setIsFdCloseOnExec(
+ long handle, boolean isFdCloseOnExec);
+ private native boolean isFdCloseOnExec(long handle);
+ private native void setStatsDumpPeriodSec(
+ long handle, int statsDumpPeriodSec);
+ private native int statsDumpPeriodSec(long handle);
+ private native void setStatsPersistPeriodSec(
+ final long handle, final int statsPersistPeriodSec);
+ private native int statsPersistPeriodSec(
+ final long handle);
+ private native void setStatsHistoryBufferSize(
+ final long handle, final long statsHistoryBufferSize);
+ private native long statsHistoryBufferSize(
+ final long handle);
+ private native void setAdviseRandomOnOpen(
+ long handle, boolean adviseRandomOnOpen);
+ private native boolean adviseRandomOnOpen(long handle);
+ private native void setDbWriteBufferSize(final long handle,
+ final long dbWriteBufferSize);
+ private native void setWriteBufferManager(final long handle,
+ final long writeBufferManagerHandle);
+ private native long dbWriteBufferSize(final long handle);
+ private native void setAccessHintOnCompactionStart(final long handle,
+ final byte accessHintOnCompactionStart);
+ private native byte accessHintOnCompactionStart(final long handle);
+ private native void setNewTableReaderForCompactionInputs(final long handle,
+ final boolean newTableReaderForCompactionInputs);
+ private native boolean newTableReaderForCompactionInputs(final long handle);
+ private native void setCompactionReadaheadSize(final long handle,
+ final long compactionReadaheadSize);
+ private native long compactionReadaheadSize(final long handle);
+ private native void setRandomAccessMaxBufferSize(final long handle,
+ final long randomAccessMaxBufferSize);
+ private native long randomAccessMaxBufferSize(final long handle);
+ private native void setWritableFileMaxBufferSize(final long handle,
+ final long writableFileMaxBufferSize);
+ private native long writableFileMaxBufferSize(final long handle);
+ private native void setUseAdaptiveMutex(
+ long handle, boolean useAdaptiveMutex);
+ private native boolean useAdaptiveMutex(long handle);
+ private native void setBytesPerSync(
+ long handle, long bytesPerSync);
+ private native long bytesPerSync(long handle);
+ private native void setWalBytesPerSync(long handle, long walBytesPerSync);
+ private native long walBytesPerSync(long handle);
+ private native void setStrictBytesPerSync(
+ final long handle, final boolean strictBytesPerSync);
+ private native boolean strictBytesPerSync(
+ final long handle);
+ private native void setEnableThreadTracking(long handle,
+ boolean enableThreadTracking);
+ private native boolean enableThreadTracking(long handle);
+ private native void setDelayedWriteRate(long handle, long delayedWriteRate);
+ private native long delayedWriteRate(long handle);
+ private native void setEnablePipelinedWrite(final long handle,
+ final boolean pipelinedWrite);
+ private native boolean enablePipelinedWrite(final long handle);
+ private native void setUnorderedWrite(final long handle,
+ final boolean unorderedWrite);
+ private native boolean unorderedWrite(final long handle);
+ private native void setAllowConcurrentMemtableWrite(long handle,
+ boolean allowConcurrentMemtableWrite);
+ private native boolean allowConcurrentMemtableWrite(long handle);
+ private native void setEnableWriteThreadAdaptiveYield(long handle,
+ boolean enableWriteThreadAdaptiveYield);
+ private native boolean enableWriteThreadAdaptiveYield(long handle);
+ private native void setWriteThreadMaxYieldUsec(long handle,
+ long writeThreadMaxYieldUsec);
+ private native long writeThreadMaxYieldUsec(long handle);
+ private native void setWriteThreadSlowYieldUsec(long handle,
+ long writeThreadSlowYieldUsec);
+ private native long writeThreadSlowYieldUsec(long handle);
+ private native void setSkipStatsUpdateOnDbOpen(final long handle,
+ final boolean skipStatsUpdateOnDbOpen);
+ private native boolean skipStatsUpdateOnDbOpen(final long handle);
+ private native void setWalRecoveryMode(final long handle,
+ final byte walRecoveryMode);
+ private native byte walRecoveryMode(final long handle);
+ private native void setAllow2pc(final long handle,
+ final boolean allow2pc);
+ private native boolean allow2pc(final long handle);
+ private native void setRowCache(final long handle,
+ final long rowCacheHandle);
+ private native void setWalFilter(final long handle,
+ final long walFilterHandle);
+ private native void setFailIfOptionsFileError(final long handle,
+ final boolean failIfOptionsFileError);
+ private native boolean failIfOptionsFileError(final long handle);
+ private native void setDumpMallocStats(final long handle,
+ final boolean dumpMallocStats);
+ private native boolean dumpMallocStats(final long handle);
+ private native void setAvoidFlushDuringRecovery(final long handle,
+ final boolean avoidFlushDuringRecovery);
+ private native boolean avoidFlushDuringRecovery(final long handle);
+ private native void setAvoidFlushDuringShutdown(final long handle,
+ final boolean avoidFlushDuringShutdown);
+ private native boolean avoidFlushDuringShutdown(final long handle);
+ private native void setAllowIngestBehind(final long handle,
+ final boolean allowIngestBehind);
+ private native boolean allowIngestBehind(final long handle);
+ private native void setPreserveDeletes(final long handle,
+ final boolean preserveDeletes);
+ private native boolean preserveDeletes(final long handle);
+ private native void setTwoWriteQueues(final long handle,
+ final boolean twoWriteQueues);
+ private native boolean twoWriteQueues(final long handle);
+ private native void setManualWalFlush(final long handle,
+ final boolean manualWalFlush);
+ private native boolean manualWalFlush(final long handle);
+
+ // CF native handles
+ private native void optimizeForSmallDb(final long handle);
+ private native void optimizeForPointLookup(long handle,
+ long blockCacheSizeMb);
+ private native void optimizeLevelStyleCompaction(long handle,
+ long memtableMemoryBudget);
+ private native void optimizeUniversalStyleCompaction(long handle,
+ long memtableMemoryBudget);
+ private native void setComparatorHandle(long handle, int builtinComparator);
+ private native void setComparatorHandle(long optHandle,
+ long comparatorHandle, byte comparatorType);
+ private native void setMergeOperatorName(
+ long handle, String name);
+ private native void setMergeOperator(
+ long handle, long mergeOperatorHandle);
+ private native void setCompactionFilterHandle(
+ long handle, long compactionFilterHandle);
+ private native void setCompactionFilterFactoryHandle(
+ long handle, long compactionFilterFactoryHandle);
+ private native void setWriteBufferSize(long handle, long writeBufferSize)
+ throws IllegalArgumentException;
+ private native long writeBufferSize(long handle);
+ private native void setMaxWriteBufferNumber(
+ long handle, int maxWriteBufferNumber);
+ private native int maxWriteBufferNumber(long handle);
+ private native void setMinWriteBufferNumberToMerge(
+ long handle, int minWriteBufferNumberToMerge);
+ private native int minWriteBufferNumberToMerge(long handle);
+ private native void setCompressionType(long handle, byte compressionType);
+ private native byte compressionType(long handle);
+ private native void setCompressionPerLevel(long handle,
+ byte[] compressionLevels);
+ private native byte[] compressionPerLevel(long handle);
+ private native void setBottommostCompressionType(long handle,
+ byte bottommostCompressionType);
+ private native byte bottommostCompressionType(long handle);
+ private native void setBottommostCompressionOptions(final long handle,
+ final long bottommostCompressionOptionsHandle);
+ private native void setCompressionOptions(long handle,
+ long compressionOptionsHandle);
+ private native void useFixedLengthPrefixExtractor(
+ long handle, int prefixLength);
+ private native void useCappedPrefixExtractor(
+ long handle, int prefixLength);
+ private native void setNumLevels(
+ long handle, int numLevels);
+ private native int numLevels(long handle);
+ private native void setLevelZeroFileNumCompactionTrigger(
+ long handle, int numFiles);
+ private native int levelZeroFileNumCompactionTrigger(long handle);
+ private native void setLevelZeroSlowdownWritesTrigger(
+ long handle, int numFiles);
+ private native int levelZeroSlowdownWritesTrigger(long handle);
+ private native void setLevelZeroStopWritesTrigger(
+ long handle, int numFiles);
+ private native int levelZeroStopWritesTrigger(long handle);
+ private native void setTargetFileSizeBase(
+ long handle, long targetFileSizeBase);
+ private native long targetFileSizeBase(long handle);
+ private native void setTargetFileSizeMultiplier(
+ long handle, int multiplier);
+ private native int targetFileSizeMultiplier(long handle);
+ private native void setMaxBytesForLevelBase(
+ long handle, long maxBytesForLevelBase);
+ private native long maxBytesForLevelBase(long handle);
+ private native void setLevelCompactionDynamicLevelBytes(
+ long handle, boolean enableLevelCompactionDynamicLevelBytes);
+ private native boolean levelCompactionDynamicLevelBytes(
+ long handle);
+ private native void setMaxBytesForLevelMultiplier(long handle, double multiplier);
+ private native double maxBytesForLevelMultiplier(long handle);
+ private native void setMaxCompactionBytes(long handle, long maxCompactionBytes);
+ private native long maxCompactionBytes(long handle);
+ private native void setArenaBlockSize(
+ long handle, long arenaBlockSize) throws IllegalArgumentException;
+ private native long arenaBlockSize(long handle);
+ private native void setDisableAutoCompactions(
+ long handle, boolean disableAutoCompactions);
+ private native boolean disableAutoCompactions(long handle);
+ private native void setCompactionStyle(long handle, byte compactionStyle);
+ private native byte compactionStyle(long handle);
+ private native void setMaxSequentialSkipInIterations(
+ long handle, long maxSequentialSkipInIterations);
+ private native long maxSequentialSkipInIterations(long handle);
+ private native void setMemTableFactory(long handle, long factoryHandle);
+ private native String memTableFactoryName(long handle);
+ private native void setTableFactory(long handle, long factoryHandle);
+ private native String tableFactoryName(long handle);
+ private native void setInplaceUpdateSupport(
+ long handle, boolean inplaceUpdateSupport);
+ private native boolean inplaceUpdateSupport(long handle);
+ private native void setInplaceUpdateNumLocks(
+ long handle, long inplaceUpdateNumLocks)
+ throws IllegalArgumentException;
+ private native long inplaceUpdateNumLocks(long handle);
+ private native void setMemtablePrefixBloomSizeRatio(
+ long handle, double memtablePrefixBloomSizeRatio);
+ private native double memtablePrefixBloomSizeRatio(long handle);
+ private native void setBloomLocality(
+ long handle, int bloomLocality);
+ private native int bloomLocality(long handle);
+ private native void setMaxSuccessiveMerges(
+ long handle, long maxSuccessiveMerges)
+ throws IllegalArgumentException;
+ private native long maxSuccessiveMerges(long handle);
+ private native void setOptimizeFiltersForHits(long handle,
+ boolean optimizeFiltersForHits);
+ private native boolean optimizeFiltersForHits(long handle);
+ private native void setMemtableHugePageSize(long handle,
+ long memtableHugePageSize);
+ private native long memtableHugePageSize(long handle);
+ private native void setSoftPendingCompactionBytesLimit(long handle,
+ long softPendingCompactionBytesLimit);
+ private native long softPendingCompactionBytesLimit(long handle);
+ private native void setHardPendingCompactionBytesLimit(long handle,
+ long hardPendingCompactionBytesLimit);
+ private native long hardPendingCompactionBytesLimit(long handle);
+ private native void setLevel0FileNumCompactionTrigger(long handle,
+ int level0FileNumCompactionTrigger);
+ private native int level0FileNumCompactionTrigger(long handle);
+ private native void setLevel0SlowdownWritesTrigger(long handle,
+ int level0SlowdownWritesTrigger);
+ private native int level0SlowdownWritesTrigger(long handle);
+ private native void setLevel0StopWritesTrigger(long handle,
+ int level0StopWritesTrigger);
+ private native int level0StopWritesTrigger(long handle);
+ private native void setMaxBytesForLevelMultiplierAdditional(long handle,
+ int[] maxBytesForLevelMultiplierAdditional);
+ private native int[] maxBytesForLevelMultiplierAdditional(long handle);
+ private native void setParanoidFileChecks(long handle,
+ boolean paranoidFileChecks);
+ private native boolean paranoidFileChecks(long handle);
+ private native void setMaxWriteBufferNumberToMaintain(final long handle,
+ final int maxWriteBufferNumberToMaintain);
+ private native int maxWriteBufferNumberToMaintain(final long handle);
+ private native void setCompactionPriority(final long handle,
+ final byte compactionPriority);
+ private native byte compactionPriority(final long handle);
+ private native void setReportBgIoStats(final long handle,
+ final boolean reportBgIoStats);
+ private native boolean reportBgIoStats(final long handle);
+ private native void setTtl(final long handle, final long ttl);
+ private native long ttl(final long handle);
+ private native void setCompactionOptionsUniversal(final long handle,
+ final long compactionOptionsUniversalHandle);
+ private native void setCompactionOptionsFIFO(final long handle,
+ final long compactionOptionsFIFOHandle);
+ private native void setForceConsistencyChecks(final long handle,
+ final boolean forceConsistencyChecks);
+ private native boolean forceConsistencyChecks(final long handle);
+ private native void setAtomicFlush(final long handle,
+ final boolean atomicFlush);
+ private native boolean atomicFlush(final long handle);
+
+ // instance variables
+ // NOTE: If you add new member variables, please update the copy constructor above!
+ private Env env_;
+ private MemTableConfig memTableConfig_;
+ private TableFormatConfig tableFormatConfig_;
+ private RateLimiter rateLimiter_;
+ private AbstractComparator comparator_;
+ private AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter_;
+ private AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>>
+ compactionFilterFactory_;
+ private CompactionOptionsUniversal compactionOptionsUniversal_;
+ private CompactionOptionsFIFO compactionOptionsFIFO_;
+ private CompressionOptions bottommostCompressionOptions_;
+ private CompressionOptions compressionOptions_;
+ private Cache rowCache_;
+ private WalFilter walFilter_;
+ private WriteBufferManager writeBufferManager_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OptionsUtil.java b/src/rocksdb/java/src/main/java/org/rocksdb/OptionsUtil.java
new file mode 100644
index 000000000..f153556ba
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/OptionsUtil.java
@@ -0,0 +1,142 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class OptionsUtil {
+ /**
+   * A static method to construct the DBOptions and ColumnFamilyDescriptors by
+   * loading the latest RocksDB options file stored in the specified RocksDB
+   * database.
+   *
+   * Note that all the pointer options (except table_factory, which is
+   * described in more detail below) will be initialized with the default
+   * values. Developers can further initialize them after this function call.
+   * Below is an example list of pointer options which will be initialized.
+ *
+ * - env
+ * - memtable_factory
+ * - compaction_filter_factory
+ * - prefix_extractor
+ * - comparator
+ * - merge_operator
+ * - compaction_filter
+ *
+ * For table_factory, this function further supports deserializing
+ * BlockBasedTableFactory and its BlockBasedTableOptions except the
+ * pointer options of BlockBasedTableOptions (flush_block_policy_factory,
+ * block_cache, and block_cache_compressed), which will be initialized with
+ * default values. Developers can further specify these three options by
+   * casting the return value of TableFactory::GetOptions() to
+ * BlockBasedTableOptions and making necessary changes.
+ *
+ * @param dbPath the path to the RocksDB.
+ * @param env {@link org.rocksdb.Env} instance.
+ * @param dbOptions {@link org.rocksdb.DBOptions} instance. This will be
+ * filled and returned.
+   * @param cfDescs A list of {@link org.rocksdb.ColumnFamilyDescriptor}s to be
+   *     filled and returned.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
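+   *
+   * <p>A minimal usage sketch (illustrative; {@code "/path/to/db"} is assumed
+   * to contain an existing RocksDB database with an options file):</p>
+   * <pre>{@code
+   * final DBOptions dbOptions = new DBOptions();
+   * final List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
+   * OptionsUtil.loadLatestOptions("/path/to/db", Env.getDefault(),
+   *     dbOptions, cfDescs);
+   * // cfDescs can now be passed to RocksDB.open(...)
+   * }</pre>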
+ */
+ public static void loadLatestOptions(String dbPath, Env env, DBOptions dbOptions,
+ List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException {
+ loadLatestOptions(dbPath, env, dbOptions, cfDescs, false);
+ }
+
+ /**
+ * @param dbPath the path to the RocksDB.
+ * @param env {@link org.rocksdb.Env} instance.
+ * @param dbOptions {@link org.rocksdb.DBOptions} instance. This will be
+ * filled and returned.
+   * @param cfDescs A list of {@link org.rocksdb.ColumnFamilyDescriptor}s to be
+   *     filled and returned.
+ * @param ignoreUnknownOptions this flag can be set to true if you want to
+   *     ignore options that are from a newer version of the db, essentially for
+ * forward compatibility.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static void loadLatestOptions(String dbPath, Env env, DBOptions dbOptions,
+ List<ColumnFamilyDescriptor> cfDescs, boolean ignoreUnknownOptions) throws RocksDBException {
+ loadLatestOptions(
+ dbPath, env.nativeHandle_, dbOptions.nativeHandle_, cfDescs, ignoreUnknownOptions);
+ }
+
+ /**
+ * Similar to LoadLatestOptions, this function constructs the DBOptions
+ * and ColumnFamilyDescriptors based on the specified RocksDB Options file.
+ * See LoadLatestOptions above.
+ *
+ * @param optionsFileName the RocksDB options file path.
+ * @param env {@link org.rocksdb.Env} instance.
+ * @param dbOptions {@link org.rocksdb.DBOptions} instance. This will be
+ * filled and returned.
+   * @param cfDescs A list of {@link org.rocksdb.ColumnFamilyDescriptor}s to be
+   *     filled and returned.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static void loadOptionsFromFile(String optionsFileName, Env env, DBOptions dbOptions,
+ List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException {
+ loadOptionsFromFile(optionsFileName, env, dbOptions, cfDescs, false);
+ }
+
+ /**
+ * @param optionsFileName the RocksDB options file path.
+ * @param env {@link org.rocksdb.Env} instance.
+ * @param dbOptions {@link org.rocksdb.DBOptions} instance. This will be
+ * filled and returned.
+   * @param cfDescs A list of {@link org.rocksdb.ColumnFamilyDescriptor}s to be
+   *     filled and returned.
+ * @param ignoreUnknownOptions this flag can be set to true if you want to
+   *     ignore options that are from a newer version of the db, essentially for
+ * forward compatibility.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static void loadOptionsFromFile(String optionsFileName, Env env, DBOptions dbOptions,
+ List<ColumnFamilyDescriptor> cfDescs, boolean ignoreUnknownOptions) throws RocksDBException {
+ loadOptionsFromFile(
+ optionsFileName, env.nativeHandle_, dbOptions.nativeHandle_, cfDescs, ignoreUnknownOptions);
+ }
+
+ /**
+ * Returns the latest options file name under the specified RocksDB path.
+ *
+ * @param dbPath the path to the RocksDB.
+ * @param env {@link org.rocksdb.Env} instance.
+ * @return the latest options file name under the db path.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static String getLatestOptionsFileName(String dbPath, Env env) throws RocksDBException {
+ return getLatestOptionsFileName(dbPath, env.nativeHandle_);
+ }
+
+ /**
+ * Private constructor.
+ * This class has only static methods and shouldn't be instantiated.
+ */
+ private OptionsUtil() {}
+
+ // native methods
+ private native static void loadLatestOptions(String dbPath, long envHandle, long dbOptionsHandle,
+ List<ColumnFamilyDescriptor> cfDescs, boolean ignoreUnknownOptions) throws RocksDBException;
+ private native static void loadOptionsFromFile(String optionsFileName, long envHandle,
+ long dbOptionsHandle, List<ColumnFamilyDescriptor> cfDescs, boolean ignoreUnknownOptions)
+ throws RocksDBException;
+ private native static String getLatestOptionsFileName(String dbPath, long envHandle)
+ throws RocksDBException;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/PersistentCache.java b/src/rocksdb/java/src/main/java/org/rocksdb/PersistentCache.java
new file mode 100644
index 000000000..aed565297
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/PersistentCache.java
@@ -0,0 +1,26 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Persistent cache for caching IO pages on a persistent medium. The
+ * cache is specifically designed to serve as a persistent read cache.
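+ *
+ * <p>A construction sketch (illustrative; the path and the 64 MiB size are
+ * arbitrary, and {@code logger} is assumed to be an existing
+ * {@link org.rocksdb.Logger} instance):</p>
+ * <pre>{@code
+ * final PersistentCache cache = new PersistentCache(
+ *     Env.getDefault(), "/tmp/persistent_cache", 64 * 1024 * 1024,
+ *     logger, false);
+ * }</pre>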
+ */
+public class PersistentCache extends RocksObject {
+
+ public PersistentCache(final Env env, final String path, final long size,
+ final Logger logger, final boolean optimizedForNvm)
+ throws RocksDBException {
+ super(newPersistentCache(env.nativeHandle_, path, size,
+ logger.nativeHandle_, optimizedForNvm));
+ }
+
+ private native static long newPersistentCache(final long envHandle,
+ final String path, final long size, final long loggerHandle,
+ final boolean optimizedForNvm) throws RocksDBException;
+
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java
new file mode 100644
index 000000000..c09998167
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java
@@ -0,0 +1,251 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * The config for plain table sst format.
+ *
+ * <p>PlainTable is a RocksDB SST file format optimized for low query
+ * latency on pure-memory or very low-latency media.</p>
+ *
+ * <p>It also supports the prefix hash feature.</p>
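+ *
+ * <p>A configuration sketch (values are illustrative, not tuning advice):</p>
+ * <pre>{@code
+ * final Options options = new Options()
+ *     .setTableFormatConfig(new PlainTableConfig()
+ *         .setKeySize(16)            // fixed 16-byte keys
+ *         .setBloomBitsPerKey(10)
+ *         .setHashTableRatio(0.75));
+ * }</pre>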
+ */
+public class PlainTableConfig extends TableFormatConfig {
+ public static final int VARIABLE_LENGTH = 0;
+ public static final int DEFAULT_BLOOM_BITS_PER_KEY = 10;
+ public static final double DEFAULT_HASH_TABLE_RATIO = 0.75;
+ public static final int DEFAULT_INDEX_SPARSENESS = 16;
+ public static final int DEFAULT_HUGE_TLB_SIZE = 0;
+ public static final EncodingType DEFAULT_ENCODING_TYPE =
+ EncodingType.kPlain;
+ public static final boolean DEFAULT_FULL_SCAN_MODE = false;
+  public static final boolean DEFAULT_STORE_INDEX_IN_FILE = false;
+
+ public PlainTableConfig() {
+ keySize_ = VARIABLE_LENGTH;
+ bloomBitsPerKey_ = DEFAULT_BLOOM_BITS_PER_KEY;
+ hashTableRatio_ = DEFAULT_HASH_TABLE_RATIO;
+ indexSparseness_ = DEFAULT_INDEX_SPARSENESS;
+ hugePageTlbSize_ = DEFAULT_HUGE_TLB_SIZE;
+ encodingType_ = DEFAULT_ENCODING_TYPE;
+ fullScanMode_ = DEFAULT_FULL_SCAN_MODE;
+ storeIndexInFile_ = DEFAULT_STORE_INDEX_IN_FILE;
+ }
+
+ /**
+   * <p>Set the length of the user key. If it is set to
+   * VARIABLE_LENGTH, then it indicates that the user keys are
+   * of variable length.</p>
+   *
+   * <p>Otherwise, all the keys need to have the same length
+   * in bytes.</p>
+ *
+ * <p>DEFAULT: VARIABLE_LENGTH</p>
+ *
+ * @param keySize the length of the user key.
+ * @return the reference to the current config.
+ */
+ public PlainTableConfig setKeySize(int keySize) {
+ keySize_ = keySize;
+ return this;
+ }
+
+ /**
+ * @return the specified size of the user key. If VARIABLE_LENGTH,
+   *     then it indicates a variable-length key.
+ */
+ public int keySize() {
+ return keySize_;
+ }
+
+ /**
+ * Set the number of bits per key used by the internal bloom filter
+ * in the plain table sst format.
+ *
+   * @param bitsPerKey the number of bits per key for the bloom filter.
+ * @return the reference to the current config.
+ */
+ public PlainTableConfig setBloomBitsPerKey(int bitsPerKey) {
+ bloomBitsPerKey_ = bitsPerKey;
+ return this;
+ }
+
+ /**
+ * @return the number of bits per key used for the bloom filter.
+ */
+ public int bloomBitsPerKey() {
+ return bloomBitsPerKey_;
+ }
+
+ /**
+ * hashTableRatio is the desired utilization of the hash table used
+ * for prefix hashing. The ideal ratio would be the number of
+ * prefixes / the number of hash buckets. If this value is set to
+   * zero, then the hash table will not be used.
+ *
+ * @param ratio the hash table ratio.
+ * @return the reference to the current config.
+ */
+ public PlainTableConfig setHashTableRatio(double ratio) {
+ hashTableRatio_ = ratio;
+ return this;
+ }
+
+ /**
+ * @return the hash table ratio.
+ */
+ public double hashTableRatio() {
+ return hashTableRatio_;
+ }
+
+ /**
+ * Index sparseness determines the index interval for keys inside the
+   * same prefix. This number is equal to the maximum number of linear
+   * search steps required after the hash and binary search. If it's set to 0,
+ * then each key will be indexed.
+ *
+ * @param sparseness the index sparseness.
+ * @return the reference to the current config.
+ */
+ public PlainTableConfig setIndexSparseness(int sparseness) {
+ indexSparseness_ = sparseness;
+ return this;
+ }
+
+ /**
+ * @return the index sparseness.
+ */
+ public long indexSparseness() {
+ return indexSparseness_;
+ }
+
+ /**
+ * <p>huge_page_tlb_size: if &le;0, allocate hash indexes and blooms
+   * from malloc, otherwise from the huge page TLB.</p>
+ *
+ * <p>The user needs to reserve huge pages for it to be allocated,
+ * like: {@code sysctl -w vm.nr_hugepages=20}</p>
+ *
+ * <p>See linux doc Documentation/vm/hugetlbpage.txt</p>
+ *
+ * @param hugePageTlbSize huge page tlb size
+ * @return the reference to the current config.
+ */
+ public PlainTableConfig setHugePageTlbSize(int hugePageTlbSize) {
+ this.hugePageTlbSize_ = hugePageTlbSize;
+ return this;
+ }
+
+ /**
+ * Returns the value for huge page tlb size
+ *
+ * @return hugePageTlbSize
+ */
+ public int hugePageTlbSize() {
+ return hugePageTlbSize_;
+ }
+
+ /**
+ * Sets the encoding type.
+ *
+ * <p>This setting determines how to encode
+ * the keys. See enum {@link EncodingType} for
+ * the choices.</p>
+ *
+ * <p>The value will determine how to encode keys
+ * when writing to a new SST file. This value will be stored
+ * inside the SST file which will be used when reading from
+ * the file, which makes it possible for users to choose
+ * different encoding type when reopening a DB. Files with
+ * different encoding types can co-exist in the same DB and
+ * can be read.</p>
+ *
+ * @param encodingType {@link org.rocksdb.EncodingType} value.
+ * @return the reference to the current config.
+ */
+ public PlainTableConfig setEncodingType(EncodingType encodingType) {
+ this.encodingType_ = encodingType;
+ return this;
+ }
+
+ /**
+ * Returns the active EncodingType
+ *
+ * @return currently set encoding type
+ */
+ public EncodingType encodingType() {
+ return encodingType_;
+ }
+
+ /**
+   * Set full scan mode. If true, the whole file will be read
+   * record by record without using the index.
+ *
+ * @param fullScanMode boolean value indicating if full
+ * scan mode shall be enabled.
+ * @return the reference to the current config.
+ */
+ public PlainTableConfig setFullScanMode(boolean fullScanMode) {
+ this.fullScanMode_ = fullScanMode;
+ return this;
+ }
+
+ /**
+   * Returns whether full scan mode is active.
+ * @return boolean value indicating if the full scan mode is
+ * enabled.
+ */
+ public boolean fullScanMode() {
+ return fullScanMode_;
+ }
+
+ /**
+   * <p>If set to true: compute the plain table index and bloom
+   * filter during file building and store them in the file.
+   * When reading the file, the index will be mmapped instead
+   * of being recomputed.</p>
+ *
+ * @param storeIndexInFile value indicating if index shall
+ * be stored in a file
+ * @return the reference to the current config.
+ */
+ public PlainTableConfig setStoreIndexInFile(boolean storeIndexInFile) {
+ this.storeIndexInFile_ = storeIndexInFile;
+ return this;
+ }
+
+ /**
+ * Return a boolean value indicating if index shall be stored
+ * in a file.
+ *
+ * @return currently set value for store index in file.
+ */
+ public boolean storeIndexInFile() {
+ return storeIndexInFile_;
+ }
+
+ @Override protected long newTableFactoryHandle() {
+ return newTableFactoryHandle(keySize_, bloomBitsPerKey_,
+ hashTableRatio_, indexSparseness_, hugePageTlbSize_,
+ encodingType_.getValue(), fullScanMode_,
+ storeIndexInFile_);
+ }
+
+ private native long newTableFactoryHandle(
+ int keySize, int bloomBitsPerKey,
+ double hashTableRatio, int indexSparseness,
+ int hugePageTlbSize, byte encodingType,
+ boolean fullScanMode, boolean storeIndexInFile);
+
+ private int keySize_;
+ private int bloomBitsPerKey_;
+ private double hashTableRatio_;
+ private int indexSparseness_;
+ private int hugePageTlbSize_;
+ private EncodingType encodingType_;
+ private boolean fullScanMode_;
+ private boolean storeIndexInFile_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Priority.java b/src/rocksdb/java/src/main/java/org/rocksdb/Priority.java
new file mode 100644
index 000000000..34a56edcb
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Priority.java
@@ -0,0 +1,49 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The Thread Pool priority.
+ */
+public enum Priority {
+ BOTTOM((byte) 0x0),
+ LOW((byte) 0x1),
+  HIGH((byte) 0x2),
+  TOTAL((byte) 0x3);
+
+ private final byte value;
+
+ Priority(final byte value) {
+ this.value = value;
+ }
+
+ /**
+   * <p>Returns the byte value of the enumeration value.</p>
+ *
+ * @return byte representation
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get Priority by byte value.
+ *
+ * @param value byte representation of Priority.
+ *
+ * @return {@link org.rocksdb.Priority} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ static Priority getPriority(final byte value) {
+ for (final Priority priority : Priority.values()) {
+      if (priority.getValue() == value) {
+ return priority;
+ }
+ }
+ throw new IllegalArgumentException("Illegal value provided for Priority.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Range.java b/src/rocksdb/java/src/main/java/org/rocksdb/Range.java
new file mode 100644
index 000000000..74c85e5f0
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Range.java
@@ -0,0 +1,19 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Range from start to limit.
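+ *
+ * <p>Example (illustrative): {@code new Range(new Slice("a"), new Slice("z"))}
+ * describes the keys from {@code "a"} up to {@code "z"}.</p>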
+ */
+public class Range {
+ final Slice start;
+ final Slice limit;
+
+ public Range(final Slice start, final Slice limit) {
+ this.start = start;
+ this.limit = limit;
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java b/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java
new file mode 100644
index 000000000..c2b8a0fd9
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java
@@ -0,0 +1,227 @@
+// Copyright (c) 2015, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * RateLimiter, which is used to control write rate of flush and
+ * compaction.
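+ *
+ * <p>A typical-use sketch (illustrative; 10 MiB/s is an arbitrary rate):</p>
+ * <pre>{@code
+ * try (final RateLimiter rateLimiter = new RateLimiter(10 * 1024 * 1024)) {
+ *   final Options options = new Options()
+ *       .setCreateIfMissing(true)
+ *       .setRateLimiter(rateLimiter);
+ *   // open the database with these options ...
+ * }
+ * }</pre>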
+ *
+ * @since 3.10.0
+ */
+public class RateLimiter extends RocksObject {
+ public static final long DEFAULT_REFILL_PERIOD_MICROS = 100 * 1000;
+ public static final int DEFAULT_FAIRNESS = 10;
+ public static final RateLimiterMode DEFAULT_MODE =
+ RateLimiterMode.WRITES_ONLY;
+ public static final boolean DEFAULT_AUTOTUNE = false;
+
+ /**
+ * RateLimiter constructor
+ *
+ * @param rateBytesPerSecond this is the only parameter you want to set
+ * most of the time. It controls the total write rate of compaction
+ * and flush in bytes per second. Currently, RocksDB does not enforce
+ * rate limit for anything other than flush and compaction, e.g. write to
+ * WAL.
+ */
+ public RateLimiter(final long rateBytesPerSecond) {
+ this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS,
+ DEFAULT_MODE, DEFAULT_AUTOTUNE);
+ }
+
+ /**
+ * RateLimiter constructor
+ *
+ * @param rateBytesPerSecond this is the only parameter you want to set
+ * most of the time. It controls the total write rate of compaction
+ * and flush in bytes per second. Currently, RocksDB does not enforce
+ * rate limit for anything other than flush and compaction, e.g. write to
+ * WAL.
+   * @param refillPeriodMicros this controls how often tokens are refilled. For
+   *     example, when rate_bytes_per_sec is set to 10MB/s and refill_period_us
+   *     is set to 100ms, then 1MB is refilled every 100ms internally. A larger
+   *     value can lead to burstier writes while a smaller value introduces more
+   *     CPU overhead. The default of 100,000 microseconds (100ms) should work
+   *     for most cases.
+ */
+ public RateLimiter(final long rateBytesPerSecond,
+ final long refillPeriodMicros) {
+ this(rateBytesPerSecond, refillPeriodMicros, DEFAULT_FAIRNESS, DEFAULT_MODE,
+ DEFAULT_AUTOTUNE);
+ }
+
+ /**
+ * RateLimiter constructor
+ *
+ * @param rateBytesPerSecond this is the only parameter you want to set
+ * most of the time. It controls the total write rate of compaction
+ * and flush in bytes per second. Currently, RocksDB does not enforce
+ * rate limit for anything other than flush and compaction, e.g. write to
+ * WAL.
+   * @param refillPeriodMicros this controls how often tokens are refilled. For
+   *     example, when rate_bytes_per_sec is set to 10MB/s and refill_period_us
+   *     is set to 100ms, then 1MB is refilled every 100ms internally. A larger
+   *     value can lead to burstier writes while a smaller value introduces more
+   *     CPU overhead. The default of 100,000 microseconds (100ms) should work
+   *     for most cases.
+   * @param fairness RateLimiter accepts high-pri requests and low-pri requests.
+   *     A low-pri request is usually blocked in favor of a high-pri request.
+   *     Currently, RocksDB assigns low-pri to requests from compaction and
+   *     high-pri to requests from flush. Low-pri requests can get blocked if
+   *     flush requests come in continuously. This fairness parameter grants
+   *     low-pri requests permission by fairness chance, even when high-pri
+   *     requests exist, to avoid starvation.
+   *     Leaving it at the default of 10 should be fine for most cases.
+ */
+ public RateLimiter(final long rateBytesPerSecond,
+ final long refillPeriodMicros, final int fairness) {
+ this(rateBytesPerSecond, refillPeriodMicros, fairness, DEFAULT_MODE,
+ DEFAULT_AUTOTUNE);
+ }
+
+ /**
+ * RateLimiter constructor
+ *
+ * @param rateBytesPerSecond this is the only parameter you want to set
+ * most of the time. It controls the total write rate of compaction
+ * and flush in bytes per second. Currently, RocksDB does not enforce
+ * rate limit for anything other than flush and compaction, e.g. write to
+ * WAL.
+   * @param refillPeriodMicros this controls how often tokens are refilled. For
+   *     example, when rate_bytes_per_sec is set to 10MB/s and refill_period_us
+   *     is set to 100ms, then 1MB is refilled every 100ms internally. A larger
+   *     value can lead to burstier writes while a smaller value introduces more
+   *     CPU overhead. The default of 100,000 microseconds (100ms) should work
+   *     for most cases.
+   * @param fairness RateLimiter accepts high-pri requests and low-pri requests.
+   *     A low-pri request is usually blocked in favor of a high-pri request.
+   *     Currently, RocksDB assigns low-pri to requests from compaction and
+   *     high-pri to requests from flush. Low-pri requests can get blocked if
+   *     flush requests come in continuously. This fairness parameter grants
+   *     low-pri requests permission by fairness chance, even when high-pri
+   *     requests exist, to avoid starvation.
+   *     Leaving it at the default of 10 should be fine for most cases.
+ * @param rateLimiterMode indicates which types of operations count against
+ * the limit.
+ */
+ public RateLimiter(final long rateBytesPerSecond,
+ final long refillPeriodMicros, final int fairness,
+ final RateLimiterMode rateLimiterMode) {
+ this(rateBytesPerSecond, refillPeriodMicros, fairness, rateLimiterMode,
+ DEFAULT_AUTOTUNE);
+ }
+
+ /**
+ * RateLimiter constructor
+ *
+ * @param rateBytesPerSecond this is the only parameter you want to set
+ * most of the time. It controls the total write rate of compaction
+ * and flush in bytes per second. Currently, RocksDB does not enforce
+ * rate limit for anything other than flush and compaction, e.g. write to
+ * WAL.
+   * @param refillPeriodMicros this controls how often tokens are refilled. For
+   *     example, when rate_bytes_per_sec is set to 10MB/s and refill_period_us
+   *     is set to 100ms, then 1MB is refilled every 100ms internally. A larger
+   *     value can lead to burstier writes while a smaller value introduces more
+   *     CPU overhead. The default of 100,000 microseconds (100ms) should work
+   *     for most cases.
+   * @param fairness RateLimiter accepts high-pri requests and low-pri requests.
+   *     A low-pri request is usually blocked in favor of a high-pri request.
+   *     Currently, RocksDB assigns low-pri to requests from compaction and
+   *     high-pri to requests from flush. Low-pri requests can get blocked if
+   *     flush requests come in continuously. This fairness parameter grants
+   *     low-pri requests permission by fairness chance, even when high-pri
+   *     requests exist, to avoid starvation.
+   *     Leaving it at the default of 10 should be fine for most cases.
+ * @param rateLimiterMode indicates which types of operations count against
+ * the limit.
+ * @param autoTune Enables dynamic adjustment of rate limit within the range
+ * {@code [rate_bytes_per_sec / 20, rate_bytes_per_sec]}, according to
+ * the recent demand for background I/O.
+ */
+ public RateLimiter(final long rateBytesPerSecond,
+ final long refillPeriodMicros, final int fairness,
+ final RateLimiterMode rateLimiterMode, final boolean autoTune) {
+ super(newRateLimiterHandle(rateBytesPerSecond,
+ refillPeriodMicros, fairness, rateLimiterMode.getValue(), autoTune));
+ }
+
+ /**
+   * <p>This API allows the user to dynamically change the rate limiter's
+   * bytes per second. REQUIRED: bytes_per_second &gt; 0</p>
+ *
+ * @param bytesPerSecond bytes per second.
+ */
+ public void setBytesPerSecond(final long bytesPerSecond) {
+ assert(isOwningHandle());
+ setBytesPerSecond(nativeHandle_, bytesPerSecond);
+ }
+
+ /**
+ * Returns the bytes per second.
+ *
+ * @return bytes per second.
+ */
+ public long getBytesPerSecond() {
+ assert(isOwningHandle());
+ return getBytesPerSecond(nativeHandle_);
+ }
+
+ /**
+   * <p>Request tokens to write bytes. If this request cannot be satisfied,
+   * the call is blocked. The caller is responsible for making sure
+   * {@code bytes &lt; GetSingleBurstBytes()}.</p>
+ *
+ * @param bytes requested bytes.
+ */
+ public void request(final long bytes) {
+ assert(isOwningHandle());
+ request(nativeHandle_, bytes);
+ }
+
+ /**
+   * <p>The maximum number of bytes that can be granted in a single burst.</p>
+   *
+   * @return the maximum number of bytes that can be granted in a single burst.
+ */
+ public long getSingleBurstBytes() {
+ assert(isOwningHandle());
+ return getSingleBurstBytes(nativeHandle_);
+ }
+
+ /**
+   * <p>Total bytes that go through the rate limiter.</p>
+   *
+   * @return total bytes that go through the rate limiter.
+ */
+ public long getTotalBytesThrough() {
+ assert(isOwningHandle());
+ return getTotalBytesThrough(nativeHandle_);
+ }
+
+ /**
+   * <p>Total # of requests that go through the rate limiter.</p>
+   *
+   * @return total # of requests that go through the rate limiter.
+ */
+ public long getTotalRequests() {
+ assert(isOwningHandle());
+ return getTotalRequests(nativeHandle_);
+ }
+
+ private static native long newRateLimiterHandle(final long rateBytesPerSecond,
+ final long refillPeriodMicros, final int fairness,
+ final byte rateLimiterMode, final boolean autoTune);
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native void setBytesPerSecond(final long handle,
+ final long bytesPerSecond);
+ private native long getBytesPerSecond(final long handle);
+ private native void request(final long handle, final long bytes);
+ private native long getSingleBurstBytes(final long handle);
+ private native long getTotalBytesThrough(final long handle);
+ private native long getTotalRequests(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiterMode.java b/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiterMode.java
new file mode 100644
index 000000000..4b029d816
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiterMode.java
@@ -0,0 +1,52 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Mode for {@link RateLimiter#RateLimiter(long, long, int, RateLimiterMode)}.
+ */
+public enum RateLimiterMode {
+ READS_ONLY((byte)0x0),
+ WRITES_ONLY((byte)0x1),
+ ALL_IO((byte)0x2);
+
+ private final byte value;
+
+ RateLimiterMode(final byte value) {
+ this.value = value;
+ }
+
+ /**
+   * <p>Returns the byte value of the enumeration value.</p>
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * <p>Get the RateLimiterMode enumeration value by
+ * passing the byte identifier to this method.</p>
+ *
+ * @param byteIdentifier of RateLimiterMode.
+ *
+   * @return RateLimiterMode instance.
+   *
+   * @throws IllegalArgumentException if the rate limiter mode for the
+   *     byteIdentifier cannot be found
+ */
+ public static RateLimiterMode getRateLimiterMode(final byte byteIdentifier) {
+ for (final RateLimiterMode rateLimiterMode : RateLimiterMode.values()) {
+ if (rateLimiterMode.getValue() == byteIdentifier) {
+ return rateLimiterMode;
+ }
+ }
+
+ throw new IllegalArgumentException(
+ "Illegal value provided for RateLimiterMode.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java
new file mode 100644
index 000000000..1f1510568
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java
@@ -0,0 +1,622 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The class that controls the behavior of read operations.
+ *
+ * Note that dispose() must be called before a ReadOptions instance
+ * goes out of scope in order to release the allocated memory in C++.
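+ *
+ * <p>A snapshot-read sketch (illustrative; assumes an open
+ * {@code RocksDB db}):</p>
+ * <pre>{@code
+ * final Snapshot snapshot = db.getSnapshot();
+ * try (final ReadOptions readOptions = new ReadOptions()
+ *          .setSnapshot(snapshot)) {
+ *   final byte[] value = db.get(readOptions, "key".getBytes());
+ * } finally {
+ *   db.releaseSnapshot(snapshot);
+ * }
+ * }</pre>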
+ */
+public class ReadOptions extends RocksObject {
+ public ReadOptions() {
+ super(newReadOptions());
+ }
+
+ /**
+ * @param verifyChecksums verification will be performed on every read
+ * when set to true
+ * @param fillCache if true, then fill-cache behavior will be performed.
+ */
+ public ReadOptions(final boolean verifyChecksums, final boolean fillCache) {
+ super(newReadOptions(verifyChecksums, fillCache));
+ }
+
+ /**
+ * Copy constructor.
+ *
+   * NOTE: This does a shallow copy, which means that the snapshot,
+   * iterate_upper_bound and other pointers will be shared, not deep-copied!
+ *
+ * @param other The ReadOptions to copy.
+ */
+ public ReadOptions(ReadOptions other) {
+ super(copyReadOptions(other.nativeHandle_));
+ this.iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_;
+ this.iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_;
+ }
+
+ /**
+ * If true, all data read from underlying storage will be
+ * verified against corresponding checksums.
+ * Default: true
+ *
+ * @return true if checksum verification is on.
+ */
+ public boolean verifyChecksums() {
+ assert(isOwningHandle());
+ return verifyChecksums(nativeHandle_);
+ }
+
+ /**
+ * If true, all data read from underlying storage will be
+ * verified against corresponding checksums.
+ * Default: true
+ *
+ * @param verifyChecksums if true, then checksum verification
+ * will be performed on every read.
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setVerifyChecksums(
+ final boolean verifyChecksums) {
+ assert(isOwningHandle());
+ setVerifyChecksums(nativeHandle_, verifyChecksums);
+ return this;
+ }
+
+ // TODO(yhchiang): this option seems to be block-based table only.
+ // move this to a better place?
+ /**
+   * Fill the cache when loading the block-based sst formatted db.
+ * Callers may wish to set this field to false for bulk scans.
+ * Default: true
+ *
+ * @return true if the fill-cache behavior is on.
+ */
+ public boolean fillCache() {
+ assert(isOwningHandle());
+ return fillCache(nativeHandle_);
+ }
+
+ /**
+ * Fill the cache when loading the block-based sst formatted db.
+ * Callers may wish to set this field to false for bulk scans.
+ * Default: true
+ *
+ * @param fillCache if true, then fill-cache behavior will be
+ * performed.
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setFillCache(final boolean fillCache) {
+ assert(isOwningHandle());
+ setFillCache(nativeHandle_, fillCache);
+ return this;
+ }
+
+ /**
+ * Returns the currently assigned Snapshot instance.
+ *
+   * @return the Snapshot assigned to this instance. If no Snapshot
+   *     is assigned, null is returned.
+ */
+ public Snapshot snapshot() {
+ assert(isOwningHandle());
+ long snapshotHandle = snapshot(nativeHandle_);
+ if (snapshotHandle != 0) {
+ return new Snapshot(snapshotHandle);
+ }
+ return null;
+ }
+
+ /**
+ * <p>If "snapshot" is non-nullptr, read as of the supplied snapshot
+ * (which must belong to the DB that is being read and which must
+ * not have been released). If "snapshot" is nullptr, use an implicit
+ * snapshot of the state at the beginning of this read operation.</p>
+ * <p>Default: null</p>
+ *
+ * @param snapshot {@link Snapshot} instance
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setSnapshot(final Snapshot snapshot) {
+ assert(isOwningHandle());
+ if (snapshot != null) {
+ setSnapshot(nativeHandle_, snapshot.nativeHandle_);
+ } else {
+      setSnapshot(nativeHandle_, 0L);
+ }
+ return this;
+ }
+
+ /**
+ * Returns the current read tier.
+ *
+ * @return the read tier in use, by default {@link ReadTier#READ_ALL_TIER}
+ */
+ public ReadTier readTier() {
+ assert(isOwningHandle());
+ return ReadTier.getReadTier(readTier(nativeHandle_));
+ }
+
+ /**
+ * Specify if this read request should process data that ALREADY
+   * resides in a particular cache. If the required data is not
+   * found in the specified cache, then {@link RocksDBException} is thrown.
+ *
+ * @param readTier {@link ReadTier} instance
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setReadTier(final ReadTier readTier) {
+ assert(isOwningHandle());
+ setReadTier(nativeHandle_, readTier.getValue());
+ return this;
+ }
+
+ /**
+ * Specify to create a tailing iterator -- a special iterator that has a
+ * view of the complete database (i.e. it can also be used to read newly
+ * added data) and is optimized for sequential reads. It will return records
+ * that were inserted into the database after the creation of the iterator.
+ * Default: false
+ *
+ * Not supported in {@code ROCKSDB_LITE} mode!
+ *
+ * @return true if tailing iterator is enabled.
+ */
+ public boolean tailing() {
+ assert(isOwningHandle());
+ return tailing(nativeHandle_);
+ }
+
+ /**
+ * Specify to create a tailing iterator -- a special iterator that has a
+ * view of the complete database (i.e. it can also be used to read newly
+ * added data) and is optimized for sequential reads. It will return records
+ * that were inserted into the database after the creation of the iterator.
+ * Default: false
+ * Not supported in ROCKSDB_LITE mode!
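+   *
+   * <p>Sketch (illustrative; assumes an open {@code RocksDB db}):</p>
+   * <pre>{@code
+   * try (final ReadOptions ro = new ReadOptions().setTailing(true);
+   *      final RocksIterator it = db.newIterator(ro)) {
+   *   for (it.seekToFirst(); it.isValid(); it.next()) {
+   *     // records written after the iterator was created also show up here
+   *   }
+   * }
+   * }</pre>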
+ *
+ * @param tailing if true, then tailing iterator will be enabled.
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setTailing(final boolean tailing) {
+ assert(isOwningHandle());
+ setTailing(nativeHandle_, tailing);
+ return this;
+ }
+
+ /**
+ * Returns whether managed iterators will be used.
+ *
+ * @return the setting of whether managed iterators will be used,
+ * by default false
+ *
+   * @deprecated This option is not used anymore.
+ */
+ @Deprecated
+ public boolean managed() {
+ assert(isOwningHandle());
+ return managed(nativeHandle_);
+ }
+
+ /**
+ * Specify to create a managed iterator -- a special iterator that
+ * uses less resources by having the ability to free its underlying
+ * resources on request.
+ *
+ * @param managed if true, then managed iterators will be enabled.
+ * @return the reference to the current ReadOptions.
+ *
+   * @deprecated This option is not used anymore.
+ */
+ @Deprecated
+ public ReadOptions setManaged(final boolean managed) {
+ assert(isOwningHandle());
+ setManaged(nativeHandle_, managed);
+ return this;
+ }
+
+ /**
+ * Returns whether a total seek order will be used
+ *
+ * @return the setting of whether a total seek order will be used
+ */
+ public boolean totalOrderSeek() {
+ assert(isOwningHandle());
+ return totalOrderSeek(nativeHandle_);
+ }
+
+ /**
+ * Enable a total order seek regardless of index format (e.g. hash index)
+ * used in the table. Some table format (e.g. plain table) may not support
+ * this option.
+ *
+ * @param totalOrderSeek if true, then total order seek will be enabled.
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setTotalOrderSeek(final boolean totalOrderSeek) {
+ assert(isOwningHandle());
+ setTotalOrderSeek(nativeHandle_, totalOrderSeek);
+ return this;
+ }
+
+ /**
+ * Returns whether the iterator only iterates over the same prefix as the seek
+ *
+ * @return the setting of whether the iterator only iterates over the same
+ * prefix as the seek, default is false
+ */
+ public boolean prefixSameAsStart() {
+ assert(isOwningHandle());
+ return prefixSameAsStart(nativeHandle_);
+ }
+
+ /**
+ * Enforce that the iterator only iterates over the same prefix as the seek.
+ * This option is effective only for prefix seeks, i.e. prefix_extractor is
+ * non-null for the column family and {@link #totalOrderSeek()} is false.
+ * Unlike iterate_upper_bound, {@link #setPrefixSameAsStart(boolean)} only
+ * works within a prefix but in both directions.
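+   *
+   * <p>Sketch (illustrative; assumes the column family was opened with a
+   * prefix extractor, e.g. a fixed-length one of 4 bytes):</p>
+   * <pre>{@code
+   * try (final ReadOptions ro = new ReadOptions().setPrefixSameAsStart(true);
+   *      final RocksIterator it = db.newIterator(ro)) {
+   *   for (it.seek("userAAAA".getBytes()); it.isValid(); it.next()) {
+   *     // iteration stays within the 4-byte "user" prefix
+   *   }
+   * }
+   * }</pre>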
+ *
+ * @param prefixSameAsStart if true, then the iterator only iterates over the
+ * same prefix as the seek
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setPrefixSameAsStart(final boolean prefixSameAsStart) {
+ assert(isOwningHandle());
+ setPrefixSameAsStart(nativeHandle_, prefixSameAsStart);
+ return this;
+ }
+
+ /**
+ * Returns whether the blocks loaded by the iterator will be pinned in memory
+ *
+ * @return the setting of whether the blocks loaded by the iterator will be
+ * pinned in memory
+ */
+ public boolean pinData() {
+ assert(isOwningHandle());
+ return pinData(nativeHandle_);
+ }
+
+ /**
+ * Keep the blocks loaded by the iterator pinned in memory as long as the
+   * iterator is not deleted. If used when reading from tables created with
+   * BlockBasedTableOptions::use_delta_encoding = false, the
+   * Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to
+ * return 1.
+ *
+ * @param pinData if true, the blocks loaded by the iterator will be pinned
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setPinData(final boolean pinData) {
+ assert(isOwningHandle());
+ setPinData(nativeHandle_, pinData);
+ return this;
+ }
+
+ /**
+ * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
+ * schedule a background job in the flush job queue and delete obsolete files
+ * in background.
+ *
+ * Default: false
+ *
+   * @return true if obsolete files are purged in a background job on
+   *     iterator cleanup
+ */
+ public boolean backgroundPurgeOnIteratorCleanup() {
+ assert(isOwningHandle());
+ return backgroundPurgeOnIteratorCleanup(nativeHandle_);
+ }
+
+ /**
+ * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
+ * schedule a background job in the flush job queue and delete obsolete files
+ * in background.
+ *
+ * Default: false
+ *
+ * @param backgroundPurgeOnIteratorCleanup true if obsolete files should be
+ * deleted in a background job when PurgeObsoleteFile is called in
+ * CleanupIteratorState
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setBackgroundPurgeOnIteratorCleanup(
+ final boolean backgroundPurgeOnIteratorCleanup) {
+ assert(isOwningHandle());
+ setBackgroundPurgeOnIteratorCleanup(nativeHandle_,
+ backgroundPurgeOnIteratorCleanup);
+ return this;
+ }
+
+ /**
+ * If non-zero, NewIterator will create a new table reader which
+ * performs reads of the given size. Using a large size (&gt; 2MB) can
+ * improve the performance of forward iteration on spinning disks.
+ *
+ * Default: 0
+ *
+ * @return The readahead size in bytes
+ */
+ public long readaheadSize() {
+ assert(isOwningHandle());
+ return readaheadSize(nativeHandle_);
+ }
+
+ /**
+ * If non-zero, NewIterator will create a new table reader which
+ * performs reads of the given size. Using a large size (&gt; 2MB) can
+ * improve the performance of forward iteration on spinning disks.
+ *
+ * Default: 0
+ *
+ * @param readaheadSize The readahead size in bytes
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setReadaheadSize(final long readaheadSize) {
+ assert(isOwningHandle());
+ setReadaheadSize(nativeHandle_, readaheadSize);
+ return this;
+ }
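+
+ // Illustrative sketch (not part of the original file): enabling readahead
+ // for a long forward scan on spinning disks, per the 2MB guidance above.
+ // `db` is an assumed, already-open RocksDB instance.
+ //
+ // try (final ReadOptions ro = new ReadOptions()
+ //          .setReadaheadSize(2 * 1024 * 1024);
+ //      final RocksIterator it = db.newIterator(ro)) {
+ //   for (it.seekToFirst(); it.isValid(); it.next()) {
+ //     // sequential scan benefits from the larger table-reader reads
+ //   }
+ // }
+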
+
+ /**
+ * A threshold for the number of keys that can be skipped before failing an
+ * iterator seek as incomplete.
+ *
+ * @return the number of keys that can be skipped
+ * before failing an iterator seek as incomplete.
+ */
+ public long maxSkippableInternalKeys() {
+ assert(isOwningHandle());
+ return maxSkippableInternalKeys(nativeHandle_);
+ }
+
+ /**
+ * A threshold for the number of keys that can be skipped before failing an
+ * iterator seek as incomplete. The default value of 0 should be used to
+ * never fail a request as incomplete, even on skipping too many keys.
+ *
+ * Default: 0
+ *
+ * @param maxSkippableInternalKeys the number of keys that can be skipped
+ * before failing an iterator seek as incomplete.
+ *
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setMaxSkippableInternalKeys(
+ final long maxSkippableInternalKeys) {
+ assert(isOwningHandle());
+ setMaxSkippableInternalKeys(nativeHandle_, maxSkippableInternalKeys);
+ return this;
+ }
+
+ /**
+ * If true, keys deleted using the DeleteRange() API will be visible to
+ * readers until they are naturally deleted during compaction. This improves
+ * read performance in DBs with many range deletions.
+ *
+ * Default: false
+ *
+ * @return true if keys deleted using the DeleteRange() API will be visible
+ */
+ public boolean ignoreRangeDeletions() {
+ assert(isOwningHandle());
+ return ignoreRangeDeletions(nativeHandle_);
+ }
+
+ /**
+ * If true, keys deleted using the DeleteRange() API will be visible to
+ * readers until they are naturally deleted during compaction. This improves
+ * read performance in DBs with many range deletions.
+ *
+ * Default: false
+ *
+ * @param ignoreRangeDeletions true if keys deleted using the DeleteRange()
+ * API should be visible
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setIgnoreRangeDeletions(final boolean ignoreRangeDeletions) {
+ assert(isOwningHandle());
+ setIgnoreRangeDeletions(nativeHandle_, ignoreRangeDeletions);
+ return this;
+ }
+
+ /**
+ * Defines the smallest key at which the backward
+ * iterator can return an entry. Once the bound is passed,
+ * {@link RocksIterator#isValid()} will be false.
+ *
+ * The lower bound is inclusive i.e. the bound value is a valid
+ * entry.
+ *
+ * If prefix_extractor is not null, the Seek target and `iterate_lower_bound`
+ * need to have the same prefix. This is because ordering is not guaranteed
+ * outside of prefix domain.
+ *
+ * Default: null
+ *
+ * @param iterateLowerBound Slice representing the lower bound
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setIterateLowerBound(final Slice iterateLowerBound) {
+ assert(isOwningHandle());
+ if (iterateLowerBound != null) {
+ // Hold onto a reference so it doesn't get garbage collected out from under us.
+ iterateLowerBoundSlice_ = iterateLowerBound;
+ setIterateLowerBound(nativeHandle_, iterateLowerBoundSlice_.getNativeHandle());
+ }
+ return this;
+ }
+
+ /**
+ * Returns the smallest key at which the backward
+ * iterator can return an entry.
+ *
+ * The lower bound is inclusive i.e. the bound value is a valid entry.
+ *
+ * @return the smallest key, or null if there is no lower bound defined.
+ */
+ public Slice iterateLowerBound() {
+ assert(isOwningHandle());
+ final long lowerBoundSliceHandle = iterateLowerBound(nativeHandle_);
+ if (lowerBoundSliceHandle != 0) {
+ // Disown the new slice - it's owned by the C++ side of the JNI boundary
+ // from the perspective of this method.
+ return new Slice(lowerBoundSliceHandle, false);
+ }
+ return null;
+ }
+
+ /**
+ * Defines the extent up to which the forward iterator
+ * can return entries. Once the bound is reached,
+ * {@link RocksIterator#isValid()} will be false.
+ *
+ * The upper bound is exclusive i.e. the bound value is not a valid entry.
+ *
+ * If prefix_extractor is not null, the Seek target and iterate_upper_bound
+ * need to have the same prefix. This is because ordering is not guaranteed
+ * outside of prefix domain.
+ *
+ * Default: null
+ *
+ * @param iterateUpperBound Slice representing the upper bound
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setIterateUpperBound(final Slice iterateUpperBound) {
+ assert(isOwningHandle());
+ if (iterateUpperBound != null) {
+ // Hold onto a reference so it doesn't get garbage collected out from under us.
+ iterateUpperBoundSlice_ = iterateUpperBound;
+ setIterateUpperBound(nativeHandle_, iterateUpperBoundSlice_.getNativeHandle());
+ }
+ return this;
+ }
+
+ /**
+ * Returns the largest key at which the forward
+ * iterator can return an entry.
+ *
+ * The upper bound is exclusive i.e. the bound value is not a valid entry.
+ *
+ * @return the largest key, or null if there is no upper bound defined.
+ */
+ public Slice iterateUpperBound() {
+ assert(isOwningHandle());
+ final long upperBoundSliceHandle = iterateUpperBound(nativeHandle_);
+ if (upperBoundSliceHandle != 0) {
+ // Disown the new slice - it's owned by the C++ side of the JNI boundary
+ // from the perspective of this method.
+ return new Slice(upperBoundSliceHandle, false);
+ }
+ return null;
+ }
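+
+ // Illustrative sketch (not part of the original file): bounding an iterator
+ // to the half-open key range ["a", "b"). The Slices must outlive the
+ // iterator, which this ReadOptions instance ensures by holding references
+ // to them (see the instance variables below). `db` is assumed.
+ //
+ // try (final Slice lower = new Slice("a".getBytes());
+ //      final Slice upper = new Slice("b".getBytes());
+ //      final ReadOptions ro = new ReadOptions()
+ //          .setIterateLowerBound(lower)
+ //          .setIterateUpperBound(upper);
+ //      final RocksIterator it = db.newIterator(ro)) {
+ //   for (it.seekToFirst(); it.isValid(); it.next()) {
+ //     // keys here satisfy "a" <= key < "b"
+ //   }
+ // }
+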
+
+ /**
+ * A callback to determine whether relevant keys for this scan exist in a
+ * given table based on the table's properties. The callback is passed the
+ * properties of each table during iteration. If the callback returns false,
+ * the table will not be scanned. This option only affects Iterators and has
+ * no impact on point lookups.
+ *
+ * Default: null (every table will be scanned)
+ *
+ * @param tableFilter the table filter for the callback.
+ *
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setTableFilter(final AbstractTableFilter tableFilter) {
+ assert(isOwningHandle());
+ setTableFilter(nativeHandle_, tableFilter.nativeHandle_);
+ return this;
+ }
+
+ /**
+ * Needed to support differential snapshots. Has 2 effects:
+ * 1) Iterator will skip all internal keys with seqnum &lt; iter_start_seqnum
+ * 2) if this param is &gt; 0, the iterator will return INTERNAL keys instead
+ * of user keys; e.g. tombstones will be returned as well.
+ *
+ * Default: 0 (don't filter by seqnum, return user keys)
+ *
+ * @param startSeqnum the starting sequence number.
+ *
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setIterStartSeqnum(final long startSeqnum) {
+ assert(isOwningHandle());
+ setIterStartSeqnum(nativeHandle_, startSeqnum);
+ return this;
+ }
+
+ /**
+ * Returns the starting Sequence Number of any iterator.
+ * See {@link #setIterStartSeqnum(long)}.
+ *
+ * @return the starting sequence number of any iterator.
+ */
+ public long iterStartSeqnum() {
+ assert(isOwningHandle());
+ return iterStartSeqnum(nativeHandle_);
+ }
+
+ // instance variables
+ // NOTE: If you add new member variables, please update the copy constructor above!
+ //
+ // Hold a reference to any iterate lower or upper bound that was set on this
+ // object until we're destroyed or it's overwritten. That way the caller can
+ // freely leave scope without us losing the Java Slice object, which during
+ // close() would also reap its associated rocksdb::Slice native object since
+ // it is possibly (and likely) an owning handle.
+ private Slice iterateLowerBoundSlice_;
+ private Slice iterateUpperBoundSlice_;
+
+ private native static long newReadOptions();
+ private native static long newReadOptions(final boolean verifyChecksums,
+ final boolean fillCache);
+ private native static long copyReadOptions(long handle);
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native boolean verifyChecksums(long handle);
+ private native void setVerifyChecksums(long handle, boolean verifyChecksums);
+ private native boolean fillCache(long handle);
+ private native void setFillCache(long handle, boolean fillCache);
+ private native long snapshot(long handle);
+ private native void setSnapshot(long handle, long snapshotHandle);
+ private native byte readTier(long handle);
+ private native void setReadTier(long handle, byte readTierValue);
+ private native boolean tailing(long handle);
+ private native void setTailing(long handle, boolean tailing);
+ private native boolean managed(long handle);
+ private native void setManaged(long handle, boolean managed);
+ private native boolean totalOrderSeek(long handle);
+ private native void setTotalOrderSeek(long handle, boolean totalOrderSeek);
+ private native boolean prefixSameAsStart(long handle);
+ private native void setPrefixSameAsStart(long handle, boolean prefixSameAsStart);
+ private native boolean pinData(long handle);
+ private native void setPinData(long handle, boolean pinData);
+ private native boolean backgroundPurgeOnIteratorCleanup(final long handle);
+ private native void setBackgroundPurgeOnIteratorCleanup(final long handle,
+ final boolean backgroundPurgeOnIteratorCleanup);
+ private native long readaheadSize(final long handle);
+ private native void setReadaheadSize(final long handle,
+ final long readaheadSize);
+ private native long maxSkippableInternalKeys(final long handle);
+ private native void setMaxSkippableInternalKeys(final long handle,
+ final long maxSkippableInternalKeys);
+ private native boolean ignoreRangeDeletions(final long handle);
+ private native void setIgnoreRangeDeletions(final long handle,
+ final boolean ignoreRangeDeletions);
+ private native void setIterateUpperBound(final long handle,
+ final long upperBoundSliceHandle);
+ private native long iterateUpperBound(final long handle);
+ private native void setIterateLowerBound(final long handle,
+ final long lowerBoundSliceHandle);
+ private native long iterateLowerBound(final long handle);
+ private native void setTableFilter(final long handle,
+ final long tableFilterHandle);
+ private native void setIterStartSeqnum(final long handle, final long seqNum);
+ private native long iterStartSeqnum(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java b/src/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java
new file mode 100644
index 000000000..78f83f6ad
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java
@@ -0,0 +1,49 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * RocksDB {@link ReadOptions} read tiers.
+ */
+public enum ReadTier {
+ READ_ALL_TIER((byte)0),
+ BLOCK_CACHE_TIER((byte)1),
+ PERSISTED_TIER((byte)2),
+ MEMTABLE_TIER((byte)3);
+
+ private final byte value;
+
+ ReadTier(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Returns the byte value of the enumeration's value
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get ReadTier by byte value.
+ *
+ * @param value byte representation of ReadTier.
+ *
+ * @return {@link org.rocksdb.ReadTier} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ public static ReadTier getReadTier(final byte value) {
+ for (final ReadTier readTier : ReadTier.values()) {
+ if (readTier.getValue() == value){
+ return readTier;
+ }
+ }
+ throw new IllegalArgumentException("Illegal value provided for ReadTier.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java
new file mode 100644
index 000000000..6ee81d858
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java
@@ -0,0 +1,18 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++
+ */
+public class RemoveEmptyValueCompactionFilter
+ extends AbstractCompactionFilter<Slice> {
+ public RemoveEmptyValueCompactionFilter() {
+ super(createNewRemoveEmptyValueCompactionFilter0());
+ }
+
+ private native static long createNewRemoveEmptyValueCompactionFilter0();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java
new file mode 100644
index 000000000..94d93fc71
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java
@@ -0,0 +1,32 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * RestoreOptions to control the behavior of restore.
+ *
+ * Note that dispose() must be called before this instance becomes out-of-scope
+ * to release the allocated memory in C++.
+ *
+ */
+public class RestoreOptions extends RocksObject {
+ /**
+ * Constructor
+ *
+ * @param keepLogFiles If true, restore won't overwrite the existing log files
+ * in wal_dir. It will also move all log files from archive directory to
+ * wal_dir. Use this option in combination with
+ * BackupableDBOptions::backup_log_files = false for persisting in-memory
+ * databases.
+ * Default: false
+ */
+ public RestoreOptions(final boolean keepLogFiles) {
+ super(newRestoreOptions(keepLogFiles));
+ }
+
+ private native static long newRestoreOptions(boolean keepLogFiles);
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java b/src/rocksdb/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java
new file mode 100644
index 000000000..2709a5d59
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java
@@ -0,0 +1,65 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * Determines the type of synchronisation primitive used
+ * in native code.
+ */
+public enum ReusedSynchronisationType {
+ /**
+ * Standard mutex.
+ */
+ MUTEX((byte)0x0),
+
+ /**
+ * Use adaptive mutex, which spins in the user space before resorting
+ * to the kernel. This could reduce context switches when the mutex is not
+ * heavily contended. However, if the mutex is hot, we could end up
+ * wasting spin time.
+ */
+ ADAPTIVE_MUTEX((byte)0x1),
+
+ /**
+ * There is a reused buffer per-thread.
+ */
+ THREAD_LOCAL((byte)0x2);
+
+ private final byte value;
+
+ ReusedSynchronisationType(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Returns the byte value of the enumeration's value
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get ReusedSynchronisationType by byte value.
+ *
+ * @param value byte representation of ReusedSynchronisationType.
+ *
+ * @return {@link org.rocksdb.ReusedSynchronisationType} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ public static ReusedSynchronisationType getReusedSynchronisationType(
+ final byte value) {
+ for (final ReusedSynchronisationType reusedSynchronisationType
+ : ReusedSynchronisationType.values()) {
+ if (reusedSynchronisationType.getValue() == value) {
+ return reusedSynchronisationType;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for ReusedSynchronisationType.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksCallbackObject.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksCallbackObject.java
new file mode 100644
index 000000000..a662f78fd
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksCallbackObject.java
@@ -0,0 +1,50 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * RocksCallbackObject is similar to {@link RocksObject} but varies
+ * in its construction as it is designed for Java objects which have functions
+ * which are called from C++ via JNI.
+ *
+ * RocksCallbackObject is the base class for any RocksDB class that acts as a
+ * callback from some underlying native C++ {@code rocksdb} object.
+ *
+ * The use of {@code RocksObject} should always be preferred over
+ * {@link RocksCallbackObject} if callbacks are not required.
+ */
+public abstract class RocksCallbackObject extends
+ AbstractImmutableNativeReference {
+
+ protected final long nativeHandle_;
+
+ protected RocksCallbackObject(final long... nativeParameterHandles) {
+ super(true);
+ this.nativeHandle_ = initializeNative(nativeParameterHandles);
+ }
+
+ /**
+ * Construct the Native C++ object which will callback
+ * to our object methods
+ *
+ * @param nativeParameterHandles An array of native handles for any parameter
+ * objects that are needed during construction
+ *
+ * @return The native handle of the C++ object which will callback to us
+ */
+ protected abstract long initializeNative(
+ final long... nativeParameterHandles);
+
+ /**
+ * Deletes underlying C++ native callback object pointer
+ */
+ @Override
+ protected void disposeInternal() {
+ disposeInternal(nativeHandle_);
+ }
+
+ private native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java
new file mode 100644
index 000000000..338324b13
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java
@@ -0,0 +1,4522 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+import org.rocksdb.util.Environment;
+
+/**
+ * A RocksDB is a persistent ordered map from keys to values. It is safe for
+ * concurrent access from multiple threads without any external synchronization.
+ * All methods of this class could potentially throw RocksDBException, which
+ * indicates that something went wrong on the RocksDB library side and the
+ * call failed.
+ */
+public class RocksDB extends RocksObject {
+ public static final byte[] DEFAULT_COLUMN_FAMILY = "default".getBytes();
+ public static final int NOT_FOUND = -1;
+
+ private enum LibraryState {
+ NOT_LOADED,
+ LOADING,
+ LOADED
+ }
+
+ private static AtomicReference<LibraryState> libraryLoaded
+ = new AtomicReference<>(LibraryState.NOT_LOADED);
+
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ /**
+ * Loads the necessary library files.
+ * Calling this method twice will have no effect.
+ * By default the method extracts the shared library for loading at
+ * java.io.tmpdir; however, you can override this temporary location by
+ * setting the environment variable ROCKSDB_SHAREDLIB_DIR.
+ */
+ public static void loadLibrary() {
+ if (libraryLoaded.get() == LibraryState.LOADED) {
+ return;
+ }
+
+ if (libraryLoaded.compareAndSet(LibraryState.NOT_LOADED,
+ LibraryState.LOADING)) {
+ final String tmpDir = System.getenv("ROCKSDB_SHAREDLIB_DIR");
+ // loading possibly necessary libraries.
+ for (final CompressionType compressionType : CompressionType.values()) {
+ try {
+ if (compressionType.getLibraryName() != null) {
+ System.loadLibrary(compressionType.getLibraryName());
+ }
+ } catch (UnsatisfiedLinkError e) {
+ // since it may be optional, we ignore its loading failure here.
+ }
+ }
+ try {
+ NativeLibraryLoader.getInstance().loadLibrary(tmpDir);
+ } catch (IOException e) {
+ libraryLoaded.set(LibraryState.NOT_LOADED);
+ throw new RuntimeException("Unable to load the RocksDB shared library",
+ e);
+ }
+
+ libraryLoaded.set(LibraryState.LOADED);
+ return;
+ }
+
+ while (libraryLoaded.get() == LibraryState.LOADING) {
+ try {
+ Thread.sleep(10);
+ } catch(final InterruptedException e) {
+ //ignore
+ }
+ }
+ }
+
+ /**
+ * Tries to load the necessary library files from the given list of
+ * directories.
+ *
+ * @param paths a list of strings where each describes a directory
+ * of a library.
+ */
+ public static void loadLibrary(final List<String> paths) {
+ if (libraryLoaded.get() == LibraryState.LOADED) {
+ return;
+ }
+
+ if (libraryLoaded.compareAndSet(LibraryState.NOT_LOADED,
+ LibraryState.LOADING)) {
+ for (final CompressionType compressionType : CompressionType.values()) {
+ if (compressionType.equals(CompressionType.NO_COMPRESSION)) {
+ continue;
+ }
+ for (final String path : paths) {
+ try {
+ System.load(path + "/" + Environment.getSharedLibraryFileName(
+ compressionType.getLibraryName()));
+ break;
+ } catch (UnsatisfiedLinkError e) {
+ // since they are optional, we ignore loading failures here.
+ }
+ }
+ }
+ boolean success = false;
+ UnsatisfiedLinkError err = null;
+ for (final String path : paths) {
+ try {
+ System.load(path + "/" +
+ Environment.getJniLibraryFileName("rocksdbjni"));
+ success = true;
+ break;
+ } catch (UnsatisfiedLinkError e) {
+ err = e;
+ }
+ }
+ if (!success) {
+ libraryLoaded.set(LibraryState.NOT_LOADED);
+ throw err;
+ }
+
+ libraryLoaded.set(LibraryState.LOADED);
+ return;
+ }
+
+ while (libraryLoaded.get() == LibraryState.LOADING) {
+ try {
+ Thread.sleep(10);
+ } catch(final InterruptedException e) {
+ //ignore
+ }
+ }
+ }
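+
+ // Illustrative sketch (not part of the original file): loading the native
+ // library from explicit directories instead of extracting it to
+ // java.io.tmpdir. The path below is an assumption for the example.
+ //
+ // RocksDB.loadLibrary(Arrays.asList("/opt/rocksdb/native"));
+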
+
+ /**
+ * Private constructor.
+ *
+ * @param nativeHandle The native handle of the C++ RocksDB object
+ */
+ protected RocksDB(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * The factory constructor of RocksDB that opens a RocksDB instance given
+ * the path to the database using the default options with createIfMissing
+ * set to true.
+ *
+ * @param path the path to the rocksdb.
+ * @return a {@link RocksDB} instance on success, null if the specified
+ * {@link RocksDB} can not be opened.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @see Options#setCreateIfMissing(boolean)
+ */
+ public static RocksDB open(final String path) throws RocksDBException {
+ final Options options = new Options();
+ options.setCreateIfMissing(true);
+ return open(options, path);
+ }
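+
+ // Illustrative sketch (not part of the original file): the simplest
+ // open/put/get round trip using this factory method. The path is an
+ // assumption for the example.
+ //
+ // try (final RocksDB db = RocksDB.open("/tmp/rocksdb-example")) {
+ //   db.put("key".getBytes(), "value".getBytes());
+ //   final byte[] value = db.get("key".getBytes());
+ // } catch (final RocksDBException e) {
+ //   // handle the failure
+ // }
+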
+
+ /**
+ * The factory constructor of RocksDB that opens a RocksDB instance given
+ * the path to the database using the specified options and db path and a list
+ * of column family names.
+ * <p>
+ * If opened in read-write mode every existing column family name must be
+ * passed within the list to this method.</p>
+ * <p>
+ * If opened in read-only mode only a subset of the existing column families
+ * may be passed to this method.</p>
+ * <p>
+ * Options instance *should* not be disposed before all DBs using this options
+ * instance have been closed. If the user doesn't call options dispose
+ * explicitly, then this options instance will be GC'd automatically.</p>
+ * <p>
+ * ColumnFamily handles are disposed when the RocksDB instance is disposed.
+ * </p>
+ *
+ * @param path the path to the rocksdb.
+ * @param columnFamilyDescriptors list of column family descriptors
+ * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
+ * on open.
+ * @return a {@link RocksDB} instance on success, null if the specified
+ * {@link RocksDB} can not be opened.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @see DBOptions#setCreateIfMissing(boolean)
+ */
+ public static RocksDB open(final String path,
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
+ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+ final DBOptions options = new DBOptions();
+ return open(options, path, columnFamilyDescriptors, columnFamilyHandles);
+ }
+
+ /**
+ * The factory constructor of RocksDB that opens a RocksDB instance given
+ * the path to the database using the specified options and db path.
+ *
+ * <p>
+ * Options instance *should* not be disposed before all DBs using this options
+ * instance have been closed. If the user doesn't call options dispose
+ * explicitly, then this options instance will be GC'd automatically.</p>
+ * <p>
+ * An Options instance can be re-used to open multiple DBs if DB statistics
+ * are not used. If DB statistics are required, it is recommended to open each
+ * DB with a new Options instance, as the underlying native statistics
+ * instance does not use any locks to prevent concurrent updates.</p>
+ *
+ * @param options {@link org.rocksdb.Options} instance.
+ * @param path the path to the rocksdb.
+ * @return a {@link RocksDB} instance on success, null if the specified
+ * {@link RocksDB} can not be opened.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @see Options#setCreateIfMissing(boolean)
+ */
+ public static RocksDB open(final Options options, final String path)
+ throws RocksDBException {
+ // when a non-default Options is used, keeping an Options reference
+ // in RocksDB can prevent Java from GC'ing the Options during the
+ // life-time of the currently-created RocksDB.
+ final RocksDB db = new RocksDB(open(options.nativeHandle_, path));
+ db.storeOptionsInstance(options);
+ return db;
+ }
+
+ /**
+ * The factory constructor of RocksDB that opens a RocksDB instance given
+ * the path to the database using the specified options and db path and a list
+ * of column family names.
+ * <p>
+ * If opened in read-write mode every existing column family name must be
+ * passed within the list to this method.</p>
+ * <p>
+ * If opened in read-only mode only a subset of the existing column families
+ * may be passed to this method.</p>
+ * <p>
+ * Options instance *should* not be disposed before all DBs using this options
+ * instance have been closed. If the user doesn't call options dispose
+ * explicitly, then this options instance will be GC'd automatically.</p>
+ * <p>
+ * An Options instance can be re-used to open multiple DBs if DB statistics
+ * are not used. If DB statistics are required, it is recommended to open each
+ * DB with a new Options instance, as the underlying native statistics
+ * instance does not use any locks to prevent concurrent updates.</p>
+ * <p>
+ * ColumnFamily handles are disposed when the RocksDB instance is disposed.
+ * </p>
+ *
+ * @param options {@link org.rocksdb.DBOptions} instance.
+ * @param path the path to the rocksdb.
+ * @param columnFamilyDescriptors list of column family descriptors
+ * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
+ * on open.
+ * @return a {@link RocksDB} instance on success, null if the specified
+ * {@link RocksDB} can not be opened.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @see DBOptions#setCreateIfMissing(boolean)
+ */
+ public static RocksDB open(final DBOptions options, final String path,
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
+ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+
+ final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
+ final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
+ for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
+ final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
+ .get(i);
+ cfNames[i] = cfDescriptor.getName();
+ cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_;
+ }
+
+ final long[] handles = open(options.nativeHandle_, path, cfNames,
+ cfOptionHandles);
+ final RocksDB db = new RocksDB(handles[0]);
+ db.storeOptionsInstance(options);
+
+ for (int i = 1; i < handles.length; i++) {
+ columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i]));
+ }
+
+ return db;
+ }
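+
+ // Illustrative sketch (not part of the original file): opening a database
+ // with the default column family plus one named "cf1". Remember that in
+ // read-write mode every existing column family must be listed. The path
+ // and column family name are assumptions for the example.
+ //
+ // final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
+ //     new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ //     new ColumnFamilyDescriptor("cf1".getBytes()));
+ // final List<ColumnFamilyHandle> handles = new ArrayList<>();
+ // try (final DBOptions options = new DBOptions()
+ //          .setCreateIfMissing(true)
+ //          .setCreateMissingColumnFamilies(true);
+ //      final RocksDB db = RocksDB.open(
+ //          options, "/tmp/rocksdb-cf-example", descriptors, handles)) {
+ //   db.put(handles.get(1), "k".getBytes(), "v".getBytes());
+ // }
+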
+
+ /**
+ * The factory constructor of RocksDB that opens a RocksDB instance in
+ * Read-Only mode given the path to the database using the default
+ * options.
+ *
+ * @param path the path to the RocksDB.
+ * @return a {@link RocksDB} instance on success, null if the specified
+ * {@link RocksDB} can not be opened.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static RocksDB openReadOnly(final String path)
+ throws RocksDBException {
+ // This allows the use of the rocksjni default Options instead of
+ // the C++ one.
+ Options options = new Options();
+ return openReadOnly(options, path);
+ }
+
+ /**
+ * The factory constructor of RocksDB that opens a RocksDB instance in
+ * Read-Only mode given the path to the database using the default
+ * options.
+ *
+ * @param path the path to the RocksDB.
+ * @param columnFamilyDescriptors list of column family descriptors
+ * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
+ * on open.
+ * @return a {@link RocksDB} instance on success, null if the specified
+ * {@link RocksDB} can not be opened.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static RocksDB openReadOnly(final String path,
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
+ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+ // This allows the use of the rocksjni default Options instead of
+ // the C++ one.
+ final DBOptions options = new DBOptions();
+ return openReadOnly(options, path, columnFamilyDescriptors,
+ columnFamilyHandles);
+ }
+
+ /**
+ * The factory constructor of RocksDB that opens a RocksDB instance in
+ * Read-Only mode given the path to the database using the specified
+ * options and db path.
+ *
+ * Options instance *should* not be disposed before all DBs using this options
+ * instance have been closed. If the user doesn't call options dispose
+ * explicitly, then this options instance will be GC'd automatically.
+ *
+ * @param options {@link Options} instance.
+ * @param path the path to the RocksDB.
+ * @return a {@link RocksDB} instance on success, null if the specified
+ * {@link RocksDB} can not be opened.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static RocksDB openReadOnly(final Options options, final String path)
+ throws RocksDBException {
+ // when a non-default Options is used, keeping an Options reference
+ // in RocksDB can prevent Java from GC'ing the Options during the
+ // life-time of the currently-created RocksDB.
+ final RocksDB db = new RocksDB(openROnly(options.nativeHandle_, path));
+ db.storeOptionsInstance(options);
+ return db;
+ }
+
+ /**
+ * The factory constructor of RocksDB that opens a RocksDB instance in
+ * Read-Only mode given the path to the database using the specified
+ * options and db path.
+ *
+ * <p>This open method allows opening RocksDB using a subset of the available
+ * column families.</p>
+ * <p>Options instance *should* not be disposed before all DBs using this
+ * options instance have been closed. If the user doesn't call options dispose
+ * explicitly, then this options instance will be GC'd automatically.</p>
+ *
+ * @param options {@link DBOptions} instance.
+ * @param path the path to the RocksDB.
+ * @param columnFamilyDescriptors list of column family descriptors
+ * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
+ * on open.
+ * @return a {@link RocksDB} instance on success, null if the specified
+ * {@link RocksDB} can not be opened.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static RocksDB openReadOnly(final DBOptions options, final String path,
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
+ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+ // when a non-default Options is used, keeping an Options reference
+ // in RocksDB can prevent Java from GC'ing the Options during the
+ // life-time of the currently-created RocksDB.
+
+ final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
+ final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
+ for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
+ final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
+ .get(i);
+ cfNames[i] = cfDescriptor.getName();
+ cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_;
+ }
+
+ final long[] handles = openROnly(options.nativeHandle_, path, cfNames,
+ cfOptionHandles);
+ final RocksDB db = new RocksDB(handles[0]);
+ db.storeOptionsInstance(options);
+
+ for (int i = 1; i < handles.length; i++) {
+ columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i]));
+ }
+
+ return db;
+ }
+
+ /**
+ * This is similar to {@link #close()} except that it
+ * throws an exception if any error occurs.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ *
+ * @throws RocksDBException if an error occurs whilst closing.
+ */
+ public void closeE() throws RocksDBException {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
+
+ /**
+ * This is similar to {@link #closeE()} except that it
+ * silently ignores any errors.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ */
+ @Override
+ public void close() {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } catch (final RocksDBException e) {
+ // silently ignore the error report
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
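+
+ // Illustrative sketch (not part of the original file): making the WAL
+ // durable before closing, since neither close() nor closeE() fsyncs it.
+ // `db` is an assumed, already-open RocksDB instance.
+ //
+ // try (final WriteOptions syncOptions = new WriteOptions().setSync(true);
+ //      final WriteBatch emptyBatch = new WriteBatch()) {
+ //   db.write(syncOptions, emptyBatch);  // force a synced WAL write
+ // }
+ // db.closeE();                          // surfaces close errors
+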
+
+ /**
+ * Static method to determine all available column families for a
+ * rocksdb database identified by path
+ *
+ * @param options Options for opening the database
+ * @param path Absolute path to rocksdb database
+ * @return List&lt;byte[]&gt; List containing the column family names
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static List<byte[]> listColumnFamilies(final Options options,
+ final String path) throws RocksDBException {
+ return Arrays.asList(RocksDB.listColumnFamilies(options.nativeHandle_,
+ path));
+ }
+
+ /**
+ * Creates a new column family with the name columnFamilyName and
+ * allocates a ColumnFamilyHandle within an internal structure.
+ * The ColumnFamilyHandle is automatically disposed with DB disposal.
+ *
+ * @param columnFamilyDescriptor column family to be created.
+ * @return {@link org.rocksdb.ColumnFamilyHandle} instance.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public ColumnFamilyHandle createColumnFamily(
+ final ColumnFamilyDescriptor columnFamilyDescriptor)
+ throws RocksDBException {
+ return new ColumnFamilyHandle(this, createColumnFamily(nativeHandle_,
+ columnFamilyDescriptor.getName(),
+ columnFamilyDescriptor.getName().length,
+ columnFamilyDescriptor.getOptions().nativeHandle_));
+ }
+
+ /**
+ * Bulk create column families with the same column family options.
+ *
+ * @param columnFamilyOptions the options for the column families.
+ * @param columnFamilyNames the names of the column families.
+ *
+ * @return the handles to the newly created column families.
+ *
+ * @throws RocksDBException if an error occurs whilst creating
+ * the column families
+ */
+ public List<ColumnFamilyHandle> createColumnFamilies(
+ final ColumnFamilyOptions columnFamilyOptions,
+ final List<byte[]> columnFamilyNames) throws RocksDBException {
+ final byte[][] cfNames = columnFamilyNames.toArray(
+ new byte[0][]);
+ final long[] cfHandles = createColumnFamilies(nativeHandle_,
+ columnFamilyOptions.nativeHandle_, cfNames);
+ final List<ColumnFamilyHandle> columnFamilyHandles =
+ new ArrayList<>(cfHandles.length);
+ for (int i = 0; i < cfHandles.length; i++) {
+ columnFamilyHandles.add(new ColumnFamilyHandle(this, cfHandles[i]));
+ }
+ return columnFamilyHandles;
+ }
+
+ /**
+ * Bulk create column families from a list of column family descriptors.
+ *
+ * @param columnFamilyDescriptors the descriptions of the column families.
+ *
+ * @return the handles to the newly created column families.
+ *
+ * @throws RocksDBException if an error occurs whilst creating
+ * the column families
+ */
+ public List<ColumnFamilyHandle> createColumnFamilies(
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors)
+ throws RocksDBException {
+ final long[] cfOptsHandles = new long[columnFamilyDescriptors.size()];
+ final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
+ for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
+ final ColumnFamilyDescriptor columnFamilyDescriptor
+ = columnFamilyDescriptors.get(i);
+ cfOptsHandles[i] = columnFamilyDescriptor.getOptions().nativeHandle_;
+ cfNames[i] = columnFamilyDescriptor.getName();
+ }
+ final long[] cfHandles = createColumnFamilies(nativeHandle_,
+ cfOptsHandles, cfNames);
+ final List<ColumnFamilyHandle> columnFamilyHandles =
+ new ArrayList<>(cfHandles.length);
+ for (int i = 0; i < cfHandles.length; i++) {
+ columnFamilyHandles.add(new ColumnFamilyHandle(this, cfHandles[i]));
+ }
+ return columnFamilyHandles;
+ }
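+
+ // Illustrative sketch (not part of the original file): bulk-creating two
+ // column families that share one ColumnFamilyOptions instance. `db` is an
+ // assumed, already-open RocksDB instance.
+ //
+ // try (final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
+ //   final List<ColumnFamilyHandle> handles = db.createColumnFamilies(
+ //       cfOptions, Arrays.asList("cf1".getBytes(), "cf2".getBytes()));
+ //   // use the handles; they are disposed with the database
+ // }
+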
+
+ /**
+ * Drops the column family specified by {@code columnFamilyHandle}. This call
+ * only records a drop record in the manifest and prevents the column
+ * family from flushing and compacting.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void dropColumnFamily(final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Bulk drop column families. This call only records drop records in the
+ * manifest and prevents the column families from flushing and compacting.
+ * In case of error, the request may succeed partially. The user may call
+ * ListColumnFamilies to check the result.
+ */
+ public void dropColumnFamilies(
+ final List<ColumnFamilyHandle> columnFamilies) throws RocksDBException {
+ final long[] cfHandles = new long[columnFamilies.size()];
+ for (int i = 0; i < columnFamilies.size(); i++) {
+ cfHandles[i] = columnFamilies.get(i).nativeHandle_;
+ }
+ dropColumnFamilies(nativeHandle_, cfHandles);
+ }
+
+ //TODO(AR) what about DestroyColumnFamilyHandle
+
+ /**
+ * Set the database entry for "key" to "value".
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void put(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ put(nativeHandle_, key, 0, key.length, value, 0, value.length);
+ }
+
+ /**
+ * Set the database entry for "key" to "value".
+ *
+ * @param key The specified key to be inserted
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the value associated with the specified key
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ *
+ * @throws RocksDBException thrown if errors happens in underlying native
+ * library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void put(final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ put(nativeHandle_, key, offset, len, value, vOffset, vLen);
+ }
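+
+ // Illustrative sketch (not part of the original file): writing a key and
+ // value that live inside one larger shared buffer, avoiding a copy. The
+ // buffer layout is an assumption for the example.
+ //
+ // final byte[] buffer = "somekeysomevalue".getBytes();
+ // db.put(buffer, 0, 7, buffer, 7, 9);  // key = "somekey", value = "somevalue"
+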
+
+ /**
+ * Set the database entry for "key" to "value" in the specified
+ * column family.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * throws IllegalArgumentException if column family is not present
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void put(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value) throws RocksDBException {
+ put(nativeHandle_, key, 0, key.length, value, 0, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Set the database entry for "key" to "value" in the specified
+ * column family.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key The specified key to be inserted
+ * @param offset the offset of the "key" array to be used, must
+ * be non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the value associated with the specified key
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ *
+ * @throws RocksDBException thrown if errors happens in underlying native
+ * library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void put(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ put(nativeHandle_, key, offset, len, value, vOffset, vLen,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Set the database entry for "key" to "value".
+ *
+ * @param writeOpts {@link org.rocksdb.WriteOptions} instance.
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void put(final WriteOptions writeOpts, final byte[] key,
+ final byte[] value) throws RocksDBException {
+ put(nativeHandle_, writeOpts.nativeHandle_,
+ key, 0, key.length, value, 0, value.length);
+ }
+
+ /**
+ * Set the database entry for "key" to "value".
+ *
+ * @param writeOpts {@link org.rocksdb.WriteOptions} instance.
+ * @param key The specified key to be inserted
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the value associated with the specified key
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void put(final WriteOptions writeOpts,
+ final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ put(nativeHandle_, writeOpts.nativeHandle_,
+ key, offset, len, value, vOffset, vLen);
+ }
+
+ /**
+ * Set the database entry for "key" to "value" for the specified
+ * column family.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param writeOpts {@link org.rocksdb.WriteOptions} instance.
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * throws IllegalArgumentException if column family is not present
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @see IllegalArgumentException
+ */
+ public void put(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpts, final byte[] key,
+ final byte[] value) throws RocksDBException {
+ put(nativeHandle_, writeOpts.nativeHandle_, key, 0, key.length, value,
+ 0, value.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Set the database entry for "key" to "value" for the specified
+ * column family.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param writeOpts {@link org.rocksdb.WriteOptions} instance.
+ * @param key the specified key to be inserted. Position and limit are used.
+ * Supports direct buffer only.
+ * @param value the value associated with the specified key. Position and
+ * limit are used. Supports direct buffer only.
+ *
+ * throws IllegalArgumentException if column family is not present
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @see IllegalArgumentException
+ */
+ public void put(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpts,
+ final ByteBuffer key, final ByteBuffer value) throws RocksDBException {
+ assert key.isDirect() && value.isDirect();
+ putDirect(nativeHandle_, writeOpts.nativeHandle_, key, key.position(), key.remaining(), value,
+ value.position(), value.remaining(), columnFamilyHandle.nativeHandle_);
+ key.position(key.limit());
+ value.position(value.limit());
+ }
+
+ /**
+ * Set the database entry for "key" to "value".
+ *
+ * @param writeOpts {@link org.rocksdb.WriteOptions} instance.
+ * @param key the specified key to be inserted. Position and limit are used.
+ * Supports direct buffer only.
+ * @param value the value associated with the specified key. Position and
+ * limit are used. Supports direct buffer only.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void put(final WriteOptions writeOpts, final ByteBuffer key, final ByteBuffer value)
+ throws RocksDBException {
+ assert key.isDirect() && value.isDirect();
+ putDirect(nativeHandle_, writeOpts.nativeHandle_, key, key.position(), key.remaining(), value,
+ value.position(), value.remaining(), 0);
+ key.position(key.limit());
+ value.position(value.limit());
+ }
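+
+ // Illustrative sketch (not part of the original file): a direct-buffer put.
+ // Only direct ByteBuffers are supported; after the call both buffers'
+ // positions are advanced to their limits. `db` is assumed.
+ //
+ // final ByteBuffer key = ByteBuffer.allocateDirect(16);
+ // final ByteBuffer value = ByteBuffer.allocateDirect(16);
+ // key.put("k".getBytes()).flip();
+ // value.put("v".getBytes()).flip();
+ // try (final WriteOptions writeOptions = new WriteOptions()) {
+ //   db.put(writeOptions, key, value);
+ // }
+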
+
+ /**
+ * Set the database entry for "key" to "value" for the specified
+ * column family.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param writeOpts {@link org.rocksdb.WriteOptions} instance.
+ * @param key The specified key to be inserted
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the value associated with the specified key
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void put(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpts,
+ final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ put(nativeHandle_, writeOpts.nativeHandle_, key, offset, len, value,
+ vOffset, vLen, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Remove the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @deprecated Use {@link #delete(byte[])}
+ */
+ @Deprecated
+ public void remove(final byte[] key) throws RocksDBException {
+ delete(key);
+ }
+
+ /**
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final byte[] key) throws RocksDBException {
+ delete(nativeHandle_, key, 0, key.length);
+ }
+
+ /**
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param key Key to delete within database
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be
+ * non-negative and no larger than ("key".length - offset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final byte[] key, final int offset, final int len)
+ throws RocksDBException {
+ delete(nativeHandle_, key, offset, len);
+ }
+
+ /**
+ * Remove the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @deprecated Use {@link #delete(ColumnFamilyHandle, byte[])}
+ */
+ @Deprecated
+ public void remove(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ delete(columnFamilyHandle, key);
+ }
+
+ /**
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ delete(nativeHandle_, key, 0, key.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key Key to delete within database
+ * @param offset the offset of the "key" array to be used,
+ * must be non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("value".length - offset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final int offset, final int len)
+ throws RocksDBException {
+ delete(nativeHandle_, key, offset, len, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Remove the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @deprecated Use {@link #delete(WriteOptions, byte[])}
+ */
+ @Deprecated
+ public void remove(final WriteOptions writeOpt, final byte[] key)
+ throws RocksDBException {
+ delete(writeOpt, key);
+ }
+
+ /**
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final WriteOptions writeOpt, final byte[] key)
+ throws RocksDBException {
+ delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length);
+ }
+
+ /**
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be
+ * non-negative and no larger than ("key".length - offset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final WriteOptions writeOpt, final byte[] key,
+ final int offset, final int len) throws RocksDBException {
+ delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len);
+ }
+
+ /**
+ * Remove the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @deprecated Use {@link #delete(ColumnFamilyHandle, WriteOptions, byte[])}
+ */
+ @Deprecated
+ public void remove(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpt, final byte[] key) throws RocksDBException {
+ delete(columnFamilyHandle, writeOpt, key);
+ }
+
+ /**
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpt, final byte[] key)
+ throws RocksDBException {
+ delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be
+ * non-negative and no larger than ("key".length - offset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpt, final byte[] key, final int offset,
+ final int len) throws RocksDBException {
+ delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Get the value associated with the specified key within column family.
+ *
+ * @param opt {@link org.rocksdb.ReadOptions} instance.
+ * @param key the key to retrieve the value. It is using position and limit.
+ * Supports direct buffer only.
+ * @param value the out-value to receive the retrieved value.
+ * It is using position and limit. Limit is set according to value size.
+ * Supports direct buffer only.
+ * @return The size, in bytes, of the actual value that matches the
+ * specified {@code key}. If the return value is greater than the
+ * length of {@code value}, then it indicates that the size of the
+ * input buffer {@code value} is insufficient and a partial result was
+ * returned. RocksDB.NOT_FOUND is returned if the value is not found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public int get(final ReadOptions opt, final ByteBuffer key, final ByteBuffer value)
+ throws RocksDBException {
+ assert key.isDirect() && value.isDirect();
+ int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(),
+ value, value.position(), value.remaining(), 0);
+ if (result != NOT_FOUND) {
+ value.limit(Math.min(value.limit(), value.position() + result));
+ }
+ key.position(key.limit());
+ return result;
+ }
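+
+ // Usage sketch (illustrative only; `db` is an assumed open RocksDB
+ // instance). Both buffers must be direct; on return the key's position is
+ // advanced to its limit and the value's limit is clamped to the bytes read:
+ //
+ //   ByteBuffer keyBuf = ByteBuffer.allocateDirect(16)
+ //       .put("key".getBytes(StandardCharsets.UTF_8));
+ //   keyBuf.flip();
+ //   ByteBuffer valBuf = ByteBuffer.allocateDirect(1024);
+ //   try (ReadOptions readOpts = new ReadOptions()) {
+ //     int size = db.get(readOpts, keyBuf, valBuf);
+ //     if (size != RocksDB.NOT_FOUND) {
+ //       // bytes between valBuf.position() and valBuf.limit() hold the value
+ //     }
+ //   }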
+
+ /**
+ * Get the value associated with the specified key within column family.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param opt {@link org.rocksdb.ReadOptions} instance.
+ * @param key the key to retrieve the value. It is using position and limit.
+ * Supports direct buffer only.
+ * @param value the out-value to receive the retrieved value.
+ * It is using position and limit. Limit is set according to value size.
+ * Supports direct buffer only.
+ * @return The size, in bytes, of the actual value that matches the
+ * specified {@code key}. If the return value is greater than the
+ * length of {@code value}, then it indicates that the size of the
+ * input buffer {@code value} is insufficient and a partial result was
+ * returned. RocksDB.NOT_FOUND is returned if the value is not found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public int get(final ColumnFamilyHandle columnFamilyHandle, final ReadOptions opt,
+ final ByteBuffer key, final ByteBuffer value) throws RocksDBException {
+ assert key.isDirect() && value.isDirect();
+ int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(),
+ value, value.position(), value.remaining(), columnFamilyHandle.nativeHandle_);
+ if (result != NOT_FOUND) {
+ value.limit(Math.min(value.limit(), value.position() + result));
+ }
+ key.position(key.limit());
+ return result;
+ }
+
+ /**
+ * Remove the database entry for {@code key}. Requires that the key exists
+ * and was not overwritten. It is not an error if the key did not exist
+ * in the database.
+ *
+ * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+ * times), then the result of calling SingleDelete() on this key is undefined.
+ * SingleDelete() only behaves correctly if there has been only one Put()
+ * for this key since the previous call to SingleDelete() for this key.
+ *
+ * This feature is currently an experimental performance optimization
+ * for a very specific workload. It is up to the caller to ensure that
+ * SingleDelete is only used for a key that is not deleted using Delete() or
+ * written using Merge(). Mixing SingleDelete operations with Deletes and
+ * Merges can result in undefined behavior.
+ *
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final byte[] key) throws RocksDBException {
+ singleDelete(nativeHandle_, key, key.length);
+ }
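+
+ // Usage sketch (illustrative only; `db` is an assumed open RocksDB
+ // instance). SingleDelete is only well-defined when the key was written
+ // exactly once since the last SingleDelete:
+ //
+ //   byte[] k = "once".getBytes(StandardCharsets.UTF_8);
+ //   db.put(k, "v".getBytes(StandardCharsets.UTF_8)); // single Put
+ //   db.singleDelete(k);                              // well-defined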
+
+ /**
+ * Remove the database entry for {@code key}. Requires that the key exists
+ * and was not overwritten. It is not an error if the key did not exist
+ * in the database.
+ *
+ * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+ * times), then the result of calling SingleDelete() on this key is undefined.
+ * SingleDelete() only behaves correctly if there has been only one Put()
+ * for this key since the previous call to SingleDelete() for this key.
+ *
+ * This feature is currently an experimental performance optimization
+ * for a very specific workload. It is up to the caller to ensure that
+ * SingleDelete is only used for a key that is not deleted using Delete() or
+ * written using Merge(). Mixing SingleDelete operations with Deletes and
+ * Merges can result in undefined behavior.
+ *
+ * @param columnFamilyHandle The column family to delete the key from
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ singleDelete(nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Remove the database entry for {@code key}. Requires that the key exists
+ * and was not overwritten. It is not an error if the key did not exist
+ * in the database.
+ *
+ * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+ * times), then the result of calling SingleDelete() on this key is undefined.
+ * SingleDelete() only behaves correctly if there has been only one Put()
+ * for this key since the previous call to SingleDelete() for this key.
+ *
+ * This feature is currently an experimental performance optimization
+ * for a very specific workload. It is up to the caller to ensure that
+ * SingleDelete is only used for a key that is not deleted using Delete() or
+ * written using Merge(). Mixing SingleDelete operations with Deletes and
+ * Merges can result in undefined behavior.
+ *
+ * Note: consider setting {@link WriteOptions#setSync(boolean)} to true.
+ *
+ * @param writeOpt Write options for the delete
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final WriteOptions writeOpt, final byte[] key)
+ throws RocksDBException {
+ singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length);
+ }
+
+ /**
+ * Remove the database entry for {@code key}. Requires that the key exists
+ * and was not overwritten. It is not an error if the key did not exist
+ * in the database.
+ *
+ * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+ * times), then the result of calling SingleDelete() on this key is undefined.
+ * SingleDelete() only behaves correctly if there has been only one Put()
+ * for this key since the previous call to SingleDelete() for this key.
+ *
+ * This feature is currently an experimental performance optimization
+ * for a very specific workload. It is up to the caller to ensure that
+ * SingleDelete is only used for a key that is not deleted using Delete() or
+ * written using Merge(). Mixing SingleDelete operations with Deletes and
+ * Merges can result in undefined behavior.
+ *
+ * Note: consider setting {@link WriteOptions#setSync(boolean)} to true.
+ *
+ * @param columnFamilyHandle The column family to delete the key from
+ * @param writeOpt Write options for the delete
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpt, final byte[] key) throws RocksDBException {
+ singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+
+ /**
+ * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+ * including "beginKey" and excluding "endKey". a non-OK status on error. It
+ * is not an error if no keys exist in the range ["beginKey", "endKey").
+ *
+ * Delete the database entry (if any) for "key". Returns OK on success, and a
+ * non-OK status on error. It is not an error if "key" did not exist in the
+ * database.
+ *
+ * @param beginKey First key to delete within database (inclusive)
+ * @param endKey Last key to delete within database (exclusive)
+ *
+ * @throws RocksDBException thrown if error happens in underlying native
+ * library.
+ */
+ public void deleteRange(final byte[] beginKey, final byte[] endKey)
+ throws RocksDBException {
+ deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0,
+ endKey.length);
+ }
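+
+ // Usage sketch (illustrative only; `db` is an assumed open RocksDB
+ // instance). Removes "key1" and "key2" but leaves "key3", since the end
+ // key is exclusive:
+ //
+ //   db.deleteRange("key1".getBytes(StandardCharsets.UTF_8),
+ //       "key3".getBytes(StandardCharsets.UTF_8));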
+
+ /**
+ * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+ * including "beginKey" and excluding "endKey". a non-OK status on error. It
+ * is not an error if no keys exist in the range ["beginKey", "endKey").
+ *
+ * Delete the database entry (if any) for "key". Returns OK on success, and a
+ * non-OK status on error. It is not an error if "key" did not exist in the
+ * database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance
+ * @param beginKey First key to delete within database (inclusive)
+ * @param endKey Last key to delete within database (exclusive)
+ *
+ * @throws RocksDBException thrown if error happens in underlying native
+ * library.
+ */
+ public void deleteRange(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] beginKey, final byte[] endKey) throws RocksDBException {
+ deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0,
+ endKey.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+ * including "beginKey" and excluding "endKey". a non-OK status on error. It
+ * is not an error if no keys exist in the range ["beginKey", "endKey").
+ *
+ * Delete the database entry (if any) for "key". Returns OK on success, and a
+ * non-OK status on error. It is not an error if "key" did not exist in the
+ * database.
+ *
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param beginKey First key to delete within database (inclusive)
+ * @param endKey Last key to delete within database (exclusive)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void deleteRange(final WriteOptions writeOpt, final byte[] beginKey,
+ final byte[] endKey) throws RocksDBException {
+ deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0,
+ beginKey.length, endKey, 0, endKey.length);
+ }
+
+ /**
+ * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+ * including "beginKey" and excluding "endKey". a non-OK status on error. It
+ * is not an error if no keys exist in the range ["beginKey", "endKey").
+ *
+ * Delete the database entry (if any) for "key". Returns OK on success, and a
+ * non-OK status on error. It is not an error if "key" did not exist in the
+ * database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param beginKey First key to delete within database (inclusive)
+ * @param endKey Last key to delete within database (exclusive)
+ *
+ * @throws RocksDBException thrown if error happens in underlying native
+ * library.
+ */
+ public void deleteRange(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpt, final byte[] beginKey, final byte[] endKey)
+ throws RocksDBException {
+ deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0,
+ beginKey.length, endKey, 0, endKey.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for the
+ * specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void merge(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ merge(nativeHandle_, key, 0, key.length, value, 0, value.length);
+ }
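+
+ // Usage sketch (illustrative only). Merge requires that the database was
+ // opened with a merge operator; here we assume an Options configured with
+ // e.g. new StringAppendOperator() via Options#setMergeOperator:
+ //
+ //   byte[] k = "list".getBytes(StandardCharsets.UTF_8);
+ //   db.merge(k, "a".getBytes(StandardCharsets.UTF_8));
+ //   db.merge(k, "b".getBytes(StandardCharsets.UTF_8));
+ //   // db.get(k) would now yield "a,b" with the string-append operator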
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param key the specified key to be merged.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the value to be merged with the current value for the
+ * specified key.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void merge(final byte[] key, int offset, int len, final byte[] value,
+ final int vOffset, final int vLen) throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ merge(nativeHandle_, key, offset, len, value, vOffset, vLen);
+ }
+
+ /**
+ * Add merge operand for key/value pair in a ColumnFamily.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void merge(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value) throws RocksDBException {
+ merge(nativeHandle_, key, 0, key.length, value, 0, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Add merge operand for key/value pair in a ColumnFamily.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key the specified key to be merged.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void merge(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final int offset, final int len, final byte[] value,
+ final int vOffset, final int vLen) throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ merge(nativeHandle_, key, offset, len, value, vOffset, vLen,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param writeOpts {@link WriteOptions} for this write.
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void merge(final WriteOptions writeOpts, final byte[] key,
+ final byte[] value) throws RocksDBException {
+ merge(nativeHandle_, writeOpts.nativeHandle_,
+ key, 0, key.length, value, 0, value.length);
+ }
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param writeOpts {@link WriteOptions} for this write.
+ * @param key the specified key to be merged.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("value".length - offset)
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no longer than "key".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - offset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void merge(final WriteOptions writeOpts,
+ final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ merge(nativeHandle_, writeOpts.nativeHandle_,
+ key, offset, len, value, vOffset, vLen);
+ }
+
+ /**
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database. It is using position and limit.
+ * Supports direct buffer only.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final WriteOptions writeOpt, final ByteBuffer key) throws RocksDBException {
+ assert key.isDirect();
+ deleteDirect(nativeHandle_, writeOpt.nativeHandle_, key, key.position(), key.remaining(), 0);
+ key.position(key.limit());
+ }
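+
+ // Usage sketch (illustrative only; `db` is an assumed open RocksDB
+ // instance). The key buffer must be direct; its position is advanced to
+ // the limit once the delete has been issued:
+ //
+ //   ByteBuffer keyBuf = ByteBuffer.allocateDirect(16)
+ //       .put("key".getBytes(StandardCharsets.UTF_8));
+ //   keyBuf.flip();
+ //   try (WriteOptions writeOpts = new WriteOptions()) {
+ //     db.delete(writeOpts, keyBuf);
+ //   }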
+
+ /**
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database. It is using position and limit.
+ * Supports direct buffer only.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpt,
+ final ByteBuffer key) throws RocksDBException {
+ assert key.isDirect();
+ deleteDirect(nativeHandle_, writeOpt.nativeHandle_, key, key.position(), key.remaining(),
+ columnFamilyHandle.nativeHandle_);
+ key.position(key.limit());
+ }
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param writeOpts {@link WriteOptions} for this write.
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for the
+ * specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void merge(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpts, final byte[] key, final byte[] value)
+ throws RocksDBException {
+ merge(nativeHandle_, writeOpts.nativeHandle_,
+ key, 0, key.length, value, 0, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param writeOpts {@link WriteOptions} for this write.
+ * @param key the specified key to be merged.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void merge(
+ final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpts,
+ final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ merge(nativeHandle_, writeOpts.nativeHandle_,
+ key, offset, len, value, vOffset, vLen,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Apply the specified updates to the database.
+ *
+ * @param writeOpts WriteOptions instance
+ * @param updates WriteBatch instance
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void write(final WriteOptions writeOpts, final WriteBatch updates)
+ throws RocksDBException {
+ write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
+ }
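+
+ // Usage sketch (illustrative only; `db` is an assumed open RocksDB
+ // instance). All updates in a WriteBatch are applied atomically:
+ //
+ //   try (WriteBatch batch = new WriteBatch();
+ //        WriteOptions writeOpts = new WriteOptions()) {
+ //     batch.put("k1".getBytes(StandardCharsets.UTF_8),
+ //         "v1".getBytes(StandardCharsets.UTF_8));
+ //     batch.delete("k2".getBytes(StandardCharsets.UTF_8));
+ //     db.write(writeOpts, batch);
+ //   }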
+
+ /**
+ * Apply the specified updates to the database.
+ *
+ * @param writeOpts WriteOptions instance
+ * @param updates WriteBatchWithIndex instance
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void write(final WriteOptions writeOpts,
+ final WriteBatchWithIndex updates) throws RocksDBException {
+ write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
+ }
+
+ // TODO(AR) we should improve the #get() API: returning -1 (RocksDB.NOT_FOUND) is not very
+ // nice when we could communicate a richer status; the C++ code also shows that -2 can be returned
+
+ /**
+ * Get the value associated with the specified key.
+ *
+ * @param key the key to retrieve the value.
+ * @param value the out-value to receive the retrieved value.
+ *
+ * @return The size, in bytes, of the actual value that matches the
+ * specified {@code key}. If the return value is greater than the
+ * length of {@code value}, then it indicates that the size of the
+ * input buffer {@code value} is insufficient and a partial result was
+ * returned. RocksDB.NOT_FOUND is returned if the value is not found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public int get(final byte[] key, final byte[] value) throws RocksDBException {
+ return get(nativeHandle_, key, 0, key.length, value, 0, value.length);
+ }
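+
+ // Usage sketch (illustrative only; `db` is an assumed open RocksDB
+ // instance), showing how to interpret the return value:
+ //
+ //   byte[] value = new byte[64];
+ //   int size = db.get("key".getBytes(StandardCharsets.UTF_8), value);
+ //   if (size == RocksDB.NOT_FOUND) {
+ //     // key absent
+ //   } else if (size > value.length) {
+ //     // buffer too small: only a partial value was copied; retry with
+ //     // a buffer of `size` bytes
+ //   }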
+
+ /**
+ * Get the value associated with the specified key.
+ *
+ * @param key the key to retrieve the value.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the out-value to receive the retrieved value.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no longer than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and and no larger than ("value".length - offset)
+ *
+ * @return The size, in bytes, of the actual value that matches the
+ * specified {@code key}. If the return value is greater than the
+ * length of {@code value}, then it indicates that the size of the
+ * input buffer {@code value} is insufficient and a partial result was
+ * returned. RocksDB.NOT_FOUND is returned if the value is not found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public int get(final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ return get(nativeHandle_, key, offset, len, value, vOffset, vLen);
+ }
+
+ /**
+ * Get the value associated with the specified key within column family.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the key to retrieve the value.
+ * @param value the out-value to receive the retrieved value.
+ * @return The size, in bytes, of the actual value that matches the
+ * specified {@code key}. If the return value is greater than the
+ * length of {@code value}, then it indicates that the size of the
+ * input buffer {@code value} is insufficient and a partial result was
+ * returned. RocksDB.NOT_FOUND is returned if the value is not found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ final byte[] value) throws RocksDBException, IllegalArgumentException {
+ return get(nativeHandle_, key, 0, key.length, value, 0, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Get the value associated with the specified key within column family.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the key to retrieve the value.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the out-value to receive the retrieved value.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ *
+ * @return The size, in bytes, of the actual value that matches the
+ * specified {@code key}. If the return value is greater than the
+ * length of {@code value}, then it indicates that the size of the
+ * input buffer {@code value} is insufficient and a partial result was
+ * returned. RocksDB.NOT_FOUND is returned if the value is not found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ final int offset, final int len, final byte[] value, final int vOffset,
+ final int vLen) throws RocksDBException, IllegalArgumentException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ return get(nativeHandle_, key, offset, len, value, vOffset, vLen,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Get the value associated with the specified key.
+ *
+ * @param opt {@link org.rocksdb.ReadOptions} instance.
+ * @param key the key to retrieve the value.
+ * @param value the out-value to receive the retrieved value.
+ * @return The size, in bytes, of the actual value that matches the
+ * specified {@code key}. If the return value is greater than the
+ * length of {@code value}, then it indicates that the size of the
+ * input buffer {@code value} is insufficient and a partial result was
+ * returned. RocksDB.NOT_FOUND is returned if the value is not found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public int get(final ReadOptions opt, final byte[] key,
+ final byte[] value) throws RocksDBException {
+ return get(nativeHandle_, opt.nativeHandle_,
+ key, 0, key.length, value, 0, value.length);
+ }
+
+ /**
+ * Get the value associated with the specified key.
+ *
+ * @param opt {@link org.rocksdb.ReadOptions} instance.
+ * @param key the key to retrieve the value.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the out-value to receive the retrieved value.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no longer than "key".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - offset)
+ * @return The size, in bytes, of the actual value that matches the
+ * specified {@code key}. If the return value is greater than the
+ * length of {@code value}, then it indicates that the size of the
+ * input buffer {@code value} is insufficient and a partial result was
+ * returned. RocksDB.NOT_FOUND is returned if the value is not found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public int get(final ReadOptions opt, final byte[] key, final int offset,
+ final int len, final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ return get(nativeHandle_, opt.nativeHandle_,
+ key, offset, len, value, vOffset, vLen);
+ }
+
+ /**
+ * Get the value associated with the specified key within column family.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param opt {@link org.rocksdb.ReadOptions} instance.
+ * @param key the key to retrieve the value.
+ * @param value the out-value to receive the retrieved value.
+ * @return The size, in bytes, of the actual value that matches the
+ * specified {@code key}. If the return value is greater than the
+ * length of {@code value}, then it indicates that the size of the
+ * input buffer {@code value} is insufficient and a partial result was
+ * returned. RocksDB.NOT_FOUND is returned if the value is not found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public int get(final ColumnFamilyHandle columnFamilyHandle,
+ final ReadOptions opt, final byte[] key, final byte[] value)
+ throws RocksDBException {
+ return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length, value,
+ 0, value.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Get the value associated with the specified key within column family.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param opt {@link org.rocksdb.ReadOptions} instance.
+ * @param key the key to retrieve the value.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be
+ * non-negative and no larger than ("key".length - offset)
+ * @param value the out-value to receive the retrieved value.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ * @return The size, in bytes, of the actual value that matches the
+ * specified {@code key}. If the return value is greater than the
+ * length of {@code value}, then it indicates that the size of the
+ * input buffer {@code value} is insufficient and a partial result was
+ * returned. RocksDB.NOT_FOUND is returned if the value is not found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public int get(final ColumnFamilyHandle columnFamilyHandle,
+ final ReadOptions opt, final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ return get(nativeHandle_, opt.nativeHandle_, key, offset, len, value,
+ vOffset, vLen, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * The simplified version of get which returns a new byte array storing
+ * the value associated with the specified input key if any. null will be
+ * returned if the specified key is not found.
+ *
+ * @param key the key to retrieve the value.
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] get(final byte[] key) throws RocksDBException {
+ return get(nativeHandle_, key, 0, key.length);
+ }
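+
+ // Usage sketch (illustrative only; `db` is an assumed open RocksDB
+ // instance). The simplified form allocates the result and uses null to
+ // signal a missing key:
+ //
+ //   byte[] value = db.get("key".getBytes(StandardCharsets.UTF_8));
+ //   if (value == null) {
+ //     // key absent
+ //   }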
+
+ /**
+ * The simplified version of get which returns a new byte array storing
+ * the value associated with the specified input key if any. null will be
+ * returned if the specified key is not found.
+ *
+ * @param key the key to retrieve the value.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] get(final byte[] key, final int offset,
+ final int len) throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ return get(nativeHandle_, key, offset, len);
+ }
+
+ /**
+ * The simplified version of get which returns a new byte array storing
+ * the value associated with the specified input key if any. null will be
+ * returned if the specified key is not found.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the key to retrieve the value.
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ return get(nativeHandle_, key, 0, key.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * The simplified version of get which returns a new byte array storing
+ * the value associated with the specified input key if any. null will be
+ * returned if the specified key is not found.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the key to retrieve the value.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final int offset, final int len)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ return get(nativeHandle_, key, offset, len,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * The simplified version of get which returns a new byte array storing
+ * the value associated with the specified input key if any. null will be
+ * returned if the specified key is not found.
+ *
+ * @param key the key to retrieve the value.
+ * @param opt Read options.
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] get(final ReadOptions opt, final byte[] key)
+ throws RocksDBException {
+ return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length);
+ }
+
+ /**
+ * The simplified version of get which returns a new byte array storing
+ * the value associated with the specified input key if any. null will be
+ * returned if the specified key is not found.
+ *
+ * @param key the key to retrieve the value.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param opt Read options.
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] get(final ReadOptions opt, final byte[] key, final int offset,
+ final int len) throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ return get(nativeHandle_, opt.nativeHandle_, key, offset, len);
+ }
+
+ /**
+ * The simplified version of get which returns a new byte array storing
+ * the value associated with the specified input key if any. null will be
+ * returned if the specified key is not found.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the key to retrieve the value.
+ * @param opt Read options.
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
+ final ReadOptions opt, final byte[] key) throws RocksDBException {
+ return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * The simplified version of get which returns a new byte array storing
+ * the value associated with the specified input key if any. null will be
+ * returned if the specified key is not found.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the key to retrieve the value.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param opt Read options.
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
+ final ReadOptions opt, final byte[] key, final int offset, final int len)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ return get(nativeHandle_, opt.nativeHandle_, key, offset, len,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Returns a map of keys for which values were found in DB.
+ *
+ * @param keys List of keys for which values need to be retrieved.
+ * @return Map where key of map is the key passed by user and value for map
+ * entry is the corresponding value in DB.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @deprecated Consider {@link #multiGetAsList(List)} instead.
+ */
+ @Deprecated
+ public Map<byte[], byte[]> multiGet(final List<byte[]> keys)
+ throws RocksDBException {
+ assert(keys.size() != 0);
+
+ final byte[][] keysArray = keys.toArray(new byte[0][]);
+ final int[] keyOffsets = new int[keysArray.length];
+ final int[] keyLengths = new int[keysArray.length];
+ for (int i = 0; i < keyLengths.length; i++) {
+ keyLengths[i] = keysArray[i].length;
+ }
+
+ final byte[][] values = multiGet(nativeHandle_, keysArray, keyOffsets,
+ keyLengths);
+
+ final Map<byte[], byte[]> keyValueMap =
+ new HashMap<>(computeCapacityHint(values.length));
+ for (int i = 0; i < values.length; i++) {
+ if (values[i] == null) {
+ continue;
+ }
+
+ keyValueMap.put(keys.get(i), values[i]);
+ }
+
+ return keyValueMap;
+ }
+
+ /**
+ * Returns a map of keys for which values were found in DB.
+ * <p>
+ * Note: Every key needs to have a related column family name in
+ * {@code columnFamilyHandleList}.
+ * </p>
+ *
+ * @param columnFamilyHandleList {@link java.util.List} containing
+ * {@link org.rocksdb.ColumnFamilyHandle} instances.
+ * @param keys List of keys for which values need to be retrieved.
+ * @return Map where key of map is the key passed by user and value for map
+ * entry is the corresponding value in DB.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IllegalArgumentException thrown if the size of passed keys is not
+ * equal to the amount of passed column family handles.
+ *
+ * @deprecated Consider {@link #multiGetAsList(List, List)} instead.
+ */
+ @Deprecated
+ public Map<byte[], byte[]> multiGet(
+ final List<ColumnFamilyHandle> columnFamilyHandleList,
+ final List<byte[]> keys) throws RocksDBException,
+ IllegalArgumentException {
+ assert(keys.size() != 0);
+ // The number of keys must match the number of column family handles;
+ // otherwise the native call would cause a segmentation fault.
+ if (keys.size() != columnFamilyHandleList.size()) {
+ throw new IllegalArgumentException(
+ "For each key there must be a ColumnFamilyHandle.");
+ }
+ final long[] cfHandles = new long[columnFamilyHandleList.size()];
+ for (int i = 0; i < columnFamilyHandleList.size(); i++) {
+ cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
+ }
+
+ final byte[][] keysArray = keys.toArray(new byte[0][]);
+ final int[] keyOffsets = new int[keysArray.length];
+ final int[] keyLengths = new int[keysArray.length];
+ for (int i = 0; i < keyLengths.length; i++) {
+ keyLengths[i] = keysArray[i].length;
+ }
+
+ final byte[][] values = multiGet(nativeHandle_, keysArray, keyOffsets,
+ keyLengths, cfHandles);
+
+ final Map<byte[], byte[]> keyValueMap =
+ new HashMap<>(computeCapacityHint(values.length));
+ for (int i = 0; i < values.length; i++) {
+ if (values[i] == null) {
+ continue;
+ }
+ keyValueMap.put(keys.get(i), values[i]);
+ }
+ return keyValueMap;
+ }
+
+ /**
+ * Returns a map of keys for which values were found in DB.
+ *
+ * @param opt Read options.
+ * @param keys List of keys for which values need to be retrieved.
+ * @return Map where key of map is the key passed by user and value for map
+ * entry is the corresponding value in DB.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @deprecated Consider {@link #multiGetAsList(ReadOptions, List)} instead.
+ */
+ @Deprecated
+ public Map<byte[], byte[]> multiGet(final ReadOptions opt,
+ final List<byte[]> keys) throws RocksDBException {
+ assert(keys.size() != 0);
+
+ final byte[][] keysArray = keys.toArray(new byte[0][]);
+ final int[] keyOffsets = new int[keysArray.length];
+ final int[] keyLengths = new int[keysArray.length];
+ for (int i = 0; i < keyLengths.length; i++) {
+ keyLengths[i] = keysArray[i].length;
+ }
+
+ final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_,
+ keysArray, keyOffsets, keyLengths);
+
+ final Map<byte[], byte[]> keyValueMap =
+ new HashMap<>(computeCapacityHint(values.length));
+ for (int i = 0; i < values.length; i++) {
+ if (values[i] == null) {
+ continue;
+ }
+
+ keyValueMap.put(keys.get(i), values[i]);
+ }
+
+ return keyValueMap;
+ }
+
+ /**
+ * Returns a map of keys for which values were found in DB.
+ * <p>
+ * Note: Every key needs to have a related column family name in
+ * {@code columnFamilyHandleList}.
+ * </p>
+ *
+ * @param opt Read options.
+ * @param columnFamilyHandleList {@link java.util.List} containing
+ * {@link org.rocksdb.ColumnFamilyHandle} instances.
+ * @param keys List of keys for which values need to be retrieved.
+ * @return Map where key of map is the key passed by user and value for map
+ * entry is the corresponding value in DB.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IllegalArgumentException thrown if the size of passed keys is not
+ * equal to the amount of passed column family handles.
+ *
+ * @deprecated Consider {@link #multiGetAsList(ReadOptions, List, List)}
+ * instead.
+ */
+ @Deprecated
+ public Map<byte[], byte[]> multiGet(final ReadOptions opt,
+ final List<ColumnFamilyHandle> columnFamilyHandleList,
+ final List<byte[]> keys) throws RocksDBException {
+ assert(keys.size() != 0);
+ // The number of keys must match the number of column family handles;
+ // otherwise the native call would cause a segmentation fault.
+ if (keys.size() != columnFamilyHandleList.size()) {
+ throw new IllegalArgumentException(
+ "For each key there must be a ColumnFamilyHandle.");
+ }
+ final long[] cfHandles = new long[columnFamilyHandleList.size()];
+ for (int i = 0; i < columnFamilyHandleList.size(); i++) {
+ cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
+ }
+
+ final byte[][] keysArray = keys.toArray(new byte[0][]);
+ final int[] keyOffsets = new int[keysArray.length];
+ final int[] keyLengths = new int[keysArray.length];
+ for (int i = 0; i < keyLengths.length; i++) {
+ keyLengths[i] = keysArray[i].length;
+ }
+
+ final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_,
+ keysArray, keyOffsets, keyLengths, cfHandles);
+
+ final Map<byte[], byte[]> keyValueMap
+ = new HashMap<>(computeCapacityHint(values.length));
+ for (int i = 0; i < values.length; i++) {
+ if (values[i] == null) {
+ continue;
+ }
+ keyValueMap.put(keys.get(i), values[i]);
+ }
+
+ return keyValueMap;
+ }
+
+ /**
+ * Takes a list of keys, and returns a list of values for the given list of
+ * keys. List will contain null for keys which could not be found.
+ *
+ * @param keys List of keys for which values need to be retrieved.
+ * @return List of values for the given list of keys. List will contain
+ * null for keys which could not be found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public List<byte[]> multiGetAsList(final List<byte[]> keys)
+ throws RocksDBException {
+ assert(keys.size() != 0);
+
+ final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
+ final int[] keyOffsets = new int[keysArray.length];
+ final int[] keyLengths = new int[keysArray.length];
+ for (int i = 0; i < keyLengths.length; i++) {
+ keyLengths[i] = keysArray[i].length;
+ }
+
+ return Arrays.asList(multiGet(nativeHandle_, keysArray, keyOffsets,
+ keyLengths));
+ }
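+
+ // Usage sketch (illustrative only; `db` is an assumed open RocksDB
+ // instance). Results are positional, with null marking missing keys:
+ //
+ //   List<byte[]> keys = Arrays.asList(
+ //       "k1".getBytes(StandardCharsets.UTF_8),
+ //       "k2".getBytes(StandardCharsets.UTF_8));
+ //   List<byte[]> values = db.multiGetAsList(keys);
+ //   for (int i = 0; i < keys.size(); i++) {
+ //     byte[] v = values.get(i); // null if keys.get(i) was not found
+ //   }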
+
+ /**
+ * Returns a list of values for the given list of keys. List will contain
+ * null for keys which could not be found.
+ * <p>
+ * Note: Every key needs to have a related column family name in
+ * {@code columnFamilyHandleList}.
+ * </p>
+ *
+ * @param columnFamilyHandleList {@link java.util.List} containing
+ * {@link org.rocksdb.ColumnFamilyHandle} instances.
+ * @param keys List of keys for which values need to be retrieved.
+ * @return List of values for the given list of keys. List will contain
+ * null for keys which could not be found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IllegalArgumentException thrown if the size of passed keys is not
+ * equal to the amount of passed column family handles.
+ */
+ public List<byte[]> multiGetAsList(
+ final List<ColumnFamilyHandle> columnFamilyHandleList,
+ final List<byte[]> keys) throws RocksDBException,
+ IllegalArgumentException {
+ assert(keys.size() != 0);
+ // The number of keys must match the number of column family handles;
+ // otherwise the native call would cause a segmentation fault.
+ if (keys.size() != columnFamilyHandleList.size()) {
+ throw new IllegalArgumentException(
+ "For each key there must be a ColumnFamilyHandle.");
+ }
+ final long[] cfHandles = new long[columnFamilyHandleList.size()];
+ for (int i = 0; i < columnFamilyHandleList.size(); i++) {
+ cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
+ }
+
+ final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
+ final int[] keyOffsets = new int[keysArray.length];
+ final int[] keyLengths = new int[keysArray.length];
+ for (int i = 0; i < keyLengths.length; i++) {
+ keyLengths[i] = keysArray[i].length;
+ }
+
+ return Arrays.asList(multiGet(nativeHandle_, keysArray, keyOffsets,
+ keyLengths, cfHandles));
+ }
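+
+ // Usage sketch (illustrative only; `db`, `cf1` and `cf2` are assumed).
+ // The i-th handle pairs with the i-th key, so both lists must match in
+ // size:
+ //
+ //   List<ColumnFamilyHandle> handles = Arrays.asList(cf1, cf2);
+ //   List<byte[]> keys = Arrays.asList(
+ //       "k1".getBytes(StandardCharsets.UTF_8),
+ //       "k2".getBytes(StandardCharsets.UTF_8));
+ //   List<byte[]> values = db.multiGetAsList(handles, keys);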
+
+ /**
+ * Returns a list of values for the given list of keys. List will contain
+ * null for keys which could not be found.
+ *
+ * @param opt Read options.
+ * @param keys List of keys for which values need to be retrieved.
+ * @return List of values for the given list of keys. List will contain
+ * null for keys which could not be found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public List<byte[]> multiGetAsList(final ReadOptions opt,
+ final List<byte[]> keys) throws RocksDBException {
+ assert(keys.size() != 0);
+
+ final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
+ final int[] keyOffsets = new int[keysArray.length];
+ final int[] keyLengths = new int[keysArray.length];
+ for (int i = 0; i < keyLengths.length; i++) {
+ keyLengths[i] = keysArray[i].length;
+ }
+
+ return Arrays.asList(multiGet(nativeHandle_, opt.nativeHandle_,
+ keysArray, keyOffsets, keyLengths));
+ }
+
+ /**
+ * Returns a list of values for the given list of keys. List will contain
+ * null for keys which could not be found.
+ * <p>
+ * Note: Every key needs to have a related column family name in
+ * {@code columnFamilyHandleList}.
+ * </p>
+ *
+ * @param opt Read options.
+ * @param columnFamilyHandleList {@link java.util.List} containing
+ * {@link org.rocksdb.ColumnFamilyHandle} instances.
+ * @param keys List of keys for which values need to be retrieved.
+ * @return List of values for the given list of keys. List will contain
+ * null for keys which could not be found.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IllegalArgumentException thrown if the size of passed keys is not
+ * equal to the amount of passed column family handles.
+ */
+ public List<byte[]> multiGetAsList(final ReadOptions opt,
+ final List<ColumnFamilyHandle> columnFamilyHandleList,
+ final List<byte[]> keys) throws RocksDBException {
+ assert(keys.size() != 0);
+ // The number of keys must match the number of column family handles;
+ // otherwise the native call would cause a segmentation fault.
+ if (keys.size() != columnFamilyHandleList.size()) {
+ throw new IllegalArgumentException(
+ "For each key there must be a ColumnFamilyHandle.");
+ }
+ final long[] cfHandles = new long[columnFamilyHandleList.size()];
+ for (int i = 0; i < columnFamilyHandleList.size(); i++) {
+ cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
+ }
+
+ final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
+ final int[] keyOffsets = new int[keysArray.length];
+ final int[] keyLengths = new int[keysArray.length];
+ for (int i = 0; i < keyLengths.length; i++) {
+ keyLengths[i] = keysArray[i].length;
+ }
+
+ return Arrays.asList(multiGet(nativeHandle_, opt.nativeHandle_,
+ keysArray, keyOffsets, keyLengths, cfHandles));
+ }
+
+ /**
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else it returns true.
+ *
+ * If the caller wants to obtain the value when the key
+ * is found in memory, then {@code valueHolder} must be set.
+ *
+ * This check is potentially lighter-weight than invoking
+ * {@link #get(byte[])}. One way to make this lighter weight is to avoid
+ * doing any IOs.
+ *
+ * @param key byte array of a key to search for
+ * @param valueHolder non-null to retrieve the value if it is found, or null
+ * if the value is not needed. If non-null, upon return of the function,
+ * the {@code value} will be set if it could be retrieved.
+ *
+ * @return false if the key definitely does not exist in the database,
+ * otherwise true.
+ */
+ public boolean keyMayExist(final byte[] key,
+ /* @Nullable */ final Holder<byte[]> valueHolder) {
+ return keyMayExist(key, 0, key.length, valueHolder);
+ }
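+
+ // Usage sketch (illustrative only; `db` is an assumed open RocksDB
+ // instance). A true result is only a hint; the value is delivered via
+ // the Holder when it happened to be found in memory:
+ //
+ //   Holder<byte[]> holder = new Holder<>();
+ //   if (db.keyMayExist("key".getBytes(StandardCharsets.UTF_8), holder)) {
+ //     byte[] v = holder.getValue(); // may still be null
+ //   }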
+
+ /**
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else it returns true.
+ *
+ * If the caller wants to obtain the value when the key
+ * is found in memory, then {@code valueHolder} must be set.
+ *
+ * This check is potentially lighter-weight than invoking
+ * {@link #get(byte[], int, int)}. One way to make this lighter weight is to
+ * avoid doing any IOs.
+ *
+ * @param key byte array of a key to search for
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than "key".length
+ * @param valueHolder non-null to retrieve the value if it is found, or null
+ * if the value is not needed. If non-null, upon return of the function,
+ * the {@code value} will be set if it could be retrieved.
+ *
+ * @return false if the key definitely does not exist in the database,
+ * otherwise true.
+ */
+ public boolean keyMayExist(final byte[] key,
+ final int offset, final int len,
+ /* @Nullable */ final Holder<byte[]> valueHolder) {
+ return keyMayExist((ColumnFamilyHandle)null, key, offset, len, valueHolder);
+ }
+
+ /**
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else it returns true.
+ *
+ * If the caller wants to obtain the value when the key
+ * is found in memory, then {@code valueHolder} must be set.
+ *
+ * This check is potentially lighter-weight than invoking
+ * {@link #get(ColumnFamilyHandle,byte[])}. One way to make this lighter
+ * weight is to avoid doing any IOs.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key byte array of a key to search for
+ * @param valueHolder non-null to retrieve the value if it is found, or null
+ * if the value is not needed. If non-null, upon return of the function,
+ * the {@code value} will be set if it could be retrieved.
+ *
+ * @return false if the key definitely does not exist in the database,
+ * otherwise true.
+ */
+ public boolean keyMayExist(
+ final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ /* @Nullable */ final Holder<byte[]> valueHolder) {
+ return keyMayExist(columnFamilyHandle, key, 0, key.length,
+ valueHolder);
+ }
+
+ /**
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else it returns true.
+ *
+ * If the caller wants to obtain the value when the key
+ * is found in memory, then {@code valueHolder} must be set.
+ *
+ * This check is potentially lighter-weight than invoking
+ * {@link #get(ColumnFamilyHandle, byte[], int, int)}. One way to make this
+ * lighter weight is to avoid doing any IOs.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key byte array of a key to search for
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than "key".length
+ * @param valueHolder non-null to retrieve the value if it is found, or null
+ * if the value is not needed. If non-null, upon return of the function,
+ * the {@code value} will be set if it could be retrieved.
+ *
+ * @return false if the key definitely does not exist in the database,
+ * otherwise true.
+ */
+ public boolean keyMayExist(
+ final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final int offset, final int len,
+ /* @Nullable */ final Holder<byte[]> valueHolder) {
+ return keyMayExist(columnFamilyHandle, null, key, offset, len,
+ valueHolder);
+ }
+
+ /**
+ * If the key definitely does not exist in the database, then this method
+ * returns false, otherwise it returns true if the key may exist.
+ *
+ * If the caller wants to obtain the value when the key
+ * is found in memory, then {@code valueHolder} must be set.
+ *
+ * This check is potentially lighter-weight than invoking
+ * {@link #get(ReadOptions, byte[])}, as it may avoid doing any I/O.
+ *
+ * @param readOptions {@link ReadOptions} instance
+ * @param key byte array of a key to search for
+ * @param valueHolder non-null to retrieve the value if it is found, or null
+ * if the value is not needed. If non-null, upon return of the function,
+ * the {@code value} will be set if it could be retrieved.
+ *
+ * @return false if the key definitely does not exist in the database,
+ * otherwise true.
+ */
+ public boolean keyMayExist(
+ final ReadOptions readOptions, final byte[] key,
+ /* @Nullable */ final Holder<byte[]> valueHolder) {
+ return keyMayExist(readOptions, key, 0, key.length,
+ valueHolder);
+ }
+
+ /**
+ * If the key definitely does not exist in the database, then this method
+ * returns false, otherwise it returns true if the key may exist.
+ *
+ * If the caller wants to obtain the value when the key
+ * is found in memory, then {@code valueHolder} must be set.
+ *
+ * This check is potentially lighter-weight than invoking
+ * {@link #get(ReadOptions, byte[], int, int)}, as it may avoid
+ * doing any I/O.
+ *
+ * @param readOptions {@link ReadOptions} instance
+ * @param key byte array of a key to search for
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than "key".length
+ * @param valueHolder non-null to retrieve the value if it is found, or null
+ * if the value is not needed. If non-null, upon return of the function,
+ * the {@code value} will be set if it could be retrieved.
+ *
+ * @return false if the key definitely does not exist in the database,
+ * otherwise true.
+ */
+ public boolean keyMayExist(
+ final ReadOptions readOptions,
+ final byte[] key, final int offset, final int len,
+ /* @Nullable */ final Holder<byte[]> valueHolder) {
+ return keyMayExist(null, readOptions,
+ key, offset, len, valueHolder);
+ }
+
+ /**
+ * If the key definitely does not exist in the database, then this method
+ * returns false, otherwise it returns true if the key may exist.
+ *
+ * If the caller wants to obtain the value when the key
+ * is found in memory, then {@code valueHolder} must be set.
+ *
+ * This check is potentially lighter-weight than invoking
+ * {@link #get(ColumnFamilyHandle, ReadOptions, byte[])}, as it may
+ * avoid doing any I/O.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param readOptions {@link ReadOptions} instance
+ * @param key byte array of a key to search for
+ * @param valueHolder non-null to retrieve the value if it is found, or null
+ * if the value is not needed. If non-null, upon return of the function,
+ * the {@code value} will be set if it could be retrieved.
+ *
+ * @return false if the key definitely does not exist in the database,
+ * otherwise true.
+ */
+ public boolean keyMayExist(
+ final ColumnFamilyHandle columnFamilyHandle,
+ final ReadOptions readOptions, final byte[] key,
+ /* @Nullable */ final Holder<byte[]> valueHolder) {
+ return keyMayExist(columnFamilyHandle, readOptions,
+ key, 0, key.length, valueHolder);
+ }
+
+ /**
+ * If the key definitely does not exist in the database, then this method
+ * returns false, otherwise it returns true if the key may exist.
+ *
+ * If the caller wants to obtain the value when the key
+ * is found in memory, then {@code valueHolder} must be set.
+ *
+ * This check is potentially lighter-weight than invoking
+ * {@link #get(ColumnFamilyHandle, ReadOptions, byte[], int, int)},
+ * as it may avoid doing any I/O.
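+ *
+ * <p>A minimal usage sketch (illustrative only; assumes an open
+ * {@code db} and a key {@code k}):</p>
+ * <pre>{@code
+ * final Holder<byte[]> holder = new Holder<>();
+ * if (db.keyMayExist(k, holder)) {
+ *   // the key may exist; the holder carries the value only if it
+ *   // could be retrieved from memory
+ *   final byte[] value = holder.getValue() != null
+ *       ? holder.getValue()
+ *       : db.get(k); // confirm with a full read
+ * }
+ * }</pre>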
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param readOptions {@link ReadOptions} instance
+ * @param key byte array of a key to search for
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than "key".length
+ * @param valueHolder non-null to retrieve the value if it is found, or null
+ * if the value is not needed. If non-null, upon return of the function,
+ * the {@code value} will be set if it could be retrieved.
+ *
+ * @return false if the key definitely does not exist in the database,
+ * otherwise true.
+ */
+ public boolean keyMayExist(
+ final ColumnFamilyHandle columnFamilyHandle,
+ final ReadOptions readOptions,
+ final byte[] key, final int offset, final int len,
+ /* @Nullable */ final Holder<byte[]> valueHolder) {
+ checkBounds(offset, len, key.length);
+ if (valueHolder == null) {
+ return keyMayExist(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ readOptions == null ? 0 : readOptions.nativeHandle_,
+ key, offset, len);
+ } else {
+ final byte[][] result = keyMayExistFoundValue(
+ nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ readOptions == null ? 0 : readOptions.nativeHandle_,
+ key, offset, len);
+ if (result[0][0] == 0x0) {
+ valueHolder.setValue(null);
+ return false;
+ } else if (result[0][0] == 0x1) {
+ valueHolder.setValue(null);
+ return true;
+ } else {
+ valueHolder.setValue(result[1]);
+ return true;
+ }
+ }
+ }
+
+ /**
+ * <p>Return a heap-allocated iterator over the contents of the
+ * database. The result of newIterator() is initially invalid
+ * (caller must call one of the Seek methods on the iterator
+ * before using it).</p>
+ *
+ * <p>Caller should close the iterator when it is no longer needed.
+ * The returned iterator should be closed before this db is closed.
+ * </p>
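+ *
+ * <p>A minimal usage sketch (illustrative only):</p>
+ * <pre>{@code
+ * try (final RocksIterator it = db.newIterator()) {
+ *   for (it.seekToFirst(); it.isValid(); it.next()) {
+ *     final byte[] key = it.key();
+ *     final byte[] value = it.value();
+ *   }
+ * }
+ * }</pre>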
+ *
+ * @return instance of iterator object.
+ */
+ public RocksIterator newIterator() {
+ return new RocksIterator(this, iterator(nativeHandle_));
+ }
+
+ /**
+ * <p>Return a heap-allocated iterator over the contents of the
+ * database. The result of newIterator() is initially invalid
+ * (caller must call one of the Seek methods on the iterator
+ * before using it).</p>
+ *
+ * <p>Caller should close the iterator when it is no longer needed.
+ * The returned iterator should be closed before this db is closed.
+ * </p>
+ *
+ * @param readOptions {@link ReadOptions} instance.
+ * @return instance of iterator object.
+ */
+ public RocksIterator newIterator(final ReadOptions readOptions) {
+ return new RocksIterator(this, iterator(nativeHandle_,
+ readOptions.nativeHandle_));
+ }
+
+ /**
+ * <p>Return a heap-allocated iterator over the contents of the
+ * database. The result of newIterator() is initially invalid
+ * (caller must call one of the Seek methods on the iterator
+ * before using it).</p>
+ *
+ * <p>Caller should close the iterator when it is no longer needed.
+ * The returned iterator should be closed before this db is closed.
+ * </p>
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @return instance of iterator object.
+ */
+ public RocksIterator newIterator(
+ final ColumnFamilyHandle columnFamilyHandle) {
+ return new RocksIterator(this, iteratorCF(nativeHandle_,
+ columnFamilyHandle.nativeHandle_));
+ }
+
+ /**
+ * <p>Return a heap-allocated iterator over the contents of the
+ * database. The result of newIterator() is initially invalid
+ * (caller must call one of the Seek methods on the iterator
+ * before using it).</p>
+ *
+ * <p>Caller should close the iterator when it is no longer needed.
+ * The returned iterator should be closed before this db is closed.
+ * </p>
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param readOptions {@link ReadOptions} instance.
+ * @return instance of iterator object.
+ */
+ public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle,
+ final ReadOptions readOptions) {
+ return new RocksIterator(this, iteratorCF(nativeHandle_,
+ columnFamilyHandle.nativeHandle_, readOptions.nativeHandle_));
+ }
+
+ /**
+ * Returns iterators from a consistent database state across multiple
+ * column families. Iterators are heap allocated and need to be closed
+ * before the db is closed.
+ *
+ * @param columnFamilyHandleList {@link java.util.List} containing
+ * {@link org.rocksdb.ColumnFamilyHandle} instances.
+ * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator}
+ * instances
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public List<RocksIterator> newIterators(
+ final List<ColumnFamilyHandle> columnFamilyHandleList)
+ throws RocksDBException {
+ return newIterators(columnFamilyHandleList, new ReadOptions());
+ }
+
+ /**
+ * Returns iterators from a consistent database state across multiple
+ * column families. Iterators are heap allocated and need to be closed
+ * before the db is closed.
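+ *
+ * <p>A minimal usage sketch (illustrative only; assumes {@code cfHandles}
+ * is the list of column family handles opened with the DB):</p>
+ * <pre>{@code
+ * final List<RocksIterator> its = db.newIterators(cfHandles);
+ * try {
+ *   for (final RocksIterator it : its) {
+ *     it.seekToFirst();
+ *     // ... read a consistent view of each column family
+ *   }
+ * } finally {
+ *   for (final RocksIterator it : its) {
+ *     it.close();
+ *   }
+ * }
+ * }</pre>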
+ *
+ * @param columnFamilyHandleList {@link java.util.List} containing
+ * {@link org.rocksdb.ColumnFamilyHandle} instances.
+ * @param readOptions {@link ReadOptions} instance.
+ * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator}
+ * instances
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public List<RocksIterator> newIterators(
+ final List<ColumnFamilyHandle> columnFamilyHandleList,
+ final ReadOptions readOptions) throws RocksDBException {
+
+ final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()];
+ for (int i = 0; i < columnFamilyHandleList.size(); i++) {
+ columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
+ }
+
+ final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles,
+ readOptions.nativeHandle_);
+
+ final List<RocksIterator> iterators = new ArrayList<>(
+ columnFamilyHandleList.size());
+ for (int i = 0; i < columnFamilyHandleList.size(); i++) {
+ iterators.add(new RocksIterator(this, iteratorRefs[i]));
+ }
+ return iterators;
+ }
+
+ /**
+ * <p>Return a handle to the current DB state. Iterators created with
+ * this handle will all observe a stable snapshot of the current DB
+ * state. The caller must call ReleaseSnapshot(result) when the
+ * snapshot is no longer needed.</p>
+ *
+ * <p>null will be returned if the DB fails to take a snapshot or does
+ * not support snapshots.</p>
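+ *
+ * <p>A minimal usage sketch (illustrative only; assumes an open
+ * {@code db} and a key {@code key}):</p>
+ * <pre>{@code
+ * final Snapshot snapshot = db.getSnapshot();
+ * try (final ReadOptions readOptions = new ReadOptions()) {
+ *   readOptions.setSnapshot(snapshot);
+ *   final byte[] value = db.get(readOptions, key); // read at the snapshot
+ * } finally {
+ *   db.releaseSnapshot(snapshot);
+ * }
+ * }</pre>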
+ *
+ * @return Snapshot {@link Snapshot} instance
+ */
+ public Snapshot getSnapshot() {
+ long snapshotHandle = getSnapshot(nativeHandle_);
+ if (snapshotHandle != 0) {
+ return new Snapshot(snapshotHandle);
+ }
+ return null;
+ }
+
+ /**
+ * Release a previously acquired snapshot.
+ *
+ * The caller must not use "snapshot" after this call.
+ *
+ * @param snapshot {@link Snapshot} instance
+ */
+ public void releaseSnapshot(final Snapshot snapshot) {
+ if (snapshot != null) {
+ releaseSnapshot(nativeHandle_, snapshot.nativeHandle_);
+ }
+ }
+
+ /**
+ * DB implementations can export properties about their state
+ * via this method on a per-column-family level.
+ *
+ * <p>If {@code property} is a valid property understood by this DB
+ * implementation, its current value is returned, otherwise an exception
+ * is thrown.</p>
+ *
+ * <p>Valid property names include:
+ * <ul>
+ * <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at
+ * level &lt;N&gt;, where &lt;N&gt; is an ASCII representation of a level
+ * number (e.g. "0").</li>
+ * <li>"rocksdb.stats" - returns a multi-line string that describes statistics
+ * about the internal operation of the DB.</li>
+ * <li>"rocksdb.sstables" - returns a multi-line string that describes all
+ * of the sstables that make up the db contents.</li>
+ * </ul>
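+ *
+ * <p>For example (illustrative only; assumes a column family handle
+ * {@code cfHandle}):</p>
+ * <pre>{@code
+ * final String stats = db.getProperty(cfHandle, "rocksdb.stats");
+ * final String files = db.getProperty(cfHandle, "rocksdb.num-files-at-level0");
+ * }</pre>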
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ * @param property to be fetched. See above for examples
+ * @return property value
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public String getProperty(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final String property) throws RocksDBException {
+ return getProperty(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ property, property.length());
+ }
+
+ /**
+ * DB implementations can export properties about their state
+ * via this method. If {@code property} is a valid property understood by
+ * this DB implementation, its current value is returned, otherwise an
+ * exception is thrown.
+ *
+ * <p>Valid property names include:
+ * <ul>
+ * <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at
+ * level &lt;N&gt;, where &lt;N&gt; is an ASCII representation of a level
+ * number (e.g. "0").</li>
+ * <li>"rocksdb.stats" - returns a multi-line string that describes statistics
+ * about the internal operation of the DB.</li>
+ * <li>"rocksdb.sstables" - returns a multi-line string that describes all
+ * of the sstables that make up the db contents.</li>
+ * </ul>
+ *
+ * @param property to be fetched. See above for examples
+ * @return property value
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public String getProperty(final String property) throws RocksDBException {
+ return getProperty(null, property);
+ }
+
+ /**
+ * Gets a property map.
+ *
+ * @param property to be fetched.
+ *
+ * @return the property map
+ *
+ * @throws RocksDBException if an error happens in the underlying native code.
+ */
+ public Map<String, String> getMapProperty(final String property)
+ throws RocksDBException {
+ return getMapProperty(null, property);
+ }
+
+ /**
+ * Gets a property map.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ * @param property to be fetched.
+ *
+ * @return the property map
+ *
+ * @throws RocksDBException if an error happens in the underlying native code.
+ */
+ public Map<String, String> getMapProperty(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final String property) throws RocksDBException {
+ return getMapProperty(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ property, property.length());
+ }
+
+ /**
+ * <p> Similar to GetProperty(), but only works for a subset of properties
+ * whose return value is a numerical value. Return the value as long.</p>
+ *
+ * <p><strong>Note</strong>: As the returned property is of type
+ * {@code uint64_t} on the C++ side, the returned value can be negative
+ * because Java 7 only supports signed long values.</p>
+ *
+ * <p><strong>Java 7</strong>: To mitigate the problem of the non-existent
+ * unsigned long type, values should be encapsulated using
+ * {@link java.math.BigInteger} to reflect the correct value. The correct
+ * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
+ *
+ * <p><strong>Java 8</strong>: In Java 8 the value should be treated as
+ * unsigned long using provided methods of type {@link Long}.</p>
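+ *
+ * <p>For example (illustrative only), on Java 8:</p>
+ * <pre>{@code
+ * final long v = db.getLongProperty("rocksdb.estimate-live-data-size");
+ * // render the uint64_t value without sign problems
+ * final String unsigned = Long.toUnsignedString(v);
+ * }</pre>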
+ *
+ * @param property to be fetched.
+ *
+ * @return numerical property value.
+ *
+ * @throws RocksDBException if an error happens in the underlying native code.
+ */
+ public long getLongProperty(final String property) throws RocksDBException {
+ return getLongProperty(null, property);
+ }
+
+ /**
+ * <p> Similar to GetProperty(), but only works for a subset of properties
+ * whose return value is a numerical value. Return the value as long.</p>
+ *
+ * <p><strong>Note</strong>: As the returned property is of type
+ * {@code uint64_t} on the C++ side, the returned value can be negative
+ * because Java 7 only supports signed long values.</p>
+ *
+ * <p><strong>Java 7</strong>: To mitigate the problem of the non-existent
+ * unsigned long type, values should be encapsulated using
+ * {@link java.math.BigInteger} to reflect the correct value. The correct
+ * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
+ *
+ * <p><strong>Java 8</strong>: In Java 8 the value should be treated as
+ * unsigned long using provided methods of type {@link Long}.</p>
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family
+ * @param property to be fetched.
+ *
+ * @return numerical property value
+ *
+ * @throws RocksDBException if an error happens in the underlying native code.
+ */
+ public long getLongProperty(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final String property) throws RocksDBException {
+ return getLongProperty(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ property, property.length());
+ }
+
+ /**
+ * Reset internal stats for DB and all column families.
+ *
+ * Note this doesn't reset {@link Options#statistics()} as it is not
+ * owned by DB.
+ *
+ * @throws RocksDBException if an error occurs whilst resetting the stats
+ */
+ public void resetStats() throws RocksDBException {
+ resetStats(nativeHandle_);
+ }
+
+ /**
+ * <p> Return sum of the getLongProperty of all the column families</p>
+ *
+ * <p><strong>Note</strong>: As the returned property is of type
+ * {@code uint64_t} on the C++ side, the returned value can be negative
+ * because Java 7 only supports signed long values.</p>
+ *
+ * <p><strong>Java 7</strong>: To mitigate the problem of the non-existent
+ * unsigned long type, values should be encapsulated using
+ * {@link java.math.BigInteger} to reflect the correct value. The correct
+ * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
+ *
+ * <p><strong>Java 8</strong>: In Java 8 the value should be treated as
+ * unsigned long using provided methods of type {@link Long}.</p>
+ *
+ * @param property to be fetched.
+ *
+ * @return numerical property value
+ *
+ * @throws RocksDBException if an error happens in the underlying native code.
+ */
+ public long getAggregatedLongProperty(final String property)
+ throws RocksDBException {
+ return getAggregatedLongProperty(nativeHandle_, property,
+ property.length());
+ }
+
+ /**
+ * Get the approximate file system space used by keys in each range.
+ *
+ * Note that the returned sizes measure file system space usage, so
+ * if the user data compresses by a factor of ten, the returned
+ * sizes will be one-tenth the size of the corresponding user data size.
+ *
+ * {@code sizeApproximationFlags} defines whether the returned size
+ * should include the recently written data in the mem-tables (if
+ * the mem-table type supports it), data serialized to disk, or both.
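+ *
+ * <p>A minimal usage sketch (illustrative only; assumes two key bounds
+ * {@code from} and {@code to} as byte arrays):</p>
+ * <pre>{@code
+ * try (final Slice start = new Slice(from);
+ *      final Slice limit = new Slice(to)) {
+ *   final long[] sizes = db.getApproximateSizes(columnFamilyHandle,
+ *       Arrays.asList(new Range(start, limit)),
+ *       SizeApproximationFlag.INCLUDE_FILES,
+ *       SizeApproximationFlag.INCLUDE_MEMTABLES);
+ * }
+ * }</pre>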
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family
+ * @param ranges the ranges over which to approximate sizes
+ * @param sizeApproximationFlags flags to determine what to include in the
+ * approximation.
+ *
+ * @return the sizes
+ */
+ public long[] getApproximateSizes(
+ /*@Nullable*/ final ColumnFamilyHandle columnFamilyHandle,
+ final List<Range> ranges,
+ final SizeApproximationFlag... sizeApproximationFlags) {
+
+ byte flags = 0x0;
+ for (final SizeApproximationFlag sizeApproximationFlag
+ : sizeApproximationFlags) {
+ flags |= sizeApproximationFlag.getValue();
+ }
+
+ return getApproximateSizes(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ toRangeSliceHandles(ranges), flags);
+ }
+
+ /**
+ * Get the approximate file system space used by keys in each range for
+ * the default column family.
+ *
+ * Note that the returned sizes measure file system space usage, so
+ * if the user data compresses by a factor of ten, the returned
+ * sizes will be one-tenth the size of the corresponding user data size.
+ *
+ * {@code sizeApproximationFlags} defines whether the returned size
+ * should include the recently written data in the mem-tables (if
+ * the mem-table type supports it), data serialized to disk, or both.
+ *
+ * @param ranges the ranges over which to approximate sizes
+ * @param sizeApproximationFlags flags to determine what to include in the
+ * approximation.
+ *
+ * @return the sizes.
+ */
+ public long[] getApproximateSizes(final List<Range> ranges,
+ final SizeApproximationFlag... sizeApproximationFlags) {
+ return getApproximateSizes(null, ranges, sizeApproximationFlags);
+ }
+
+ public static class CountAndSize {
+ public final long count;
+ public final long size;
+
+ public CountAndSize(final long count, final long size) {
+ this.count = count;
+ this.size = size;
+ }
+ }
+
+ /**
+ * This method is similar to
+ * {@link #getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)},
+ * except that it returns approximate number of records and size in memtables.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family
+ * @param range the ranges over which to get the memtable stats
+ *
+ * @return the count and size for the range
+ */
+ public CountAndSize getApproximateMemTableStats(
+ /*@Nullable*/ final ColumnFamilyHandle columnFamilyHandle,
+ final Range range) {
+ final long[] result = getApproximateMemTableStats(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ range.start.getNativeHandle(),
+ range.limit.getNativeHandle());
+ return new CountAndSize(result[0], result[1]);
+ }
+
+ /**
+ * This method is similar to
+ * {@link #getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)},
+ * except that it returns approximate number of records and size in memtables.
+ *
+ * @param range the ranges over which to get the memtable stats
+ *
+ * @return the count and size for the range
+ */
+ public CountAndSize getApproximateMemTableStats(
+ final Range range) {
+ return getApproximateMemTableStats(null, range);
+ }
+
+ /**
+ * <p>Range compaction of database.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p><strong>See also</strong></p>
+ * <ul>
+ * <li>{@link #compactRange(boolean, int, int)}</li>
+ * <li>{@link #compactRange(byte[], byte[])}</li>
+ * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
+ * </ul>
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void compactRange() throws RocksDBException {
+ compactRange(null);
+ }
+
+ /**
+ * <p>Range compaction of column family.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p><strong>See also</strong></p>
+ * <ul>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
+ * </li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+ * </li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
+ * boolean, int, int)}
+ * </li>
+ * </ul>
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void compactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ compactRange(nativeHandle_, null, -1, null, -1, 0,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * <p>Range compaction of database.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p><strong>See also</strong></p>
+ * <ul>
+ * <li>{@link #compactRange()}</li>
+ * <li>{@link #compactRange(boolean, int, int)}</li>
+ * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
+ * </ul>
+ *
+ * @param begin start of key range (included in range)
+ * @param end end of key range (excluded from range)
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void compactRange(final byte[] begin, final byte[] end)
+ throws RocksDBException {
+ compactRange(null, begin, end);
+ }
+
+ /**
+ * <p>Range compaction of column family.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p><strong>See also</strong></p>
+ * <ul>
+ * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
+ * </li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
+ * boolean, int, int)}
+ * </li>
+ * </ul>
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ * @param begin start of key range (included in range)
+ * @param end end of key range (excluded from range)
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void compactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] begin, final byte[] end) throws RocksDBException {
+ compactRange(nativeHandle_,
+ begin, begin == null ? -1 : begin.length,
+ end, end == null ? -1 : end.length,
+ 0, columnFamilyHandle == null ? 0: columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * <p>Range compaction of database.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p>Compaction outputs should be placed in
+ * {@code options.db_paths[target_path_id]}. Behavior is undefined if
+ * target_path_id is out of range.</p>
+ *
+ * <p><strong>See also</strong></p>
+ * <ul>
+ * <li>{@link #compactRange()}</li>
+ * <li>{@link #compactRange(byte[], byte[])}</li>
+ * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
+ * </ul>
+ *
+ * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
+ *
+ * @param changeLevel reduce level after compaction
+ * @param targetLevel target level to compact to
+ * @param targetPathId the target path id of output path
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ @Deprecated
+ public void compactRange(final boolean changeLevel, final int targetLevel,
+ final int targetPathId) throws RocksDBException {
+ compactRange(null, changeLevel, targetLevel, targetPathId);
+ }
+
+ /**
+ * <p>Range compaction of column family.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p>Compaction outputs should be placed in
+ * {@code options.db_paths[target_path_id]}. Behavior is undefined if
+ * target_path_id is out of range.</p>
+ *
+ * <p><strong>See also</strong></p>
+ * <ul>
+ * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+ * </li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
+ * boolean, int, int)}
+ * </li>
+ * </ul>
+ *
+ * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ * @param changeLevel reduce level after compaction
+ * @param targetLevel target level to compact to
+ * @param targetPathId the target path id of output path
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ @Deprecated
+ public void compactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final boolean changeLevel, final int targetLevel, final int targetPathId)
+ throws RocksDBException {
+ final CompactRangeOptions options = new CompactRangeOptions();
+ options.setChangeLevel(changeLevel);
+ options.setTargetLevel(targetLevel);
+ options.setTargetPathId(targetPathId);
+ compactRange(nativeHandle_,
+ null, -1,
+ null, -1,
+ options.nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * <p>Range compaction of database.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p>Compaction outputs should be placed in
+ * {@code options.db_paths[target_path_id]}. Behavior is undefined if
+ * target_path_id is out of range.</p>
+ *
+ * <p><strong>See also</strong></p>
+ * <ul>
+ * <li>{@link #compactRange()}</li>
+ * <li>{@link #compactRange(boolean, int, int)}</li>
+ * <li>{@link #compactRange(byte[], byte[])}</li>
+ * </ul>
+ *
+ * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)}
+ * instead
+ *
+ * @param begin start of key range (included in range)
+ * @param end end of key range (excluded from range)
+ * @param changeLevel reduce level after compaction
+ * @param targetLevel target level to compact to
+ * @param targetPathId the target path id of output path
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ @Deprecated
+ public void compactRange(final byte[] begin, final byte[] end,
+ final boolean changeLevel, final int targetLevel,
+ final int targetPathId) throws RocksDBException {
+ compactRange(null, begin, end, changeLevel, targetLevel, targetPathId);
+ }
+
+ /**
+ * <p>Range compaction of column family.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p>Compaction outputs should be placed in
+ * {@code options.db_paths[target_path_id]}. Behavior is undefined if
+ * target_path_id is out of range.</p>
+ *
+ * <p><strong>See also</strong></p>
+ * <ul>
+ * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
+ * </li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+ * </li>
+ * </ul>
+ *
+ * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance.
+ * @param begin start of key range (included in range)
+ * @param end end of key range (excluded from range)
+ * @param changeLevel reduce level after compaction
+ * @param targetLevel target level to compact to
+ * @param targetPathId the target path id of output path
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ @Deprecated
+ public void compactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] begin, final byte[] end, final boolean changeLevel,
+ final int targetLevel, final int targetPathId)
+ throws RocksDBException {
+ final CompactRangeOptions options = new CompactRangeOptions();
+ options.setChangeLevel(changeLevel);
+ options.setTargetLevel(targetLevel);
+ options.setTargetPathId(targetPathId);
+ compactRange(nativeHandle_,
+ begin, begin == null ? -1 : begin.length,
+ end, end == null ? -1 : end.length,
+ options.nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * <p>Range compaction of column family.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
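+ *
+ * <p>A minimal usage sketch (illustrative only):</p>
+ * <pre>{@code
+ * try (final CompactRangeOptions options = new CompactRangeOptions()) {
+ *   options.setChangeLevel(true);
+ *   options.setTargetLevel(1);
+ *   // null begin/end compacts the whole key range of the default CF
+ *   db.compactRange(null, null, null, options);
+ * }
+ * }</pre>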
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance.
+ * @param begin start of key range (included in range)
+ * @param end end of key range (excluded from range)
+ * @param compactRangeOptions options for the compaction
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void compactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] begin, final byte[] end,
+ final CompactRangeOptions compactRangeOptions) throws RocksDBException {
+ compactRange(nativeHandle_,
+ begin, begin == null ? -1 : begin.length,
+ end, end == null ? -1 : end.length,
+ compactRangeOptions.nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Change the options for the column family handle.
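+ *
+ * <p>A minimal usage sketch (illustrative only):</p>
+ * <pre>{@code
+ * final MutableColumnFamilyOptions mutableOptions =
+ *     MutableColumnFamilyOptions.builder()
+ *         .setWriteBufferSize(64 * 1024 * 1024)
+ *         .setDisableAutoCompactions(false)
+ *         .build();
+ * db.setOptions(columnFamilyHandle, mutableOptions);
+ * }</pre>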
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ * @param mutableColumnFamilyOptions the options.
+ *
+ * @throws RocksDBException if an error occurs whilst setting the options
+ */
+ public void setOptions(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle,
+ final MutableColumnFamilyOptions mutableColumnFamilyOptions)
+ throws RocksDBException {
+ setOptions(nativeHandle_, columnFamilyHandle.nativeHandle_,
+ mutableColumnFamilyOptions.getKeys(),
+ mutableColumnFamilyOptions.getValues());
+ }
+
+ /**
+ * Change the options for the default column family handle.
+ *
+ * @param mutableColumnFamilyOptions the options.
+ *
+ * @throws RocksDBException if an error occurs whilst setting the options
+ */
+ public void setOptions(
+ final MutableColumnFamilyOptions mutableColumnFamilyOptions)
+ throws RocksDBException {
+ setOptions(null, mutableColumnFamilyOptions);
+ }
+
+ /**
+ * Set the options for the column family handle.
+ *
+ * @param mutableDBoptions the options.
+ *
+ * @throws RocksDBException if an error occurs whilst setting the options
+ */
+ public void setDBOptions(final MutableDBOptions mutableDBoptions)
+ throws RocksDBException {
+ setDBOptions(nativeHandle_,
+ mutableDBoptions.getKeys(),
+ mutableDBoptions.getValues());
+ }
+
+ /**
+ * Takes a list of files specified by file names and
+ * compacts them to the specified level.
+ *
+ * Note that the behavior is different from
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+ * in that CompactFiles() performs the compaction job using the CURRENT
+ * thread.
+ *
+ * @param compactionOptions compaction options
+ * @param inputFileNames the name of the files to compact
+ * @param outputLevel the level to which they should be compacted
+ * @param outputPathId the id of the output path, or -1
+ * @param compactionJobInfo the compaction job info, this parameter
+ * will be updated with the info from compacting the files,
+ * can just be null if you don't need it.
+ *
+ * @return the list of compacted files
+ *
+ * @throws RocksDBException if an error occurs during compaction
+ */
+ public List<String> compactFiles(
+ final CompactionOptions compactionOptions,
+ final List<String> inputFileNames,
+ final int outputLevel,
+ final int outputPathId,
+ /* @Nullable */ final CompactionJobInfo compactionJobInfo)
+ throws RocksDBException {
+ return compactFiles(compactionOptions, null, inputFileNames, outputLevel,
+ outputPathId, compactionJobInfo);
+ }
+
+ /**
+ * Takes a list of files specified by file names and
+ * compacts them to the specified level.
+ *
+ * Note that the behavior is different from
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+ * in that CompactFiles() performs the compaction job using the CURRENT
+ * thread.
+ *
+ * @param compactionOptions compaction options
+ * @param columnFamilyHandle columnFamilyHandle, or null for the
+ * default column family
+ * @param inputFileNames the name of the files to compact
+ * @param outputLevel the level to which they should be compacted
+ * @param outputPathId the id of the output path, or -1
+ * @param compactionJobInfo the compaction job info, this parameter
+ * will be updated with the info from compacting the files,
+ * can just be null if you don't need it.
+ *
+ * @return the list of compacted files
+ *
+ * @throws RocksDBException if an error occurs during compaction
+ */
+ public List<String> compactFiles(
+ final CompactionOptions compactionOptions,
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final List<String> inputFileNames,
+ final int outputLevel,
+ final int outputPathId,
+ /* @Nullable */ final CompactionJobInfo compactionJobInfo)
+ throws RocksDBException {
+ return Arrays.asList(compactFiles(nativeHandle_, compactionOptions.nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ inputFileNames.toArray(new String[0]),
+ outputLevel,
+ outputPathId,
+ compactionJobInfo == null ? 0 : compactionJobInfo.nativeHandle_));
+ }
+
+ /**
+ * This function will wait until all currently running background processes
+ * finish. After it returns, no background process will be run until
+ * {@link #continueBackgroundWork()} is called.
+ *
+ * @throws RocksDBException if an error occurs when pausing background work
+ */
+ public void pauseBackgroundWork() throws RocksDBException {
+ pauseBackgroundWork(nativeHandle_);
+ }
+
+ /**
+ * Resumes background work which was suspended by
+ * previously calling {@link #pauseBackgroundWork()}
+ *
+ * @throws RocksDBException if an error occurs when resuming background work
+ */
+ public void continueBackgroundWork() throws RocksDBException {
+ continueBackgroundWork(nativeHandle_);
+ }
+
+ /**
+ * Enable automatic compactions for the given column
+ * families if they were previously disabled.
+ *
+ * The function will first set the
+ * {@link ColumnFamilyOptions#disableAutoCompactions()} option for each
+ * column family to false, after which it will schedule a flush/compaction.
+ *
+ * NOTE: Setting disableAutoCompactions to 'false' through
+ * {@link #setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
+ * does NOT schedule a flush/compaction afterwards, and only changes the
+ * parameter itself within the column family option.
+ *
+ * @param columnFamilyHandles the column family handles
+ *
+ * @throws RocksDBException if an error occurs whilst enabling auto-compaction
+ */
+ public void enableAutoCompaction(
+ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+ enableAutoCompaction(nativeHandle_,
+ toNativeHandleList(columnFamilyHandles));
+ }
+
+ /**
+ * Number of levels used for this DB.
+ *
+ * @return the number of levels
+ */
+ public int numberLevels() {
+ return numberLevels(null);
+ }
+
+ /**
+ * Number of levels used for a column family in this DB.
+ *
+ * @param columnFamilyHandle the column family handle, or null
+ * for the default column family
+ *
+ * @return the number of levels
+ */
+ public int numberLevels(/* @Nullable */final ColumnFamilyHandle columnFamilyHandle) {
+ return numberLevels(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Maximum level to which a new compacted memtable is pushed if it
+ * does not create overlap.
+ *
+ * @return the maximum level
+ */
+ public int maxMemCompactionLevel() {
+ return maxMemCompactionLevel(null);
+ }
+
+ /**
+ * Maximum level to which a new compacted memtable is pushed if it
+ * does not create overlap.
+ *
+ * @param columnFamilyHandle the column family handle
+ *
+ * @return the maximum level
+ */
+ public int maxMemCompactionLevel(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) {
+ return maxMemCompactionLevel(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Number of files in level-0 that would stop writes.
+ *
+ * @return the number of files
+ */
+ public int level0StopWriteTrigger() {
+ return level0StopWriteTrigger(null);
+ }
+
+ /**
+ * Number of files in level-0 that would stop writes.
+ *
+ * @param columnFamilyHandle the column family handle
+ *
+ * @return the number of files
+ */
+ public int level0StopWriteTrigger(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle) {
+ return level0StopWriteTrigger(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Get DB name -- the exact same name that was provided as the path
+ * argument to {@link #open(Options, String)}.
+ *
+ * @return the DB name
+ */
+ public String getName() {
+ return getName(nativeHandle_);
+ }
+
+ /**
+ * Get the Env object from the DB
+ *
+ * @return the env
+ */
+ public Env getEnv() {
+ final long envHandle = getEnv(nativeHandle_);
+ if (envHandle == Env.getDefault().nativeHandle_) {
+ return Env.getDefault();
+ } else {
+ final Env env = new RocksEnv(envHandle);
+ env.disOwnNativeHandle(); // we do not own the Env!
+ return env;
+ }
+ }
+
+ /**
+ * <p>Flush all memory table data.</p>
+ *
+ * <p>Note: it must be ensured that the FlushOptions instance
+ * is not GC'ed before this method finishes. If the wait parameter is
+ * set to false, flush processing is asynchronous.</p>
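+ *
+ * <p>A minimal usage sketch (illustrative only):</p>
+ * <pre>{@code
+ * try (final FlushOptions flushOptions = new FlushOptions()) {
+ *   flushOptions.setWaitForFlush(true);
+ *   db.flush(flushOptions);
+ * }
+ * }</pre>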
+ *
+ * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void flush(final FlushOptions flushOptions)
+ throws RocksDBException {
+ flush(flushOptions, (List<ColumnFamilyHandle>) null);
+ }
+
+ /**
+ * <p>Flush all memory table data.</p>
+ *
+ * <p>Note: it must be ensured that the FlushOptions instance
+ * is not GC'ed before this method finishes. If the wait parameter is
+ * set to false, flush processing is asynchronous.</p>
+ *
+ * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance.
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void flush(final FlushOptions flushOptions,
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ flush(flushOptions,
+ columnFamilyHandle == null ? null : Arrays.asList(columnFamilyHandle));
+ }
+
+ /**
+ * Flushes multiple column families.
+ *
+ * If atomic flush is not enabled, this is equivalent to calling
+ * {@link #flush(FlushOptions, ColumnFamilyHandle)} multiple times.
+ *
+ * If atomic flush is enabled, this will flush all column families
+ * specified up to the latest sequence number at the time when flush is
+ * requested.
+ *
+ * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
+ * @param columnFamilyHandles column family handles.
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void flush(final FlushOptions flushOptions,
+ /* @Nullable */ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+ flush(nativeHandle_, flushOptions.nativeHandle_,
+ toNativeHandleList(columnFamilyHandles));
+ }
+
+ /**
+ * Flush the WAL memory buffer to the file. If {@code sync} is true,
+ * it calls {@link #syncWal()} afterwards.
+ *
+ * @param sync true to also fsync to disk.
+ *
+ * @throws RocksDBException if an error occurs whilst flushing
+ */
+ public void flushWal(final boolean sync) throws RocksDBException {
+ flushWal(nativeHandle_, sync);
+ }
+
+ /**
+ * Sync the WAL.
+ *
+ * Note that {@link #write(WriteOptions, WriteBatch)} followed by
+ * {@link #syncWal()} is not exactly the same as
+ * {@link #write(WriteOptions, WriteBatch)} with
+ * {@link WriteOptions#sync()} set to true; in the latter case the changes
+ * won't be visible until the sync is done.
+ *
+ * Currently only works if {@link Options#allowMmapWrites()} is set to false.
+ *
+ * @throws RocksDBException if an error occurs whilst syncing
+ */
+ public void syncWal() throws RocksDBException {
+ syncWal(nativeHandle_);
+ }
+
+ /**
+ * <p>The sequence number of the most recent transaction.</p>
+ *
+ * @return sequence number of the most
+ * recent transaction.
+ */
+ public long getLatestSequenceNumber() {
+ return getLatestSequenceNumber(nativeHandle_);
+ }
+
+ /**
+ * Instructs DB to preserve deletes with sequence numbers &gt;= sequenceNumber.
+ *
+ * Has no effect if DBOptions#preserveDeletes() is set to false.
+ *
+ * This function assumes that user calls this function with monotonically
+ * increasing seqnums (otherwise we can't guarantee that a particular delete
+ * hasn't been already processed).
+ *
+ * @param sequenceNumber the minimum sequence number to preserve
+ *
+ * @return true if the value was successfully updated,
+ * false if the user attempted to call it with
+ * sequenceNumber &lt;= current value.
+ */
+ public boolean setPreserveDeletesSequenceNumber(final long sequenceNumber) {
+ return setPreserveDeletesSequenceNumber(nativeHandle_, sequenceNumber);
+ }
+
+ /**
+ * <p>Prevent file deletions. Compactions will continue to occur,
+ * but no obsolete files will be deleted. Calling this multiple
+ * times has the same effect as calling it once.</p>
+ *
+ * @throws RocksDBException thrown if operation was not performed
+ * successfully.
+ */
+ public void disableFileDeletions() throws RocksDBException {
+ disableFileDeletions(nativeHandle_);
+ }
+
+ /**
+ * <p>Allow compactions to delete obsolete files.
+ * If force == true, the call to EnableFileDeletions()
+ * will guarantee that file deletions are enabled after
+ * the call, even if DisableFileDeletions() was called
+ * multiple times before.</p>
+ *
+ * <p>If force == false, EnableFileDeletions will only
+ * enable file deletion after it's been called at least
+ * as many times as DisableFileDeletions(), enabling
+ * the two methods to be called by two threads
+ * concurrently without synchronization
+ * -- i.e., file deletions will be enabled only after both
+ * threads call EnableFileDeletions()</p>
+ *
+ * @param force boolean value described above.
+ *
+ * @throws RocksDBException thrown if operation was not performed
+ * successfully.
+ */
+ public void enableFileDeletions(final boolean force)
+ throws RocksDBException {
+ enableFileDeletions(nativeHandle_, force);
+ }
+
+ public static class LiveFiles {
+ /**
+ * The valid size of the manifest file. The manifest file is an ever-growing
+ * file, but only the portion specified here is valid for this snapshot.
+ */
+ public final long manifestFileSize;
+
+ /**
+ * The files are relative to the {@link #getName()} and are not
+ * absolute paths. Despite being relative paths, the file names begin
+ * with "/".
+ */
+ public final List<String> files;
+
+ LiveFiles(final long manifestFileSize, final List<String> files) {
+ this.manifestFileSize = manifestFileSize;
+ this.files = files;
+ }
+ }
+
+ /**
+ * Retrieve the list of all files in the database after flushing the memtable.
+ *
+ * See {@link #getLiveFiles(boolean)}.
+ *
+ * @return the live files
+ *
+ * @throws RocksDBException if an error occurs whilst retrieving the list
+ * of live files
+ */
+ public LiveFiles getLiveFiles() throws RocksDBException {
+ return getLiveFiles(true);
+ }
+
+ /**
+ * Retrieve the list of all files in the database.
+ *
+ * In case you have multiple column families, even if {@code flushMemtable}
+ * is true, you still need to call {@link #getSortedWalFiles()}
+ * after {@link #getLiveFiles(boolean)} to compensate for new data that
+ * arrived in already-flushed column families while other column families
+ * were flushing.
+ *
+ * NOTE: Calling {@link #getLiveFiles(boolean)} followed by
+ * {@link #getSortedWalFiles()} can generate a lossless backup.
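+ *
+ * <p>A minimal sketch of that pattern (illustrative only):</p>
+ * <pre>{@code
+ * final LiveFiles liveFiles = db.getLiveFiles(true);
+ * final List<LogFile> walFiles = db.getSortedWalFiles();
+ * // copy liveFiles.files (only the first liveFiles.manifestFileSize
+ * // bytes of the manifest are valid), then the WAL files, in that order
+ * }</pre>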
+ *
+ * @param flushMemtable set to true to flush before recording the live
+ * files. Setting this to false is useful when we don't want to wait for
+ * a flush, which may in turn have to wait for compaction to complete,
+ * taking an indeterminate time.
+ *
+ * @return the live files
+ *
+ * @throws RocksDBException if an error occurs whilst retrieving the list
+ * of live files
+ */
+ public LiveFiles getLiveFiles(final boolean flushMemtable)
+ throws RocksDBException {
+ final String[] result = getLiveFiles(nativeHandle_, flushMemtable);
+ if (result == null) {
+ return null;
+ }
+ final String[] files = Arrays.copyOf(result, result.length - 1);
+ final long manifestFileSize = Long.parseLong(result[result.length - 1]);
+
+ return new LiveFiles(manifestFileSize, Arrays.asList(files));
+ }
+
+ /**
+ * Retrieve the sorted list of all wal files with earliest file first.
+ *
+ * @return the log files
+ *
+ * @throws RocksDBException if an error occurs whilst retrieving the list
+ * of sorted WAL files
+ */
+ public List<LogFile> getSortedWalFiles() throws RocksDBException {
+ final LogFile[] logFiles = getSortedWalFiles(nativeHandle_);
+ return Arrays.asList(logFiles);
+ }
+
+ /**
+ * <p>Returns an iterator that is positioned at a write-batch containing
+ * seq_number. If the sequence number is non-existent, it returns an
+ * iterator at the first available seq_no after the requested seq_no.</p>
+ *
+ * <p>Must set WAL_ttl_seconds or WAL_size_limit_MB to large values to
+ * use this API, else the WAL files will get
+ * cleared aggressively and the iterator might become invalid before
+ * an update is read.</p>
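+ *
+ * <p>A minimal usage sketch (illustrative only; {@code lastSequenceNumber}
+ * is assumed to be tracked by the caller):</p>
+ * <pre>{@code
+ * try (final TransactionLogIterator it =
+ *          db.getUpdatesSince(lastSequenceNumber)) {
+ *   while (it.isValid()) {
+ *     final TransactionLogIterator.BatchResult batch = it.getBatch();
+ *     // apply batch.writeBatch() ...
+ *     it.next();
+ *   }
+ * }
+ * }</pre>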
+ *
+ * @param sequenceNumber sequence number offset
+ *
+ * @return {@link org.rocksdb.TransactionLogIterator} instance.
+ *
+ * @throws org.rocksdb.RocksDBException if iterator cannot be retrieved
+ * from native-side.
+ */
+ public TransactionLogIterator getUpdatesSince(final long sequenceNumber)
+ throws RocksDBException {
+ return new TransactionLogIterator(
+ getUpdatesSince(nativeHandle_, sequenceNumber));
+ }
+
+ /**
+ * Delete the file name from the db directory and update the internal state to
+ * reflect that. Supports deletion of sst and log files only. 'name' must be
+ * a path relative to the db directory, e.g. 000001.sst, /archive/000003.log
+ *
+ * @param name the file name
+ *
+ * @throws RocksDBException if an error occurs whilst deleting the file
+ */
+ public void deleteFile(final String name) throws RocksDBException {
+ deleteFile(nativeHandle_, name);
+ }
+
+ /**
+ * Gets a list of all table files metadata.
+ *
+ * @return table files metadata.
+ */
+ public List<LiveFileMetaData> getLiveFilesMetaData() {
+ return Arrays.asList(getLiveFilesMetaData(nativeHandle_));
+ }
+
+ /**
+ * Obtains the meta data of the specified column family of the DB.
+ *
+ * @param columnFamilyHandle the column family
+ *
+ * @return the column family metadata
+ */
+ public ColumnFamilyMetaData getColumnFamilyMetaData(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) {
+ return getColumnFamilyMetaData(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Obtains the meta data of the default column family of the DB.
+ *
+ * @return the column family metadata
+ */
+ public ColumnFamilyMetaData getColumnFamilyMetaData() {
+ return getColumnFamilyMetaData(null);
+ }
+
+ /**
+ * ingestExternalFile will load a list of external SST files (1) into the DB.
+ * We will try to find the lowest possible level that the file can fit in, and
+ * ingest the file into this level (2). A file that has a key range
+ * overlapping the memtable key range will require us to flush the memtable
+ * first before ingesting the file.
+ *
+ * (1) External SST files can be created using {@link SstFileWriter}
+ * (2) We will try to ingest the files to the lowest possible level
+ * even if the file compression doesn't match the level compression
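+ *
+ * <p>A minimal usage sketch (illustrative only; assumes {@code sstPath}
+ * names a file created with {@link SstFileWriter}):</p>
+ * <pre>{@code
+ * try (final IngestExternalFileOptions ingestOptions =
+ *          new IngestExternalFileOptions()) {
+ *   db.ingestExternalFile(Arrays.asList(sstPath), ingestOptions);
+ * }
+ * }</pre>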
+ *
+ * @param filePathList The list of files to ingest
+ * @param ingestExternalFileOptions the options for the ingestion
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void ingestExternalFile(final List<String> filePathList,
+ final IngestExternalFileOptions ingestExternalFileOptions)
+ throws RocksDBException {
+ ingestExternalFile(nativeHandle_, getDefaultColumnFamily().nativeHandle_,
+ filePathList.toArray(new String[0]),
+ filePathList.size(), ingestExternalFileOptions.nativeHandle_);
+ }
+
+ /**
+ * ingestExternalFile will load a list of external SST files (1) into the DB.
+ * We will try to find the lowest possible level that the file can fit in, and
+ * ingest the file into this level (2). A file that has a key range
+ * overlapping the memtable key range will require us to flush the memtable
+ * first before ingesting the file.
+ *
+ * (1) External SST files can be created using {@link SstFileWriter}
+ * (2) We will try to ingest the files to the lowest possible level
+ * even if the file compression doesn't match the level compression
+ *
+ * @param columnFamilyHandle The column family for the ingested files
+ * @param filePathList The list of files to ingest
+ * @param ingestExternalFileOptions the options for the ingestion
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void ingestExternalFile(final ColumnFamilyHandle columnFamilyHandle,
+ final List<String> filePathList,
+ final IngestExternalFileOptions ingestExternalFileOptions)
+ throws RocksDBException {
+ ingestExternalFile(nativeHandle_, columnFamilyHandle.nativeHandle_,
+ filePathList.toArray(new String[0]),
+ filePathList.size(), ingestExternalFileOptions.nativeHandle_);
+ }
+
+ /**
+ * Verify checksum
+ *
+ * @throws RocksDBException if the checksum is not valid
+ */
+ public void verifyChecksum() throws RocksDBException {
+ verifyChecksum(nativeHandle_);
+ }
+
+ /**
+ * Gets the handle for the default column family
+ *
+ * @return The handle of the default column family
+ */
+ public ColumnFamilyHandle getDefaultColumnFamily() {
+ final ColumnFamilyHandle cfHandle = new ColumnFamilyHandle(this,
+ getDefaultColumnFamily(nativeHandle_));
+ cfHandle.disOwnNativeHandle();
+ return cfHandle;
+ }
+
+ /**
+ * Get the properties of all tables.
+ *
+ * @param columnFamilyHandle the column family handle, or null for the default
+ * column family.
+ *
+ * @return the properties
+ *
+ * @throws RocksDBException if an error occurs whilst getting the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfAllTables(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ return getPropertiesOfAllTables(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Get the properties of all tables in the default column family.
+ *
+ * @return the properties
+ *
+ * @throws RocksDBException if an error occurs whilst getting the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfAllTables()
+ throws RocksDBException {
+ return getPropertiesOfAllTables(null);
+ }
+
+ /**
+ * Get the properties of tables in range.
+ *
+ * @param columnFamilyHandle the column family handle, or null for the default
+ * column family.
+ * @param ranges the ranges over which to get the table properties
+ *
+ * @return the properties
+ *
+ * @throws RocksDBException if an error occurs whilst getting the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfTablesInRange(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle,
+ final List<Range> ranges) throws RocksDBException {
+ return getPropertiesOfTablesInRange(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ toRangeSliceHandles(ranges));
+ }
+
+ /**
+ * Get the properties of tables in range for the default column family.
+ *
+ * @param ranges the ranges over which to get the table properties
+ *
+ * @return the properties
+ *
+ * @throws RocksDBException if an error occurs whilst getting the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfTablesInRange(
+ final List<Range> ranges) throws RocksDBException {
+ return getPropertiesOfTablesInRange(null, ranges);
+ }
+
+ /**
+ * Suggest the range to compact.
+ *
+ * @param columnFamilyHandle the column family handle, or null for the default
+ * column family.
+ *
+ * @return the suggested range.
+ *
+ * @throws RocksDBException if an error occurs whilst suggesting the range
+ */
+ public Range suggestCompactRange(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ final long[] rangeSliceHandles = suggestCompactRange(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ return new Range(new Slice(rangeSliceHandles[0]),
+ new Slice(rangeSliceHandles[1]));
+ }
+
+ /**
+ * Suggest the range to compact for the default column family.
+ *
+ * @return the suggested range.
+ *
+ * @throws RocksDBException if an error occurs whilst suggesting the range
+ */
+ public Range suggestCompactRange()
+ throws RocksDBException {
+ return suggestCompactRange(null);
+ }
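+
+  // Illustrative sketch: the returned Range wraps two Slices (start and
+  // limit) that bound the suggested compaction range.
+  //
+  //   final Range range = db.suggestCompactRange();
+  //   // range.start and range.limit delimit the range to compact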
+
+ /**
+ * Promote L0.
+ *
+ * @param columnFamilyHandle the column family handle,
+ * or null for the default column family.
+ * @param targetLevel the target level for L0
+ *
+ * @throws RocksDBException if an error occurs whilst promoting L0
+ */
+ public void promoteL0(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle,
+ final int targetLevel) throws RocksDBException {
+ promoteL0(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ targetLevel);
+ }
+
+ /**
+ * Promote L0 for the default column family.
+ *
+ * @param targetLevel the target level for L0
+ *
+ * @throws RocksDBException if an error occurs whilst promoting L0
+ */
+ public void promoteL0(final int targetLevel)
+ throws RocksDBException {
+ promoteL0(null, targetLevel);
+ }
+
+ /**
+ * Trace DB operations.
+ *
+ * Use {@link #endTrace()} to stop tracing.
+ *
+ * @param traceOptions the options
+ * @param traceWriter the trace writer
+ *
+ * @throws RocksDBException if an error occurs whilst starting the trace
+ */
+ public void startTrace(final TraceOptions traceOptions,
+ final AbstractTraceWriter traceWriter) throws RocksDBException {
+ startTrace(nativeHandle_, traceOptions.getMaxTraceFileSize(),
+ traceWriter.nativeHandle_);
+ /*
+ * NOTE: {@link #startTrace(long, long, long)} transfers the ownership
+ * from Java to C++, so we must disown the native handle here.
+ */
+ traceWriter.disOwnNativeHandle();
+ }
+
+ /**
+ * Stop tracing DB operations.
+ *
+ * See {@link #startTrace(TraceOptions, AbstractTraceWriter)}
+ *
+ * @throws RocksDBException if an error occurs whilst ending the trace
+ */
+ public void endTrace() throws RocksDBException {
+ endTrace(nativeHandle_);
+ }
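+
+  // Illustrative sketch: tracing a block of DB operations. A concrete
+  // AbstractTraceWriter implementation (elided here) must be supplied.
+  //
+  //   final TraceOptions traceOptions = new TraceOptions();
+  //   final AbstractTraceWriter traceWriter = ...; // your implementation
+  //   db.startTrace(traceOptions, traceWriter);
+  //   db.put("key".getBytes(), "value".getBytes());
+  //   db.endTrace();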
+
+ /**
+ * Delete files in multiple ranges at once.
+ * Deleting files in many ranges one at a time can be slow; use this API
+ * for better performance in that case.
+ *
+ * @param columnFamily the column family for the operation (null for default)
+ * @param ranges flattened pairs of range bounds (from1, to1, from2, to2, ...)
+ * @param includeEnd whether each range should include its end key
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void deleteFilesInRanges(final ColumnFamilyHandle columnFamily,
+ final List<byte[]> ranges, final boolean includeEnd)
+ throws RocksDBException {
+ if (ranges.isEmpty()) {
+ return;
+ }
+ if ((ranges.size() % 2) != 0) {
+ throw new IllegalArgumentException("Ranges size needs to be multiple of 2 "
+ + "(from1, to1, from2, to2, ...), but is " + ranges.size());
+ }
+
+ final byte[][] rangesArray = ranges.toArray(new byte[ranges.size()][]);
+
+ deleteFilesInRanges(nativeHandle_, columnFamily == null ? 0 : columnFamily.nativeHandle_,
+ rangesArray, includeEnd);
+ }
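+
+  // Illustrative sketch: the ranges list is a flat sequence of pairs, so
+  // deleting files covered by [a, m) and [p, z) looks like this (the keys
+  // are examples only):
+  //
+  //   db.deleteFilesInRanges(null,
+  //       Arrays.asList("a".getBytes(), "m".getBytes(),
+  //           "p".getBytes(), "z".getBytes()),
+  //       false /* includeEnd */);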
+
+ /**
+ * Static method to destroy the contents of the specified database.
+ * Be very careful using this method.
+ *
+ * @param path the path to the RocksDB database.
+ * @param options {@link org.rocksdb.Options} instance.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static void destroyDB(final String path, final Options options)
+ throws RocksDBException {
+ destroyDB(path, options.nativeHandle_);
+ }
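+
+  // Illustrative sketch: destroying a database that is not currently open;
+  // the path is an assumption for the example only.
+  //
+  //   try (final Options options = new Options()) {
+  //     RocksDB.destroyDB("/tmp/db-to-remove", options);
+  //   }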
+
+ private /* @Nullable */ long[] toNativeHandleList(
+ /* @Nullable */ final List<? extends RocksObject> objectList) {
+ if (objectList == null) {
+ return null;
+ }
+ final int len = objectList.size();
+ final long[] handleList = new long[len];
+ for (int i = 0; i < len; i++) {
+ handleList[i] = objectList.get(i).nativeHandle_;
+ }
+ return handleList;
+ }
+
+ private static long[] toRangeSliceHandles(final List<Range> ranges) {
+ final long[] rangeSliceHandles = new long[ranges.size() * 2];
+ for (int i = 0, j = 0; i < ranges.size(); i++) {
+ final Range range = ranges.get(i);
+ rangeSliceHandles[j++] = range.start.getNativeHandle();
+ rangeSliceHandles[j++] = range.limit.getNativeHandle();
+ }
+ return rangeSliceHandles;
+ }
+
+ protected void storeOptionsInstance(DBOptionsInterface options) {
+ options_ = options;
+ }
+
+ private static void checkBounds(int offset, int len, int size) {
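+    // Bit trick: the sign bit of the OR is set iff offset < 0, len < 0,
+    // offset + len overflows, or offset + len > size.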
+ if ((offset | len | (offset + len) | (size - (offset + len))) < 0) {
+ throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size));
+ }
+ }
+
+ private static int computeCapacityHint(final int estimatedNumberOfItems) {
+ // HashMap's default load factor is 0.75, so a capacity of N / 0.75
+ // (~N * 1.34) is the minimum that avoids resizing; N * 1.5 leaves some
+ // headroom, and the +1 buffers the rounding. E.g. 100 items -> 151.
+ return (int)Math.ceil(estimatedNumberOfItems * 1.5 + 1.0);
+ }
+
+ // native methods
+ private native static long open(final long optionsHandle,
+ final String path) throws RocksDBException;
+
+ /**
+ * @param optionsHandle Native handle pointing to an Options object
+ * @param path The directory path for the database files
+ * @param columnFamilyNames An array of column family names
+ * @param columnFamilyOptions An array of native handles pointing to
+ * ColumnFamilyOptions objects
+ *
+ * @return An array of native handles, [0] is the handle of the RocksDB object
+ * [1..n] are handles of the column families
+ *
+ * @throws RocksDBException thrown if the database could not be opened
+ */
+ private native static long[] open(final long optionsHandle,
+ final String path, final byte[][] columnFamilyNames,
+ final long[] columnFamilyOptions) throws RocksDBException;
+
+ private native static long openROnly(final long optionsHandle,
+ final String path) throws RocksDBException;
+
+ /**
+ * @param optionsHandle Native handle pointing to an Options object
+ * @param path The directory path for the database files
+ * @param columnFamilyNames An array of column family names
+ * @param columnFamilyOptions An array of native handles pointing to
+ * ColumnFamilyOptions objects
+ *
+ * @return An array of native handles, [0] is the handle of the RocksDB object
+ * [1..n] are handles of the column families
+ *
+ * @throws RocksDBException thrown if the database could not be opened
+ */
+ private native static long[] openROnly(final long optionsHandle,
+ final String path, final byte[][] columnFamilyNames,
+ final long[] columnFamilyOptions
+ ) throws RocksDBException;
+
+ @Override protected native void disposeInternal(final long handle);
+
+ private native static void closeDatabase(final long handle)
+ throws RocksDBException;
+ private native static byte[][] listColumnFamilies(final long optionsHandle,
+ final String path) throws RocksDBException;
+ private native long createColumnFamily(final long handle,
+ final byte[] columnFamilyName, final int columnFamilyNamelen,
+ final long columnFamilyOptions) throws RocksDBException;
+ private native long[] createColumnFamilies(final long handle,
+ final long columnFamilyOptionsHandle, final byte[][] columnFamilyNames)
+ throws RocksDBException;
+ private native long[] createColumnFamilies(final long handle,
+ final long columnFamilyOptionsHandles[], final byte[][] columnFamilyNames)
+ throws RocksDBException;
+ private native void dropColumnFamily(
+ final long handle, final long cfHandle) throws RocksDBException;
+ private native void dropColumnFamilies(final long handle,
+ final long[] cfHandles) throws RocksDBException;
+ //TODO(AR) best way to express DestroyColumnFamilyHandle? ...maybe in ColumnFamilyHandle?
+ private native void put(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final byte[] value,
+ final int valueOffset, int valueLength) throws RocksDBException;
+ private native void put(final long handle, final byte[] key, final int keyOffset,
+ final int keyLength, final byte[] value, final int valueOffset,
+ final int valueLength, final long cfHandle) throws RocksDBException;
+ private native void put(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength)
+ throws RocksDBException;
+ private native void put(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength,
+ final long cfHandle) throws RocksDBException;
+ private native void delete(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength) throws RocksDBException;
+ private native void delete(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final long cfHandle)
+ throws RocksDBException;
+ private native void delete(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength)
+ throws RocksDBException;
+ private native void delete(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final long cfHandle) throws RocksDBException;
+ private native void singleDelete(
+ final long handle, final byte[] key, final int keyLen)
+ throws RocksDBException;
+ private native void singleDelete(
+ final long handle, final byte[] key, final int keyLen,
+ final long cfHandle) throws RocksDBException;
+ private native void singleDelete(
+ final long handle, final long writeOptHandle, final byte[] key,
+ final int keyLen) throws RocksDBException;
+ private native void singleDelete(
+ final long handle, final long writeOptHandle,
+ final byte[] key, final int keyLen, final long cfHandle)
+ throws RocksDBException;
+ private native void deleteRange(final long handle, final byte[] beginKey,
+ final int beginKeyOffset, final int beginKeyLength, final byte[] endKey,
+ final int endKeyOffset, final int endKeyLength) throws RocksDBException;
+ private native void deleteRange(final long handle, final byte[] beginKey,
+ final int beginKeyOffset, final int beginKeyLength, final byte[] endKey,
+ final int endKeyOffset, final int endKeyLength, final long cfHandle)
+ throws RocksDBException;
+ private native void deleteRange(final long handle, final long writeOptHandle,
+ final byte[] beginKey, final int beginKeyOffset, final int beginKeyLength,
+ final byte[] endKey, final int endKeyOffset, final int endKeyLength)
+ throws RocksDBException;
+ private native void deleteRange(
+ final long handle, final long writeOptHandle, final byte[] beginKey,
+ final int beginKeyOffset, final int beginKeyLength, final byte[] endKey,
+ final int endKeyOffset, final int endKeyLength, final long cfHandle)
+ throws RocksDBException;
+ private native void merge(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final byte[] value,
+ final int valueOffset, final int valueLength) throws RocksDBException;
+ private native void merge(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final byte[] value,
+ final int valueOffset, final int valueLength, final long cfHandle)
+ throws RocksDBException;
+ private native void merge(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength)
+ throws RocksDBException;
+ private native void merge(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength,
+ final long cfHandle) throws RocksDBException;
+ private native void write0(final long handle, final long writeOptHandle,
+ final long wbHandle) throws RocksDBException;
+ private native void write1(final long handle, final long writeOptHandle,
+ final long wbwiHandle) throws RocksDBException;
+ private native int get(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final byte[] value,
+ final int valueOffset, final int valueLength) throws RocksDBException;
+ private native int get(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, byte[] value,
+ final int valueOffset, final int valueLength, final long cfHandle)
+ throws RocksDBException;
+ private native int get(final long handle, final long readOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength)
+ throws RocksDBException;
+ private native int get(final long handle, final long readOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength,
+ final long cfHandle) throws RocksDBException;
+ private native byte[] get(final long handle, byte[] key, final int keyOffset,
+ final int keyLength) throws RocksDBException;
+ private native byte[] get(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final long cfHandle)
+ throws RocksDBException;
+ private native byte[] get(final long handle, final long readOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength)
+ throws RocksDBException;
+ private native byte[] get(final long handle,
+ final long readOptHandle, final byte[] key, final int keyOffset,
+ final int keyLength, final long cfHandle) throws RocksDBException;
+ private native byte[][] multiGet(final long dbHandle, final byte[][] keys,
+ final int[] keyOffsets, final int[] keyLengths);
+ private native byte[][] multiGet(final long dbHandle, final byte[][] keys,
+ final int[] keyOffsets, final int[] keyLengths,
+ final long[] columnFamilyHandles);
+ private native byte[][] multiGet(final long dbHandle, final long rOptHandle,
+ final byte[][] keys, final int[] keyOffsets, final int[] keyLengths);
+ private native byte[][] multiGet(final long dbHandle, final long rOptHandle,
+ final byte[][] keys, final int[] keyOffsets, final int[] keyLengths,
+ final long[] columnFamilyHandles);
+ private native boolean keyMayExist(
+ final long handle, final long cfHandle, final long readOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength);
+ private native byte[][] keyMayExistFoundValue(
+ final long handle, final long cfHandle, final long readOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength);
+ private native void putDirect(long handle, long writeOptHandle, ByteBuffer key, int keyOffset,
+ int keyLength, ByteBuffer value, int valueOffset, int valueLength, long cfHandle)
+ throws RocksDBException;
+ private native long iterator(final long handle);
+ private native long iterator(final long handle, final long readOptHandle);
+ private native long iteratorCF(final long handle, final long cfHandle);
+ private native long iteratorCF(final long handle, final long cfHandle,
+ final long readOptHandle);
+ private native long[] iterators(final long handle,
+ final long[] columnFamilyHandles, final long readOptHandle)
+ throws RocksDBException;
+ private native long getSnapshot(final long nativeHandle);
+ private native void releaseSnapshot(
+ final long nativeHandle, final long snapshotHandle);
+ private native String getProperty(final long nativeHandle,
+ final long cfHandle, final String property, final int propertyLength)
+ throws RocksDBException;
+ private native Map<String, String> getMapProperty(final long nativeHandle,
+ final long cfHandle, final String property, final int propertyLength)
+ throws RocksDBException;
+ private native int getDirect(long handle, long readOptHandle, ByteBuffer key, int keyOffset,
+ int keyLength, ByteBuffer value, int valueOffset, int valueLength, long cfHandle)
+ throws RocksDBException;
+ private native void deleteDirect(long handle, long optHandle, ByteBuffer key, int keyOffset,
+ int keyLength, long cfHandle) throws RocksDBException;
+ private native long getLongProperty(final long nativeHandle,
+ final long cfHandle, final String property, final int propertyLength)
+ throws RocksDBException;
+ private native void resetStats(final long nativeHandle)
+ throws RocksDBException;
+ private native long getAggregatedLongProperty(final long nativeHandle,
+ final String property, int propertyLength) throws RocksDBException;
+ private native long[] getApproximateSizes(final long nativeHandle,
+ final long columnFamilyHandle, final long[] rangeSliceHandles,
+ final byte includeFlags);
+ private final native long[] getApproximateMemTableStats(
+ final long nativeHandle, final long columnFamilyHandle,
+ final long rangeStartSliceHandle, final long rangeLimitSliceHandle);
+ private native void compactRange(final long handle,
+ /* @Nullable */ final byte[] begin, final int beginLen,
+ /* @Nullable */ final byte[] end, final int endLen,
+ final long compactRangeOptHandle, final long cfHandle)
+ throws RocksDBException;
+ private native void setOptions(final long handle, final long cfHandle,
+ final String[] keys, final String[] values) throws RocksDBException;
+ private native void setDBOptions(final long handle,
+ final String[] keys, final String[] values) throws RocksDBException;
+ private native String[] compactFiles(final long handle,
+ final long compactionOptionsHandle,
+ final long columnFamilyHandle,
+ final String[] inputFileNames,
+ final int outputLevel,
+ final int outputPathId,
+ final long compactionJobInfoHandle) throws RocksDBException;
+ private native void pauseBackgroundWork(final long handle)
+ throws RocksDBException;
+ private native void continueBackgroundWork(final long handle)
+ throws RocksDBException;
+ private native void enableAutoCompaction(final long handle,
+ final long[] columnFamilyHandles) throws RocksDBException;
+ private native int numberLevels(final long handle,
+ final long columnFamilyHandle);
+ private native int maxMemCompactionLevel(final long handle,
+ final long columnFamilyHandle);
+ private native int level0StopWriteTrigger(final long handle,
+ final long columnFamilyHandle);
+ private native String getName(final long handle);
+ private native long getEnv(final long handle);
+ private native void flush(final long handle, final long flushOptHandle,
+ /* @Nullable */ final long[] cfHandles) throws RocksDBException;
+ private native void flushWal(final long handle, final boolean sync)
+ throws RocksDBException;
+ private native void syncWal(final long handle) throws RocksDBException;
+ private native long getLatestSequenceNumber(final long handle);
+ private native boolean setPreserveDeletesSequenceNumber(final long handle,
+ final long sequenceNumber);
+ private native void disableFileDeletions(long handle) throws RocksDBException;
+ private native void enableFileDeletions(long handle, boolean force)
+ throws RocksDBException;
+ private native String[] getLiveFiles(final long handle,
+ final boolean flushMemtable) throws RocksDBException;
+ private native LogFile[] getSortedWalFiles(final long handle)
+ throws RocksDBException;
+ private native long getUpdatesSince(final long handle,
+ final long sequenceNumber) throws RocksDBException;
+ private native void deleteFile(final long handle, final String name)
+ throws RocksDBException;
+ private native LiveFileMetaData[] getLiveFilesMetaData(final long handle);
+ private native ColumnFamilyMetaData getColumnFamilyMetaData(
+ final long handle, final long columnFamilyHandle);
+ private native void ingestExternalFile(final long handle,
+ final long columnFamilyHandle, final String[] filePathList,
+ final int filePathListLen, final long ingestExternalFileOptionsHandle)
+ throws RocksDBException;
+ private native void verifyChecksum(final long handle) throws RocksDBException;
+ private native long getDefaultColumnFamily(final long handle);
+ private native Map<String, TableProperties> getPropertiesOfAllTables(
+ final long handle, final long columnFamilyHandle) throws RocksDBException;
+ private native Map<String, TableProperties> getPropertiesOfTablesInRange(
+ final long handle, final long columnFamilyHandle,
+ final long[] rangeSliceHandles);
+ private native long[] suggestCompactRange(final long handle,
+ final long columnFamilyHandle) throws RocksDBException;
+ private native void promoteL0(final long handle,
+ final long columnFamilyHandle, final int targetLevel)
+ throws RocksDBException;
+ private native void startTrace(final long handle, final long maxTraceFileSize,
+ final long traceWriterHandle) throws RocksDBException;
+ private native void endTrace(final long handle) throws RocksDBException;
+ private native void deleteFilesInRanges(long handle, long cfHandle, final byte[][] ranges,
+ boolean includeEnd) throws RocksDBException;
+
+ private native static void destroyDB(final String path,
+ final long optionsHandle) throws RocksDBException;
+
+ protected DBOptionsInterface options_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java
new file mode 100644
index 000000000..8b035f458
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java
@@ -0,0 +1,44 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * A RocksDBException encapsulates the error of an operation. This exception
+ * type is used to describe an internal error from the c++ rocksdb library.
+ */
+public class RocksDBException extends Exception {
+
+ /* @Nullable */ private final Status status;
+
+ /**
+ * Constructs a RocksDBException with the specified error message.
+ *
+ * @param msg the specified error message.
+ */
+ public RocksDBException(final String msg) {
+ this(msg, null);
+ }
+
+ public RocksDBException(final String msg, final Status status) {
+ super(msg);
+ this.status = status;
+ }
+
+ public RocksDBException(final Status status) {
+ super(status.getState() != null ? status.getState()
+ : status.getCodeString());
+ this.status = status;
+ }
+
+ /**
+ * Get the status returned from RocksDB
+ *
+ * @return The status reported by RocksDB, or null if no status is available
+ */
+ public Status getStatus() {
+ return status;
+ }
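+
+  // Illustrative sketch: inspecting the Status carried by a thrown
+  // exception. The db, key and value variables are assumed.
+  //
+  //   try {
+  //     db.put(key, value);
+  //   } catch (final RocksDBException e) {
+  //     final Status status = e.getStatus();
+  //     if (status != null) {
+  //       System.err.println(status.getCodeString());
+  //     }
+  //   }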
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java
new file mode 100644
index 000000000..b3681d77d
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java
@@ -0,0 +1,32 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * <p>A RocksEnv is an interface used by the rocksdb implementation to access
+ * operating system functionality like the filesystem etc.</p>
+ *
+ * <p>All Env implementations are safe for concurrent access from
+ * multiple threads without any external synchronization.</p>
+ */
+public class RocksEnv extends Env {
+
+ /**
+ * <p>Package-private constructor that uses the specified native handle
+ * to construct a RocksEnv.</p>
+ *
+ * <p>Note that the ownership of the input handle
+ * belongs to the caller, and the newly created RocksEnv will not take
+ * the ownership of the input handle. As a result, calling
+ * {@code dispose()} of the created RocksEnv will be a no-op.</p>
+ */
+ RocksEnv(final long handle) {
+ super(handle);
+ }
+
+ @Override
+ protected native final void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java
new file mode 100644
index 000000000..94611cd7a
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java
@@ -0,0 +1,118 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * <p>An iterator that yields a sequence of key/value pairs from a source.
+ * Multiple implementations are provided by this library.
+ * In particular, iterators are provided
+ * to access the contents of a Table or a DB.</p>
+ *
+ * <p>Multiple threads can invoke const methods on a RocksIterator without
+ * external synchronization, but if any of the threads may call a
+ * non-const method, all threads accessing the same RocksIterator must use
+ * external synchronization.</p>
+ *
+ * @see org.rocksdb.RocksObject
+ */
+public class RocksIterator extends AbstractRocksIterator<RocksDB> {
+ protected RocksIterator(RocksDB rocksDB, long nativeHandle) {
+ super(rocksDB, nativeHandle);
+ }
+
+ /**
+ * <p>Return the key for the current entry. The underlying storage for
+ * the returned slice is valid only until the next modification of
+ * the iterator.</p>
+ *
+ * <p>REQUIRES: {@link #isValid()}</p>
+ *
+ * @return key for the current entry.
+ */
+ public byte[] key() {
+ assert(isOwningHandle());
+ return key0(nativeHandle_);
+ }
+
+ /**
+ * <p>Return the key for the current entry. The underlying storage for
+ * the returned slice is valid only until the next modification of
+ * the iterator.</p>
+ *
+ * <p>REQUIRES: {@link #isValid()}</p>
+ *
+ * @param key the out-value to receive the retrieved key.
+ * It is using position and limit. Limit is set according to key size.
+ * Supports direct buffer only.
+ * @return The size of the actual key. If the return value is greater than
+ *     the length of {@code key}, the input buffer {@code key} was too
+ *     small and only a partial result was copied into it.
+ */
+ public int key(ByteBuffer key) {
+ assert (isOwningHandle() && key.isDirect());
+ int result = keyDirect0(nativeHandle_, key, key.position(), key.remaining());
+ key.limit(Math.min(key.position() + result, key.limit()));
+ return result;
+ }
+
+ /**
+ * <p>Return the value for the current entry. The underlying storage for
+ * the returned slice is valid only until the next modification of
+ * the iterator.</p>
+ *
+ * <p>REQUIRES: {@link #isValid()}</p>
+ *
+ * @return value for the current entry.
+ */
+ public byte[] value() {
+ assert(isOwningHandle());
+ return value0(nativeHandle_);
+ }
+
+ /**
+ * <p>Return the value for the current entry. The underlying storage for
+ * the returned slice is valid only until the next modification of
+ * the iterator.</p>
+ *
+ * <p>REQUIRES: {@link #isValid()}</p>
+ *
+ * @param value the out-value to receive the retrieved value.
+ * It is using position and limit. Limit is set according to value size.
+ * Supports direct buffer only.
+ * @return The size of the actual value. If the return value is greater than
+ *     the length of {@code value}, the input buffer {@code value} was too
+ *     small and only a partial result was copied into it.
+ */
+ public int value(ByteBuffer value) {
+ assert (isOwningHandle() && value.isDirect());
+ int result = valueDirect0(nativeHandle_, value, value.position(), value.remaining());
+ value.limit(Math.min(value.position() + result, value.limit()));
+ return result;
+ }
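+
+  // Illustrative sketch: reading keys and values through direct ByteBuffers
+  // to avoid a byte[] allocation per entry. The buffer sizes are assumptions.
+  //
+  //   final ByteBuffer key = ByteBuffer.allocateDirect(128);
+  //   final ByteBuffer value = ByteBuffer.allocateDirect(1024);
+  //   for (it.seekToFirst(); it.isValid(); it.next()) {
+  //     key.clear();
+  //     value.clear();
+  //     final int keySize = it.key(key);
+  //     final int valueSize = it.value(value);
+  //     // a size larger than the buffer capacity signals a partial result
+  //   }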
+
+ @Override protected final native void disposeInternal(final long handle);
+ @Override final native boolean isValid0(long handle);
+ @Override final native void seekToFirst0(long handle);
+ @Override final native void seekToLast0(long handle);
+ @Override final native void next0(long handle);
+ @Override final native void prev0(long handle);
+ @Override final native void seek0(long handle, byte[] target, int targetLen);
+ @Override final native void seekForPrev0(long handle, byte[] target, int targetLen);
+ @Override
+ final native void seekDirect0(long handle, ByteBuffer target, int targetOffset, int targetLen);
+ @Override
+ final native void seekForPrevDirect0(
+ long handle, ByteBuffer target, int targetOffset, int targetLen);
+ @Override final native void status0(long handle) throws RocksDBException;
+
+ private native byte[] key0(long handle);
+ private native byte[] value0(long handle);
+ private native int keyDirect0(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen);
+ private native int valueDirect0(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java
new file mode 100644
index 000000000..ddd2593c1
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java
@@ -0,0 +1,117 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * <p>Defines the interface for an Iterator which provides
+ * access to data one entry at a time. Multiple implementations
+ * are provided by this library. In particular, iterators are provided
+ * to access the contents of a DB and Write Batch.</p>
+ *
+ * <p>Multiple threads can invoke const methods on a RocksIterator without
+ * external synchronization, but if any of the threads may call a
+ * non-const method, all threads accessing the same RocksIterator must use
+ * external synchronization.</p>
+ *
+ * @see org.rocksdb.RocksObject
+ */
+public interface RocksIteratorInterface {
+
+ /**
+ * <p>An iterator is either positioned at an entry, or
+ * not valid. This method returns true if the iterator is valid.</p>
+ *
+ * @return true if iterator is valid.
+ */
+ boolean isValid();
+
+ /**
+ * <p>Position at the first entry in the source. The iterator is Valid()
+ * after this call if the source is not empty.</p>
+ */
+ void seekToFirst();
+
+ /**
+ * <p>Position at the last entry in the source. The iterator is
+ * valid after this call if the source is not empty.</p>
+ */
+ void seekToLast();
+
+ /**
+ * <p>Position at the first entry in the source whose key is at or
+ * past target.</p>
+ *
+ * <p>The iterator is valid after this call if the source contains
+ * a key that comes at or past target.</p>
+ *
+ * @param target byte array describing a key or a
+ * key prefix to seek for.
+ */
+ void seek(byte[] target);
+
+ /**
+ * <p>Position at the last entry in the source whose key is at or
+ * before target.</p>
+ *
+ * <p>The iterator is valid after this call if the source contains
+ * a key that comes at or before target.</p>
+ *
+ * @param target byte array describing a key or a
+ * key prefix to seek for.
+ */
+ void seekForPrev(byte[] target);
+
+ /**
+ * <p>Position at the first entry in the source whose key is at or
+ * past target.</p>
+ *
+ * <p>The iterator is valid after this call if the source contains
+ * a key that comes at or past target.</p>
+ *
+ * @param target byte array describing a key or a
+ * key prefix to seek for. Supports direct buffer only.
+ */
+ void seek(ByteBuffer target);
+
+ /**
+ * <p>Position at the last key that is less than or equal to the target key.</p>
+ *
+ * <p>The iterator is valid after this call if the source contains
+ * a key that comes at or before target.</p>
+ *
+ * @param target byte array describing a key or a
+ * key prefix to seek for. Supports direct buffer only.
+ */
+ void seekForPrev(ByteBuffer target);
+
+ /**
+ * <p>Moves to the next entry in the source. After this call, Valid() is
+ * true if the iterator was not positioned at the last entry in the source.</p>
+ *
+ * <p>REQUIRES: {@link #isValid()}</p>
+ */
+ void next();
+
+ /**
+ * <p>Moves to the previous entry in the source. After this call, Valid() is
+ * true if the iterator was not positioned at the first entry in source.</p>
+ *
+ * <p>REQUIRES: {@link #isValid()}</p>
+ */
+ void prev();
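+
+  // Illustrative sketch of the usual iteration contract: position the
+  // iterator, loop while isValid(), then call status() to surface errors.
+  // process() is a placeholder for application code.
+  //
+  //   try (final RocksIterator it = db.newIterator()) {
+  //     for (it.seekToFirst(); it.isValid(); it.next()) {
+  //       process(it.key(), it.value());
+  //     }
+  //     it.status(); // throws if iteration failed silently
+  //   }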
+
+ /**
+ * <p>If an error has occurred, return it. Else return an ok status.
+ * If non-blocking IO is requested and this operation cannot be
+ * satisfied without doing some IO, then this returns Status::Incomplete().</p>
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ void status() throws RocksDBException;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java
new file mode 100644
index 000000000..0afa5f662
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java
@@ -0,0 +1,39 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Memory environment.
+ */
+//TODO(AR) rename to MemEnv
+public class RocksMemEnv extends Env {
+
+ /**
+ * <p>Creates a new environment that stores its data
+ * in memory and delegates all non-file-storage tasks to
+ * {@code baseEnv}.</p>
+ *
+ * <p>The caller must delete the result when it is
+ * no longer needed.</p>
+ *
+ * @param baseEnv the base environment,
+ * must remain live while the result is in use.
+ */
+ public RocksMemEnv(final Env baseEnv) {
+ super(createMemEnv(baseEnv.nativeHandle_));
+ }
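+
+  // Illustrative sketch: an in-memory database. The path is never created
+  // on disk; it merely names the DB within the memory environment.
+  //
+  //   try (final Env env = new RocksMemEnv(Env.getDefault());
+  //        final Options options = new Options()
+  //            .setCreateIfMissing(true)
+  //            .setEnv(env);
+  //        final RocksDB db = RocksDB.open(options, "/in-memory-db")) {
+  //     db.put("key".getBytes(), "value".getBytes());
+  //   }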
+
+ /**
+ * @deprecated Use {@link #RocksMemEnv(Env)}.
+ */
+ @Deprecated
+ public RocksMemEnv() {
+ this(Env.getDefault());
+ }
+
+ private static native long createMemEnv(final long baseEnvHandle);
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java
new file mode 100644
index 000000000..e92289dc0
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java
@@ -0,0 +1,87 @@
+// Copyright (c) 2016, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * RocksMutableObject is an implementation of {@link AbstractNativeReference}
+ * whose reference to the underlying native C++ object can change.
+ *
+ * <p>The use of {@code RocksMutableObject} should be kept to a minimum, as it
+ * has synchronization overheads and introduces complexity. Instead it is
+ * recommended to use {@link RocksObject} where possible.</p>
+ */
+public abstract class RocksMutableObject extends AbstractNativeReference {
+
+ /**
+ * A mutable reference to the value of the C++ pointer pointing to some
+ * underlying native RocksDB C++ object.
+ */
+ private long nativeHandle_;
+ private boolean owningHandle_;
+
+ protected RocksMutableObject() {
+ }
+
+ protected RocksMutableObject(final long nativeHandle) {
+ this.nativeHandle_ = nativeHandle;
+ this.owningHandle_ = true;
+ }
+
+ /**
+ * Closes the existing handle and replaces it with the new handle.
+ *
+ * @param newNativeHandle The C++ pointer to the new native object
+ * @param owningNativeHandle true if we own the new native object
+ */
+ public synchronized void resetNativeHandle(final long newNativeHandle,
+ final boolean owningNativeHandle) {
+ close();
+ setNativeHandle(newNativeHandle, owningNativeHandle);
+ }
+
+ /**
+ * Sets the handle (C++ pointer) of the underlying C++ native object
+ *
+ * @param nativeHandle The C++ pointer to the native object
+ * @param owningNativeHandle true if we own the native object
+ */
+ public synchronized void setNativeHandle(final long nativeHandle,
+ final boolean owningNativeHandle) {
+ this.nativeHandle_ = nativeHandle;
+ this.owningHandle_ = owningNativeHandle;
+ }
+
+ @Override
+ protected synchronized boolean isOwningHandle() {
+ return this.owningHandle_;
+ }
+
+ /**
+ * Gets the value of the C++ pointer pointing to the underlying
+ * native C++ object
+ *
+ * @return the pointer value for the native object
+ */
+ protected synchronized long getNativeHandle() {
+ assert (this.nativeHandle_ != 0);
+ return this.nativeHandle_;
+ }
+
+ @Override
+ public synchronized final void close() {
+ if (isOwningHandle()) {
+ disposeInternal();
+ this.owningHandle_ = false;
+ this.nativeHandle_ = 0;
+ }
+ }
+
+ protected void disposeInternal() {
+ disposeInternal(nativeHandle_);
+ }
+
+ protected abstract void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java
new file mode 100644
index 000000000..545dd896a
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java
@@ -0,0 +1,41 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * RocksObject is an implementation of {@link AbstractNativeReference} which
+ * has an immutable and therefore thread-safe reference to the underlying
+ * native C++ RocksDB object.
+ * <p>
+ * RocksObject is the base-class of almost all RocksDB classes that have a
+ * pointer to some underlying native C++ {@code rocksdb} object.</p>
+ * <p>
+ * The use of {@code RocksObject} should always be preferred over
+ * {@link RocksMutableObject}.</p>
+ */
+public abstract class RocksObject extends AbstractImmutableNativeReference {
+
+ /**
+ * An immutable reference to the value of the C++ pointer pointing to some
+ * underlying native RocksDB C++ object.
+ */
+ protected final long nativeHandle_;
+
+ protected RocksObject(final long nativeHandle) {
+ super(true);
+ this.nativeHandle_ = nativeHandle;
+ }
+
+ /**
+ * Deletes underlying C++ object pointer.
+ */
+ @Override
+ protected void disposeInternal() {
+ disposeInternal(nativeHandle_);
+ }
+
+ protected abstract void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SizeApproximationFlag.java b/src/rocksdb/java/src/main/java/org/rocksdb/SizeApproximationFlag.java
new file mode 100644
index 000000000..fe3c2dd05
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/SizeApproximationFlag.java
@@ -0,0 +1,31 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * Flags for
+ * {@link RocksDB#getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)}
+ * that specify whether memtable stats, file stats, or both
+ * should be included in the approximation.
+ */
+public enum SizeApproximationFlag {
+ NONE((byte)0x0),
+ INCLUDE_MEMTABLES((byte)0x1),
+ INCLUDE_FILES((byte)0x2);
+
+ private final byte value;
+
+ SizeApproximationFlag(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal byte representation.
+ *
+ * @return the internal representation.
+ */
+ byte getValue() {
+ return value;
+ }
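+
+  // Illustrative sketch: approximating sizes for one key range, counting
+  // both memtables and files. The key bounds are examples only.
+  //
+  //   final List<Range> ranges = Arrays.asList(
+  //       new Range(new Slice("a"), new Slice("z")));
+  //   final long[] sizes = db.getApproximateSizes(
+  //       db.getDefaultColumnFamily(), ranges,
+  //       SizeApproximationFlag.INCLUDE_MEMTABLES,
+  //       SizeApproximationFlag.INCLUDE_FILES);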
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java
new file mode 100644
index 000000000..e2c1b97d8
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java
@@ -0,0 +1,51 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+/**
+ * The config for skip-list memtable representation.
+ */
+public class SkipListMemTableConfig extends MemTableConfig {
+
+ public static final long DEFAULT_LOOKAHEAD = 0;
+
+ /**
+ * SkipListMemTableConfig constructor
+ */
+ public SkipListMemTableConfig() {
+ lookahead_ = DEFAULT_LOOKAHEAD;
+ }
+
+ /**
+ * Sets lookahead for SkipList
+ *
+ * @param lookahead If non-zero, each iterator's seek operation
+ * will start the search from the previously visited record
+ * (doing at most 'lookahead' steps). This is an
+ * optimization for access patterns that include many
+ * seeks with consecutive keys.
+ * @return the current instance of SkipListMemTableConfig
+ */
+ public SkipListMemTableConfig setLookahead(final long lookahead) {
+ lookahead_ = lookahead;
+ return this;
+ }
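+
+  // Illustrative sketch: enabling a small lookahead for workloads that seek
+  // to many consecutive keys; the value 2 is an example, not a tuning
+  // recommendation.
+  //
+  //   options.setMemTableConfig(new SkipListMemTableConfig().setLookahead(2));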
+
+ /**
+ * Returns the currently set lookahead value.
+ *
+ * @return lookahead value
+ */
+ public long lookahead() {
+ return lookahead_;
+ }
+
+ @Override protected long newMemTableFactoryHandle() {
+ return newMemTableFactoryHandle0(lookahead_);
+ }
+
+ private native long newMemTableFactoryHandle0(long lookahead)
+ throws IllegalArgumentException;
+
+ private long lookahead_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Slice.java b/src/rocksdb/java/src/main/java/org/rocksdb/Slice.java
new file mode 100644
index 000000000..50d9f7652
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Slice.java
@@ -0,0 +1,136 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * <p>Base class for slices which will receive
+ * byte[] based access to the underlying data.</p>
+ *
+ * <p>byte[] backed slices typically perform better with
+ * small keys and values. When using larger keys and
+ * values consider using {@link org.rocksdb.DirectSlice}</p>
+ */
+public class Slice extends AbstractSlice<byte[]> {
+
+ /**
+ * Indicates whether we have to free the memory pointed to by the Slice
+ */
+ private volatile boolean cleared;
+ private volatile long internalBufferOffset = 0;
+
+ /**
+ * <p>Called from JNI to construct a new Java Slice
+ * without an underlying C++ object set
+ * at creation time.</p>
+ *
+ * <p>Note: You should be aware that
+ * {@link org.rocksdb.RocksObject#disOwnNativeHandle()} is intentionally
+ * called from the default Slice constructor, and that it is marked as
+ * private. This is so that developers cannot construct their own default
+ * Slice objects (at present). As developers cannot construct their own
+ * Slice objects through this, they are not creating underlying C++ Slice
+ * objects, and so there is nothing to free (dispose) from Java.</p>
+ */
+ @SuppressWarnings("unused")
+ private Slice() {
+ super();
+ }
+
+ /**
+ * <p>Package-private Slice constructor which is used to construct
+ * Slice instances from C++ side. As the reference to this
+ * object is also managed from C++ side the handle will be disowned.</p>
+ *
+ * @param nativeHandle address of native instance.
+ */
+ Slice(final long nativeHandle) {
+ this(nativeHandle, false);
+ }
+
+ /**
+ * <p>Package-private Slice constructor which is used to construct
+ * Slice instances using a handle. </p>
+ *
+ * @param nativeHandle address of native instance.
+ * @param owningNativeHandle true if the Java side owns the memory pointed to
+ * by this reference, false if ownership belongs to the C++ side
+ */
+ Slice(final long nativeHandle, final boolean owningNativeHandle) {
+ super();
+ setNativeHandle(nativeHandle, owningNativeHandle);
+ }
+
+ /**
+ * <p>Constructs a slice where the data is taken from
+ * a String.</p>
+ *
+ * @param str String value.
+ */
+ public Slice(final String str) {
+ super(createNewSliceFromString(str));
+ }
+
+ /**
+ * <p>Constructs a slice where the data is a copy of
+ * the byte array from a specific offset.</p>
+ *
+ * @param data byte array.
+ * @param offset offset within the byte array.
+ */
+ public Slice(final byte[] data, final int offset) {
+ super(createNewSlice0(data, offset));
+ }
+
+ /**
+ * <p>Constructs a slice where the data is a copy of
+ * the byte array.</p>
+ *
+ * @param data byte array.
+ */
+ public Slice(final byte[] data) {
+ super(createNewSlice1(data));
+ }
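+
+  // Illustrative sketch: Slices are typically built from Strings or byte
+  // arrays, e.g. as the bounds of a Range.
+  //
+  //   final Range range = new Range(new Slice("a"), new Slice("z"));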
+
+ @Override
+ public void clear() {
+ clear0(getNativeHandle(), !cleared, internalBufferOffset);
+ cleared = true;
+ }
+
+ @Override
+ public void removePrefix(final int n) {
+ removePrefix0(getNativeHandle(), n);
+ this.internalBufferOffset += n;
+ }
+
+ /**
+ * <p>Deletes underlying C++ slice pointer
+ * and any buffered data.</p>
+ *
+ * <p>
+ * Note that this function should be called only after all
+ * RocksDB instances referencing the slice are closed.
+ * Otherwise an undefined behavior will occur.</p>
+ */
+ @Override
+ protected void disposeInternal() {
+ final long nativeHandle = getNativeHandle();
+ if (!cleared) {
+ disposeInternalBuf(nativeHandle, internalBufferOffset);
+ }
+ super.disposeInternal(nativeHandle);
+ }
+
+ @Override protected final native byte[] data0(long handle);
+ private native static long createNewSlice0(final byte[] data,
+ final int length);
+ private native static long createNewSlice1(final byte[] data);
+ private native void clear0(long handle, boolean internalBuffer,
+ long internalBufferOffset);
+ private native void removePrefix0(long handle, int length);
+ private native void disposeInternalBuf(final long handle,
+ long internalBufferOffset);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java b/src/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java
new file mode 100644
index 000000000..39cdf0c2d
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java
@@ -0,0 +1,41 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Snapshot of database
+ */
+public class Snapshot extends RocksObject {
+ Snapshot(final long nativeHandle) {
+ super(nativeHandle);
+
+ // The pointer to the snapshot is always released
+ // by the database instance.
+ disOwnNativeHandle();
+ }
+
+ /**
+ * Return the associated sequence number.
+ *
+ * @return the associated sequence number of
+ * this snapshot.
+ */
+ public long getSequenceNumber() {
+ return getSequenceNumber(nativeHandle_);
+ }
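+
+  // Illustrative sketch: reading at a fixed point in time via a snapshot.
+  //
+  //   final Snapshot snapshot = db.getSnapshot();
+  //   try (final ReadOptions readOptions =
+  //            new ReadOptions().setSnapshot(snapshot)) {
+  //     final byte[] value = db.get(readOptions, "key".getBytes());
+  //   } finally {
+  //     db.releaseSnapshot(snapshot);
+  //   }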
+
+ @Override
+ protected final void disposeInternal(final long handle) {
+ /*
+ * Nothing to release here; we never own the pointer for a Snapshot.
+ * The pointer to the snapshot is released by the database instance.
+ */
+ }
+
+ private native long getSequenceNumber(long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstFileManager.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileManager.java
new file mode 100644
index 000000000..8805410aa
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileManager.java
@@ -0,0 +1,251 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Map;
+
+/**
+ * SstFileManager is used to track SST files in the DB and control their
+ * deletion rate.
+ *
+ * All SstFileManager public functions are thread-safe.
+ *
+ * SstFileManager is not extensible.
+ */
+//@ThreadSafe
+public final class SstFileManager extends RocksObject {
+
+ public static final long RATE_BYTES_PER_SEC_DEFAULT = 0;
+ public static final boolean DELETE_EXISTING_TRASH_DEFAULT = true;
+ public static final double MAX_TRASH_DB_RATIO_DEFAULT = 0.25;
+ public static final long BYTES_MAX_DELETE_CHUNK_DEFAULT = 64 * 1024 * 1024;
+
+ /**
+ * Create a new SstFileManager that can be shared among multiple RocksDB
+ * instances to track SST files and control their deletion rate.
+ *
+ * @param env the environment.
+ *
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ public SstFileManager(final Env env) throws RocksDBException {
+ this(env, null);
+ }
+
+ /**
+ * Create a new SstFileManager that can be shared among multiple RocksDB
+ * instances to track SST files and control their deletion rate.
+ *
+ * @param env the environment.
+ * @param logger if not null, the logger will be used to log errors.
+ *
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger)
+ throws RocksDBException {
+ this(env, logger, RATE_BYTES_PER_SEC_DEFAULT);
+ }
+
+ /**
+ * Create a new SstFileManager that can be shared among multiple RocksDB
+ * instances to track SST files and control their deletion rate.
+ *
+ * @param env the environment.
+ * @param logger if not null, the logger will be used to log errors.
+ *
+ * == Deletion rate limiting specific arguments ==
+ * @param rateBytesPerSec how many bytes should be deleted per second. If
+ *     this value is set to 1024 (1 KB/sec) and we delete a file of size
+ *     4 KB in 1 second, we will wait for another 3 seconds before we delete
+ *     other files. Set to 0 to disable deletion rate limiting.
+ *
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger,
+ final long rateBytesPerSec) throws RocksDBException {
+ this(env, logger, rateBytesPerSec, MAX_TRASH_DB_RATIO_DEFAULT);
+ }
+
+ /**
+ * Create a new SstFileManager that can be shared among multiple RocksDB
+ * instances to track SST files and control their deletion rate.
+ *
+ * @param env the environment.
+ * @param logger if not null, the logger will be used to log errors.
+ *
+ * == Deletion rate limiting specific arguments ==
+ * @param rateBytesPerSec how many bytes should be deleted per second. If
+ *     this value is set to 1024 (1 KB/sec) and we delete a file of size
+ *     4 KB in 1 second, we will wait for another 3 seconds before we delete
+ *     other files. Set to 0 to disable deletion rate limiting.
+ * @param maxTrashDbRatio if the trash size constitutes more than this
+ *     fraction of the total DB size, we will start deleting new files passed
+ *     to the DeleteScheduler immediately.
+ *
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger,
+ final long rateBytesPerSec, final double maxTrashDbRatio)
+ throws RocksDBException {
+ this(env, logger, rateBytesPerSec, maxTrashDbRatio,
+ BYTES_MAX_DELETE_CHUNK_DEFAULT);
+ }
+
+ /**
+ * Create a new SstFileManager that can be shared among multiple RocksDB
+ * instances to track SST files and control their deletion rate.
+ *
+ * @param env the environment.
+ * @param logger if not null, the logger will be used to log errors.
+ *
+ * == Deletion rate limiting specific arguments ==
+ * @param rateBytesPerSec how many bytes should be deleted per second. If
+ *     this value is set to 1024 (1 KB/sec) and we delete a file of size
+ *     4 KB in 1 second, we will wait for another 3 seconds before we delete
+ *     other files. Set to 0 to disable deletion rate limiting.
+ * @param maxTrashDbRatio if the trash size constitutes more than this
+ *     fraction of the total DB size, we will start deleting new files passed
+ *     to the DeleteScheduler immediately.
+ * @param bytesMaxDeleteChunk if a single file is larger than delete chunk,
+ * ftruncate the file by this size each time, rather than dropping the whole
+ * file. 0 means to always delete the whole file.
+ *
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ public SstFileManager(final Env env, /*@Nullable*/final Logger logger,
+ final long rateBytesPerSec, final double maxTrashDbRatio,
+ final long bytesMaxDeleteChunk) throws RocksDBException {
+ super(newSstFileManager(env.nativeHandle_,
+ logger != null ? logger.nativeHandle_ : 0,
+ rateBytesPerSec, maxTrashDbRatio, bytesMaxDeleteChunk));
+ }
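+
+  // Illustrative sketch: sharing one SstFileManager across DB instances and
+  // capping deletions at roughly 10 MB/sec. The rate and path are examples.
+  //
+  //   try (final SstFileManager sstFileManager =
+  //            new SstFileManager(Env.getDefault(), null, 10 * 1024 * 1024);
+  //        final Options options = new Options()
+  //            .setCreateIfMissing(true)
+  //            .setSstFileManager(sstFileManager);
+  //        final RocksDB db = RocksDB.open(options, "/tmp/db")) {
+  //     // deletions of obsolete SST files are now rate limited
+  //   }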
+
+
+ /**
+ * Update the maximum allowed space that should be used by RocksDB, if
+ * the total size of the SST files exceeds {@code maxAllowedSpace}, writes to
+ * RocksDB will fail.
+ *
+ * Setting {@code maxAllowedSpace} to 0 will disable this feature;
+ * the maximum allowed space will be infinite (the default).
+ *
+ * @param maxAllowedSpace the maximum allowed space that should be used by
+ * RocksDB.
+ */
+ public void setMaxAllowedSpaceUsage(final long maxAllowedSpace) {
+ setMaxAllowedSpaceUsage(nativeHandle_, maxAllowedSpace);
+ }
+
+ /**
+ * Set the amount of buffer room each compaction should be able to leave.
+ * In other words, at its maximum disk space consumption, the compaction
+ * should still leave {@code compactionBufferSize} available on the disk so
+ * that other background functions may continue, such as logging and flushing.
+ *
+ * @param compactionBufferSize the amount of buffer room each compaction
+ * should be able to leave.
+ */
+ public void setCompactionBufferSize(final long compactionBufferSize) {
+ setCompactionBufferSize(nativeHandle_, compactionBufferSize);
+ }
+
+ /**
+ * Determines if the total size of SST files exceeded the maximum allowed
+ * space usage.
+ *
+ * @return true when the maximum allowed space usage has been exceeded.
+ */
+ public boolean isMaxAllowedSpaceReached() {
+ return isMaxAllowedSpaceReached(nativeHandle_);
+ }
+
+ /**
+ * Determines if the total size of SST files as well as the estimated size
+ * of ongoing compactions exceeds the maximum allowed space usage.
+ *
+ * @return true when the total size of SST files as well as the estimated
+ * size of ongoing compactions exceeds the maximum allowed space usage.
+ */
+ public boolean isMaxAllowedSpaceReachedIncludingCompactions() {
+ return isMaxAllowedSpaceReachedIncludingCompactions(nativeHandle_);
+ }
+
+ /**
+ * Get the total size of all tracked files.
+ *
+ * @return the total size of all tracked files.
+ */
+ public long getTotalSize() {
+ return getTotalSize(nativeHandle_);
+ }
+
+ /**
+ * Gets all tracked files and their corresponding sizes.
+ *
+ * @return a map containing all tracked files and their corresponding sizes.
+ */
+ public Map<String, Long> getTrackedFiles() {
+ return getTrackedFiles(nativeHandle_);
+ }
+
+ /**
+ * Gets the delete rate limit.
+ *
+ * @return the delete rate limit (in bytes per second).
+ */
+ public long getDeleteRateBytesPerSecond() {
+ return getDeleteRateBytesPerSecond(nativeHandle_);
+ }
+
+ /**
+ * Set the delete rate limit.
+ *
+ * Zero means disable delete rate limiting and delete files immediately.
+ *
+ * @param deleteRate the delete rate limit (in bytes per second).
+ */
+ public void setDeleteRateBytesPerSecond(final long deleteRate) {
+ setDeleteRateBytesPerSecond(nativeHandle_, deleteRate);
+ }
+
+ /**
+ * Get the trash/DB size ratio where new files will be deleted immediately.
+ *
+ * @return the trash/DB size ratio.
+ */
+ public double getMaxTrashDBRatio() {
+ return getMaxTrashDBRatio(nativeHandle_);
+ }
+
+ /**
+ * Set the trash/DB size ratio where new files will be deleted immediately.
+ *
+ * @param ratio the trash/DB size ratio.
+ */
+ public void setMaxTrashDBRatio(final double ratio) {
+ setMaxTrashDBRatio(nativeHandle_, ratio);
+ }
+
+ private native static long newSstFileManager(final long handle,
+ final long logger_handle, final long rateBytesPerSec,
+ final double maxTrashDbRatio, final long bytesMaxDeleteChunk)
+ throws RocksDBException;
+ private native void setMaxAllowedSpaceUsage(final long handle,
+ final long maxAllowedSpace);
+ private native void setCompactionBufferSize(final long handle,
+ final long compactionBufferSize);
+ private native boolean isMaxAllowedSpaceReached(final long handle);
+ private native boolean isMaxAllowedSpaceReachedIncludingCompactions(
+ final long handle);
+ private native long getTotalSize(final long handle);
+ private native Map<String, Long> getTrackedFiles(final long handle);
+ private native long getDeleteRateBytesPerSecond(final long handle);
+ private native void setDeleteRateBytesPerSecond(final long handle,
+ final long deleteRate);
+ private native double getMaxTrashDBRatio(final long handle);
+ private native void setMaxTrashDBRatio(final long handle, final double ratio);
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstFileMetaData.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileMetaData.java
new file mode 100644
index 000000000..a04d05cb5
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileMetaData.java
@@ -0,0 +1,162 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The metadata that describes an SST file.
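+ *
+ * <p>Instances are usually obtained indirectly, e.g. via
+ * {@code RocksDB#getLiveFilesMetaData()}, which returns instances of the
+ * subclass {@code LiveFileMetaData} (a sketch; {@code db} is an open
+ * database):</p>
+ * <pre>{@code
+ * for (final LiveFileMetaData meta : db.getLiveFilesMetaData()) {
+ *   System.out.println(meta.fileName() + ": " + meta.size() + " bytes");
+ * }
+ * }</pre>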
+ */
+public class SstFileMetaData {
+ private final String fileName;
+ private final String path;
+ private final long size;
+ private final long smallestSeqno;
+ private final long largestSeqno;
+ private final byte[] smallestKey;
+ private final byte[] largestKey;
+ private final long numReadsSampled;
+ private final boolean beingCompacted;
+ private final long numEntries;
+ private final long numDeletions;
+
+ /**
+ * Called from JNI C++
+ *
+ * @param fileName the file name
+ * @param path the file path
+ * @param size the size of the file
+ * @param smallestSeqno the smallest sequence number
+ * @param largestSeqno the largest sequence number
+ * @param smallestKey the smallest key
+ * @param largestKey the largest key
+ * @param numReadsSampled the number of reads sampled
+ * @param beingCompacted true if the file is being compacted, false otherwise
+ * @param numEntries the number of entries
+ * @param numDeletions the number of deletions
+ */
+ protected SstFileMetaData(
+ final String fileName,
+ final String path,
+ final long size,
+ final long smallestSeqno,
+ final long largestSeqno,
+ final byte[] smallestKey,
+ final byte[] largestKey,
+ final long numReadsSampled,
+ final boolean beingCompacted,
+ final long numEntries,
+ final long numDeletions) {
+ this.fileName = fileName;
+ this.path = path;
+ this.size = size;
+ this.smallestSeqno = smallestSeqno;
+ this.largestSeqno = largestSeqno;
+ this.smallestKey = smallestKey;
+ this.largestKey = largestKey;
+ this.numReadsSampled = numReadsSampled;
+ this.beingCompacted = beingCompacted;
+ this.numEntries = numEntries;
+ this.numDeletions = numDeletions;
+ }
+
+ /**
+ * Get the name of the file.
+ *
+ * @return the name of the file.
+ */
+ public String fileName() {
+ return fileName;
+ }
+
+ /**
+ * Get the full path where the file is located.
+ *
+ * @return the full path
+ */
+ public String path() {
+ return path;
+ }
+
+ /**
+ * Get the file size in bytes.
+ *
+ * @return file size
+ */
+ public long size() {
+ return size;
+ }
+
+ /**
+ * Get the smallest sequence number in the file.
+ *
+ * @return the smallest sequence number
+ */
+ public long smallestSeqno() {
+ return smallestSeqno;
+ }
+
+ /**
+ * Get the largest sequence number in the file.
+ *
+ * @return the largest sequence number
+ */
+ public long largestSeqno() {
+ return largestSeqno;
+ }
+
+ /**
+ * Get the smallest user defined key in the file.
+ *
+ * @return the smallest user defined key
+ */
+ public byte[] smallestKey() {
+ return smallestKey;
+ }
+
+ /**
+ * Get the largest user defined key in the file.
+ *
+ * @return the largest user defined key
+ */
+ public byte[] largestKey() {
+ return largestKey;
+ }
+
+ /**
+ * Get the number of times the file has been read.
+ *
+ * @return the number of times the file has been read
+ */
+ public long numReadsSampled() {
+ return numReadsSampled;
+ }
+
+ /**
+ * Returns true if the file is currently being compacted.
+ *
+ * @return true if the file is currently being compacted, false otherwise.
+ */
+ public boolean beingCompacted() {
+ return beingCompacted;
+ }
+
+ /**
+ * Get the number of entries.
+ *
+ * @return the number of entries.
+ */
+ public long numEntries() {
+ return numEntries;
+ }
+
+ /**
+ * Get the number of deletions.
+ *
+ * @return the number of deletions.
+ */
+ public long numDeletions() {
+ return numDeletions;
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReader.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReader.java
new file mode 100644
index 000000000..bb1e94ee0
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReader.java
@@ -0,0 +1,82 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
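+/**
+ * SstFileReader is used to read keys and values directly from an SST file,
+ * for example one created by {@link SstFileWriter}.
+ *
+ * <p>A minimal usage sketch; the file path is illustrative only:</p>
+ * <pre>{@code
+ * try (final Options options = new Options();
+ *      final SstFileReader reader = new SstFileReader(options)) {
+ *   reader.open("/tmp/example.sst");
+ *   try (final ReadOptions readOptions = new ReadOptions();
+ *        final SstFileReaderIterator it = reader.newIterator(readOptions)) {
+ *     for (it.seekToFirst(); it.isValid(); it.next()) {
+ *       // use it.key() and it.value()
+ *     }
+ *   }
+ * }
+ * }</pre>
+ */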
+public class SstFileReader extends RocksObject {
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ public SstFileReader(final Options options) {
+ super(newSstFileReader(options.nativeHandle_));
+ }
+
+ /**
+ * Returns an iterator over the contents of the SST file that was opened
+ * with {@link #open(String)}.
+ *
+ * Caller is responsible for closing the returned Iterator.
+ *
+ * @param readOptions Read options.
+ *
+ * @return instance of iterator object.
+ */
+ public SstFileReaderIterator newIterator(final ReadOptions readOptions) {
+ assert (isOwningHandle());
+ long iter = newIterator(nativeHandle_, readOptions.nativeHandle_);
+ return new SstFileReaderIterator(this, iter);
+ }
+
+ /**
+ * Prepare SstFileReader to read a file.
+ *
+ * @param filePath the location of the file
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void open(final String filePath) throws RocksDBException {
+ open(nativeHandle_, filePath);
+ }
+
+ /**
+ * Verify the checksum of the SST file.
+ *
+ * @throws RocksDBException if the checksum is not valid
+ */
+ public void verifyChecksum() throws RocksDBException {
+ verifyChecksum(nativeHandle_);
+ }
+
+ /**
+ * Get the properties of the table.
+ *
+ * @return the properties
+ *
+ * @throws RocksDBException if an error occurs whilst getting the table
+ * properties
+ */
+ public TableProperties getTableProperties() throws RocksDBException {
+ return getTableProperties(nativeHandle_);
+ }
+
+ @Override protected final native void disposeInternal(final long handle);
+ private native long newIterator(final long handle, final long readOptionsHandle);
+
+ private native void open(final long handle, final String filePath)
+ throws RocksDBException;
+
+ private native static long newSstFileReader(final long optionsHandle);
+ private native void verifyChecksum(final long handle) throws RocksDBException;
+ private native TableProperties getTableProperties(final long handle)
+ throws RocksDBException;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReaderIterator.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReaderIterator.java
new file mode 100644
index 000000000..8ba39ba03
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReaderIterator.java
@@ -0,0 +1,120 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * <p>An iterator that yields a sequence of key/value pairs from a source.
+ * Multiple implementations are provided by this library.
+ * In particular, iterators are provided
+ * to access the contents of a Table or a DB.</p>
+ *
+ * <p>Multiple threads can invoke const methods on a RocksIterator without
+ * external synchronization, but if any of the threads may call a
+ * non-const method, all threads accessing the same RocksIterator must use
+ * external synchronization.</p>
+ *
+ * @see RocksObject
+ */
+public class SstFileReaderIterator extends AbstractRocksIterator<SstFileReader> {
+ protected SstFileReaderIterator(SstFileReader reader, long nativeHandle) {
+ super(reader, nativeHandle);
+ }
+
+ /**
+ * <p>Return the key for the current entry. The underlying storage for
+ * the returned slice is valid only until the next modification of
+ * the iterator.</p>
+ *
+ * <p>REQUIRES: {@link #isValid()}</p>
+ *
+ * @return key for the current entry.
+ */
+ public byte[] key() {
+ assert (isOwningHandle());
+ return key0(nativeHandle_);
+ }
+
+ /**
+ * <p>Return the key for the current entry. The underlying storage for
+ * the returned slice is valid only until the next modification of
+ * the iterator.</p>
+ *
+ * <p>REQUIRES: {@link #isValid()}</p>
+ *
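+ * <p>For example, reading the key into a direct buffer and detecting that
+ * the buffer was too small (a sketch; the capacity is illustrative and
+ * {@code iterator} is a valid SstFileReaderIterator):</p>
+ * <pre>{@code
+ * final ByteBuffer buffer = ByteBuffer.allocateDirect(32);
+ * final int keySize = iterator.key(buffer);
+ * if (keySize > buffer.capacity()) {
+ *   // only a partial key was written; retry with a larger buffer
+ * }
+ * }</pre>
+ *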
+ * @param key the out-value to receive the retrieved key.
+ *     It uses the buffer's position and limit; the limit is set
+ *     according to the key size. Only direct buffers are supported.
+ * @return The size of the actual key. If the return value is greater than
+ *     the length of {@code key}, the input buffer {@code key} was too
+ *     small and only a partial result was written.
+ */
+ public int key(ByteBuffer key) {
+ assert (isOwningHandle() && key.isDirect());
+ int result = keyDirect0(nativeHandle_, key, key.position(), key.remaining());
+ key.limit(Math.min(key.position() + result, key.limit()));
+ return result;
+ }
+
+ /**
+ * <p>Return the value for the current entry. The underlying storage for
+ * the returned slice is valid only until the next modification of
+ * the iterator.</p>
+ *
+ * <p>REQUIRES: {@link #isValid()}</p>
+ * @return value for the current entry.
+ */
+ public byte[] value() {
+ assert (isOwningHandle());
+ return value0(nativeHandle_);
+ }
+
+ /**
+ * <p>Return the value for the current entry. The underlying storage for
+ * the returned slice is valid only until the next modification of
+ * the iterator.</p>
+ *
+ * <p>REQUIRES: {@link #isValid()}</p>
+ *
+ * @param value the out-value to receive the retrieved value.
+ *     It uses the buffer's position and limit; the limit is set
+ *     according to the value size. Only direct buffers are supported.
+ * @return The size of the actual value. If the return value is greater
+ *     than the length of {@code value}, the input buffer {@code value}
+ *     was too small and only a partial result was written.
+ */
+ public int value(ByteBuffer value) {
+ assert (isOwningHandle() && value.isDirect());
+ int result = valueDirect0(nativeHandle_, value, value.position(), value.remaining());
+ value.limit(Math.min(value.position() + result, value.limit()));
+ return result;
+ }
+
+ @Override protected final native void disposeInternal(final long handle);
+ @Override final native boolean isValid0(long handle);
+ @Override final native void seekToFirst0(long handle);
+ @Override final native void seekToLast0(long handle);
+ @Override final native void next0(long handle);
+ @Override final native void prev0(long handle);
+ @Override final native void seek0(long handle, byte[] target, int targetLen);
+ @Override final native void seekForPrev0(long handle, byte[] target, int targetLen);
+ @Override final native void status0(long handle) throws RocksDBException;
+
+ private native byte[] key0(long handle);
+ private native byte[] value0(long handle);
+
+ private native int keyDirect0(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen);
+ private native int valueDirect0(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen);
+
+ @Override
+ final native void seekDirect0(long handle, ByteBuffer target, int targetOffset, int targetLen);
+ @Override
+ final native void seekForPrevDirect0(
+ long handle, ByteBuffer target, int targetOffset, int targetLen);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java
new file mode 100644
index 000000000..6d9c559bf
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java
@@ -0,0 +1,290 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * SstFileWriter is used to create sst files that can be added to the
+ * database later. All keys in files generated by SstFileWriter will have
+ * sequence number = 0.
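+ *
+ * <p>A minimal usage sketch; the file path and contents are illustrative
+ * only, and keys must be added in the order defined by the comparator:</p>
+ * <pre>{@code
+ * try (final EnvOptions envOptions = new EnvOptions();
+ *      final Options options = new Options();
+ *      final SstFileWriter writer = new SstFileWriter(envOptions, options)) {
+ *   writer.open("/tmp/example.sst");
+ *   writer.put("key1".getBytes(), "value1".getBytes());
+ *   writer.put("key2".getBytes(), "value2".getBytes());
+ *   writer.finish();
+ * }
+ * }</pre>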
+ */
+public class SstFileWriter extends RocksObject {
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ /**
+ * SstFileWriter Constructor.
+ *
+ * @param envOptions {@link org.rocksdb.EnvOptions} instance.
+ * @param options {@link org.rocksdb.Options} instance.
+ * @param comparator the comparator to specify the ordering of keys.
+ *
+ * @deprecated Use {@link #SstFileWriter(EnvOptions, Options)} instead;
+ *     the comparator should be specified as part of the options.
+ */
+ @Deprecated
+ public SstFileWriter(final EnvOptions envOptions, final Options options,
+ final AbstractComparator comparator) {
+ super(newSstFileWriter(
+ envOptions.nativeHandle_, options.nativeHandle_, comparator.nativeHandle_,
+ comparator.getComparatorType().getValue()));
+ }
+
+ /**
+ * SstFileWriter Constructor.
+ *
+ * @param envOptions {@link org.rocksdb.EnvOptions} instance.
+ * @param options {@link org.rocksdb.Options} instance.
+ */
+ public SstFileWriter(final EnvOptions envOptions, final Options options) {
+ super(newSstFileWriter(
+ envOptions.nativeHandle_, options.nativeHandle_));
+ }
+
+ /**
+ * Prepare SstFileWriter to write to a file.
+ *
+ * @param filePath the location of the file
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void open(final String filePath) throws RocksDBException {
+ open(nativeHandle_, filePath);
+ }
+
+ /**
+ * Add a Put key with value to the currently opened file.
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @deprecated Use {@link #put(Slice, Slice)}
+ */
+ @Deprecated
+ public void add(final Slice key, final Slice value)
+ throws RocksDBException {
+ put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
+ }
+
+ /**
+ * Add a Put key with value to the currently opened file.
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @deprecated Use {@link #put(DirectSlice, DirectSlice)}
+ */
+ @Deprecated
+ public void add(final DirectSlice key, final DirectSlice value)
+ throws RocksDBException {
+ put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
+ }
+
+ /**
+ * Add a Put key with value to the currently opened file.
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void put(final Slice key, final Slice value) throws RocksDBException {
+ put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
+ }
+
+ /**
+ * Add a Put key with value to the currently opened file.
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void put(final DirectSlice key, final DirectSlice value)
+ throws RocksDBException {
+ put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
+ }
+
+ /**
+ * Add a Put key with value to the currently opened file.
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void put(final ByteBuffer key, final ByteBuffer value) throws RocksDBException {
+ assert key.isDirect() && value.isDirect();
+ putDirect(nativeHandle_, key, key.position(), key.remaining(), value, value.position(),
+ value.remaining());
+ key.position(key.limit());
+ value.position(value.limit());
+ }
+
+ /**
+ * Add a Put key with value to the currently opened file.
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void put(final byte[] key, final byte[] value) throws RocksDBException {
+ put(nativeHandle_, key, value);
+ }
+
+ /**
+ * Add a Merge key with value to the currently opened file.
+ *
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void merge(final Slice key, final Slice value)
+ throws RocksDBException {
+ merge(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
+ }
+
+ /**
+ * Add a Merge key with value to the currently opened file.
+ *
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void merge(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ merge(nativeHandle_, key, value);
+ }
+
+ /**
+ * Add a Merge key with value to the currently opened file.
+ *
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void merge(final DirectSlice key, final DirectSlice value)
+ throws RocksDBException {
+ merge(nativeHandle_, key.getNativeHandle(), value.getNativeHandle());
+ }
+
+ /**
+ * Add a deletion key to the currently opened file.
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final Slice key) throws RocksDBException {
+ delete(nativeHandle_, key.getNativeHandle());
+ }
+
+ /**
+ * Add a deletion key to the currently opened file.
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final DirectSlice key) throws RocksDBException {
+ delete(nativeHandle_, key.getNativeHandle());
+ }
+
+ /**
+ * Add a deletion key to the currently opened file.
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void delete(final byte[] key) throws RocksDBException {
+ delete(nativeHandle_, key);
+ }
+
+ /**
+ * Finish the process and close the sst file.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void finish() throws RocksDBException {
+ finish(nativeHandle_);
+ }
+
+ /**
+ * Return the current file size.
+ *
+ * @return the current file size.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public long fileSize() throws RocksDBException {
+ return fileSize(nativeHandle_);
+ }
+
+ private native static long newSstFileWriter(
+ final long envOptionsHandle, final long optionsHandle,
+ final long userComparatorHandle, final byte comparatorType);
+
+ private native static long newSstFileWriter(final long envOptionsHandle,
+ final long optionsHandle);
+
+ private native void open(final long handle, final String filePath)
+ throws RocksDBException;
+
+ private native void put(final long handle, final long keyHandle,
+ final long valueHandle) throws RocksDBException;
+
+ private native void put(final long handle, final byte[] key,
+ final byte[] value) throws RocksDBException;
+
+ private native void putDirect(long handle, ByteBuffer key, int keyOffset, int keyLength,
+ ByteBuffer value, int valueOffset, int valueLength) throws RocksDBException;
+
+ private native long fileSize(long handle) throws RocksDBException;
+
+ private native void merge(final long handle, final long keyHandle,
+ final long valueHandle) throws RocksDBException;
+
+ private native void merge(final long handle, final byte[] key,
+ final byte[] value) throws RocksDBException;
+
+ private native void delete(final long handle, final long keyHandle)
+ throws RocksDBException;
+
+ private native void delete(final long handle, final byte[] key)
+ throws RocksDBException;
+
+ private native void finish(final long handle) throws RocksDBException;
+
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StateType.java b/src/rocksdb/java/src/main/java/org/rocksdb/StateType.java
new file mode 100644
index 000000000..803456bb2
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/StateType.java
@@ -0,0 +1,53 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The type used to refer to a thread state.
+ *
+ * A state describes the lower-level action of a thread,
+ * such as reading or writing a file, or waiting on a mutex.
+ */
+public enum StateType {
+ STATE_UNKNOWN((byte)0x0),
+ STATE_MUTEX_WAIT((byte)0x1);
+
+ private final byte value;
+
+ StateType(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal representation value.
+ *
+ * @return the internal representation value.
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get the State type from the internal representation value.
+ *
+ * @param value the internal representation value.
+ *
+ * @return the state type
+ *
+ * @throws IllegalArgumentException if the value does not match
+ * a StateType
+ */
+ static StateType fromValue(final byte value)
+ throws IllegalArgumentException {
+ for (final StateType threadType : StateType.values()) {
+ if (threadType.value == value) {
+ return threadType;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Unknown value for StateType: " + value);
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Statistics.java b/src/rocksdb/java/src/main/java/org/rocksdb/Statistics.java
new file mode 100644
index 000000000..0938a6d58
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Statistics.java
@@ -0,0 +1,152 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.EnumSet;
+
+/**
+ * Statistics to analyze the performance of a DB. The pointer to the
+ * underlying statistics object is managed by the Options class.
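+ *
+ * <p>A minimal usage sketch; the ticker type is illustrative only:</p>
+ * <pre>{@code
+ * try (final Statistics statistics = new Statistics();
+ *      final Options options = new Options()
+ *          .setCreateIfMissing(true)
+ *          .setStatistics(statistics)) {
+ *   // ... open a database with these options and perform some work ...
+ *   final long cacheMisses =
+ *       statistics.getTickerCount(TickerType.BLOCK_CACHE_MISS);
+ * }
+ * }</pre>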
+ */
+public class Statistics extends RocksObject {
+
+ public Statistics() {
+ super(newStatistics());
+ }
+
+ public Statistics(final Statistics otherStatistics) {
+ super(newStatistics(otherStatistics.nativeHandle_));
+ }
+
+ public Statistics(final EnumSet<HistogramType> ignoreHistograms) {
+ super(newStatistics(toArrayValues(ignoreHistograms)));
+ }
+
+ public Statistics(final EnumSet<HistogramType> ignoreHistograms, final Statistics otherStatistics) {
+ super(newStatistics(toArrayValues(ignoreHistograms), otherStatistics.nativeHandle_));
+ }
+
+ /**
+ * Intentionally package-private.
+ *
+ * Used from {@link DBOptions#statistics()}
+ *
+ * @param existingStatisticsHandle The C++ pointer to an existing statistics object
+ */
+ Statistics(final long existingStatisticsHandle) {
+ super(existingStatisticsHandle);
+ }
+
+ private static byte[] toArrayValues(final EnumSet<HistogramType> histogramTypes) {
+ final byte[] values = new byte[histogramTypes.size()];
+ int i = 0;
+ for(final HistogramType histogramType : histogramTypes) {
+ values[i++] = histogramType.getValue();
+ }
+ return values;
+ }
+
+ /**
+ * Gets the current stats level.
+ *
+ * @return The stats level.
+ */
+ public StatsLevel statsLevel() {
+ return StatsLevel.getStatsLevel(statsLevel(nativeHandle_));
+ }
+
+ /**
+ * Sets the stats level.
+ *
+ * @param statsLevel The stats level to set.
+ */
+ public void setStatsLevel(final StatsLevel statsLevel) {
+ setStatsLevel(nativeHandle_, statsLevel.getValue());
+ }
+
+ /**
+ * Get the count for a ticker.
+ *
+ * @param tickerType The ticker to get the count for
+ *
+ * @return The count for the ticker
+ */
+ public long getTickerCount(final TickerType tickerType) {
+ assert(isOwningHandle());
+ return getTickerCount(nativeHandle_, tickerType.getValue());
+ }
+
+ /**
+ * Get the count for a ticker and reset the ticker's count.
+ *
+ * @param tickerType The ticker to get the count for
+ *
+ * @return The count for the ticker
+ */
+ public long getAndResetTickerCount(final TickerType tickerType) {
+ assert(isOwningHandle());
+ return getAndResetTickerCount(nativeHandle_, tickerType.getValue());
+ }
+
+ /**
+ * Gets the histogram data for a particular histogram.
+ *
+ * @param histogramType The histogram to retrieve the data for
+ *
+ * @return The histogram data
+ */
+ public HistogramData getHistogramData(final HistogramType histogramType) {
+ assert(isOwningHandle());
+ return getHistogramData(nativeHandle_, histogramType.getValue());
+ }
+
+ /**
+ * Gets a string representation of a particular histogram.
+ *
+ * @param histogramType The histogram to retrieve the data for
+ *
+ * @return A string representation of the histogram data
+ */
+ public String getHistogramString(final HistogramType histogramType) {
+ assert(isOwningHandle());
+ return getHistogramString(nativeHandle_, histogramType.getValue());
+ }
+
+ /**
+ * Resets all ticker and histogram stats.
+ *
+ * @throws RocksDBException if an error occurs when resetting the statistics.
+ */
+ public void reset() throws RocksDBException {
+ assert(isOwningHandle());
+ reset(nativeHandle_);
+ }
+
+ /**
+ * String representation of the statistics object.
+ */
+ @Override
+ public String toString() {
+ assert(isOwningHandle());
+ return toString(nativeHandle_);
+ }
+
+ private native static long newStatistics();
+ private native static long newStatistics(final long otherStatisticsHandle);
+ private native static long newStatistics(final byte[] ignoreHistograms);
+ private native static long newStatistics(final byte[] ignoreHistograms, final long otherStatisticsHandle);
+
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native byte statsLevel(final long handle);
+ private native void setStatsLevel(final long handle, final byte statsLevel);
+ private native long getTickerCount(final long handle, final byte tickerType);
+ private native long getAndResetTickerCount(final long handle, final byte tickerType);
+ private native HistogramData getHistogramData(final long handle, final byte histogramType);
+ private native String getHistogramString(final long handle, final byte histogramType);
+ private native void reset(final long nativeHandle) throws RocksDBException;
+ private native String toString(final long nativeHandle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java b/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java
new file mode 100644
index 000000000..fb3f57150
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java
@@ -0,0 +1,111 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * <p>Helper class to collect DB statistics periodically at the interval
+ * specified in the constructor. The callback function (provided in the
+ * constructor) is called with every statistics collection.</p>
+ *
+ * <p>Callers should call {@link #start()} to begin statistics collection,
+ * and {@link #shutDown(int)} to stop it. shutDown() must be called before
+ * the Statistics instances (provided in the constructor) are disposed.</p>
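+ *
+ * <p>A minimal usage sketch; it assumes {@code statistics} is a live
+ * {@link Statistics} instance, {@code callback} implements
+ * {@link StatisticsCollectorCallback}, and the timing values are
+ * illustrative only:</p>
+ * <pre>{@code
+ * final StatisticsCollector collector = new StatisticsCollector(
+ *     java.util.Collections.singletonList(
+ *         new StatsCollectorInput(statistics, callback)),
+ *     1000);  // collect once per second
+ * collector.start();
+ * // ...
+ * collector.shutDown(5000);  // wait up to 5 seconds
+ * }</pre>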
+ */
+public class StatisticsCollector {
+ private final List<StatsCollectorInput> _statsCollectorInputList;
+ private final ExecutorService _executorService;
+ private final int _statsCollectionInterval;
+ private volatile boolean _isRunning = true;
+
+ /**
+ * Constructor for statistics collector.
+ *
+ * @param statsCollectorInputList List of statistics collector input.
+ * @param statsCollectionIntervalInMilliSeconds Statistics collection time
+ * period (specified in milliseconds).
+ */
+ public StatisticsCollector(
+ final List<StatsCollectorInput> statsCollectorInputList,
+ final int statsCollectionIntervalInMilliSeconds) {
+ _statsCollectorInputList = statsCollectorInputList;
+ _statsCollectionInterval = statsCollectionIntervalInMilliSeconds;
+
+ _executorService = Executors.newSingleThreadExecutor();
+ }
+
+ public void start() {
+ _executorService.submit(collectStatistics());
+ }
+
+ /**
+ * Shuts down statistics collector.
+ *
+ * @param shutdownTimeout Time in milliseconds to wait for shutdown before
+ * killing the collection process.
+ * @throws java.lang.InterruptedException thrown if Threads are interrupted.
+ */
+ public void shutDown(final int shutdownTimeout) throws InterruptedException {
+ _isRunning = false;
+
+ _executorService.shutdownNow();
+ // Wait for collectStatistics runnable to finish so that disposal of
+ // statistics does not cause any exceptions to be thrown.
+ _executorService.awaitTermination(shutdownTimeout, TimeUnit.MILLISECONDS);
+ }
+
+ private Runnable collectStatistics() {
+ return new Runnable() {
+
+ @Override
+ public void run() {
+ while (_isRunning) {
+ try {
+ if(Thread.currentThread().isInterrupted()) {
+ break;
+ }
+ for(final StatsCollectorInput statsCollectorInput :
+ _statsCollectorInputList) {
+ Statistics statistics = statsCollectorInput.getStatistics();
+ StatisticsCollectorCallback statsCallback =
+ statsCollectorInput.getCallback();
+
+ // Collect ticker data
+ for(final TickerType ticker : TickerType.values()) {
+ if(ticker != TickerType.TICKER_ENUM_MAX) {
+ final long tickerValue = statistics.getTickerCount(ticker);
+ statsCallback.tickerCallback(ticker, tickerValue);
+ }
+ }
+
+ // Collect histogram data
+ for(final HistogramType histogramType : HistogramType.values()) {
+ if(histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
+ final HistogramData histogramData =
+ statistics.getHistogramData(histogramType);
+ statsCallback.histogramCallback(histogramType, histogramData);
+ }
+ }
+ }
+
+ Thread.sleep(_statsCollectionInterval);
+ }
+ catch (final InterruptedException e) {
+ Thread.currentThread().interrupt();
+ break;
+ }
+ catch (final Exception e) {
+ throw new RuntimeException("Error while calculating statistics", e);
+ }
+ }
+ }
+ };
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java b/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
new file mode 100644
index 000000000..f3785b15f
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
@@ -0,0 +1,32 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Callback interface provided to StatisticsCollector.
+ *
+ * Thread safety:
+ * StatisticsCollector doesn't make any guarantees about thread safety.
+ * If the same StatisticsCollectorCallback reference is passed to multiple
+ * StatisticsCollector instances, then it is the responsibility of the
+ * user to make the StatisticsCollectorCallback implementation thread-safe.
+ *
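+ * <p>A minimal implementation sketch that simply prints each value:</p>
+ * <pre>{@code
+ * final StatisticsCollectorCallback printingCallback =
+ *     new StatisticsCollectorCallback() {
+ *   @Override
+ *   public void tickerCallback(TickerType tickerType, long tickerCount) {
+ *     System.out.println(tickerType + ": " + tickerCount);
+ *   }
+ *
+ *   @Override
+ *   public void histogramCallback(HistogramType histType,
+ *       HistogramData histData) {
+ *     System.out.println(histType + " median: " + histData.getMedian());
+ *   }
+ * };
+ * }</pre>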
+ */
+public interface StatisticsCollectorCallback {
+ /**
+ * Callback function to get ticker values.
+ * @param tickerType Ticker type.
+ * @param tickerCount Value of ticker type.
+ */
+ void tickerCallback(TickerType tickerType, long tickerCount);
+
+ /**
+ * Callback function to get histogram values.
+ * @param histType Histogram type.
+ * @param histData Histogram data.
+ */
+ void histogramCallback(HistogramType histType, HistogramData histData);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java b/src/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java
new file mode 100644
index 000000000..5bf43ade5
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java
@@ -0,0 +1,35 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Contains all information necessary to collect statistics from one instance
+ * of DB statistics.
+ */
+public class StatsCollectorInput {
+ private final Statistics _statistics;
+ private final StatisticsCollectorCallback _statsCallback;
+
+ /**
+ * Constructor for StatsCollectorInput.
+ *
+ * @param statistics Reference of DB statistics.
+ * @param statsCallback Reference of statistics callback interface.
+ */
+ public StatsCollectorInput(final Statistics statistics,
+ final StatisticsCollectorCallback statsCallback) {
+ _statistics = statistics;
+ _statsCallback = statsCallback;
+ }
+
+ public Statistics getStatistics() {
+ return _statistics;
+ }
+
+ public StatisticsCollectorCallback getCallback() {
+ return _statsCallback;
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java b/src/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java
new file mode 100644
index 000000000..58504b84a
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java
@@ -0,0 +1,65 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The level of Statistics to report.
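+ *
+ * <p>For example, to avoid expensive timer calls while keeping most
+ * statistics (a sketch; {@code statistics} is an existing
+ * {@link Statistics} instance):</p>
+ * <pre>{@code
+ * statistics.setStatsLevel(StatsLevel.EXCEPT_TIME_FOR_MUTEX);
+ * }</pre>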
+ */
+public enum StatsLevel {
+ /**
+ * Collect all stats except time inside mutex lock AND time spent on
+ * compression.
+ */
+ EXCEPT_DETAILED_TIMERS((byte) 0x0),
+
+ /**
+ * Collect all stats except the counters that require getting the time
+ * inside the mutex lock.
+ */
+ EXCEPT_TIME_FOR_MUTEX((byte) 0x1),
+
+ /**
+ * Collect all stats, including measuring duration of mutex operations.
+ *
+ * If obtaining the time is expensive on the target platform, this can
+ * reduce scalability with more threads, especially for writes.
+ */
+ ALL((byte) 0x2);
+
+ private final byte value;
+
+ StatsLevel(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * <p>Returns the byte value of the enumerations value.</p>
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get StatsLevel by byte value.
+ *
+ * @param value byte representation of StatsLevel.
+ *
+ * @return {@link org.rocksdb.StatsLevel} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ public static StatsLevel getStatsLevel(final byte value) {
+ for (final StatsLevel statsLevel : StatsLevel.values()) {
+ if (statsLevel.getValue() == value){
+ return statsLevel;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for StatsLevel.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Status.java b/src/rocksdb/java/src/main/java/org/rocksdb/Status.java
new file mode 100644
index 000000000..e633940c2
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Status.java
@@ -0,0 +1,138 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Represents the status returned by a function call in RocksDB.
+ *
+ * Currently only used with {@link RocksDBException} when the
+ * status is not {@link Code#Ok}.
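+ *
+ * <p>A usage sketch, inspecting the status carried by a
+ * {@link RocksDBException}; {@code db}, {@code key} and {@code value}
+ * are assumed to already exist:</p>
+ * <pre>{@code
+ * try {
+ *   db.put(key, value);
+ * } catch (final RocksDBException e) {
+ *   final Status status = e.getStatus();
+ *   if (status.getCode() == Status.Code.TryAgain) {
+ *     // e.g. back off and retry the write
+ *   }
+ * }
+ * }</pre>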
+ */
+public class Status {
+ private final Code code;
+ /* @Nullable */ private final SubCode subCode;
+ /* @Nullable */ private final String state;
+
+ public Status(final Code code, final SubCode subCode, final String state) {
+ this.code = code;
+ this.subCode = subCode;
+ this.state = state;
+ }
+
+ /**
+ * Intentionally private as this will be called from JNI
+ */
+ private Status(final byte code, final byte subCode, final String state) {
+ this.code = Code.getCode(code);
+ this.subCode = SubCode.getSubCode(subCode);
+ this.state = state;
+ }
+
+ public Code getCode() {
+ return code;
+ }
+
+ public SubCode getSubCode() {
+ return subCode;
+ }
+
+ public String getState() {
+ return state;
+ }
+
+ public String getCodeString() {
+ final StringBuilder builder = new StringBuilder()
+ .append(code.name());
+ if(subCode != null && subCode != SubCode.None) {
+ builder.append("(")
+ .append(subCode.name())
+ .append(")");
+ }
+ return builder.toString();
+ }
+
+ // should stay in sync with /include/rocksdb/status.h:Code and /java/rocksjni/portal.h:toJavaStatusCode
+ public enum Code {
+ Ok( (byte)0x0),
+ NotFound( (byte)0x1),
+ Corruption( (byte)0x2),
+ NotSupported( (byte)0x3),
+ InvalidArgument( (byte)0x4),
+ IOError( (byte)0x5),
+ MergeInProgress( (byte)0x6),
+ Incomplete( (byte)0x7),
+ ShutdownInProgress( (byte)0x8),
+ TimedOut( (byte)0x9),
+ Aborted( (byte)0xA),
+ Busy( (byte)0xB),
+ Expired( (byte)0xC),
+ TryAgain( (byte)0xD),
+ Undefined( (byte)0x7F);
+
+ private final byte value;
+
+ Code(final byte value) {
+ this.value = value;
+ }
+
+ public static Code getCode(final byte value) {
+ for (final Code code : Code.values()) {
+ if (code.value == value){
+ return code;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for Code (" + value + ").");
+ }
+
+ /**
+ * Returns the byte value of the enumerations value.
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+ }
+
+ // should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode
+ public enum SubCode {
+ None( (byte)0x0),
+ MutexTimeout( (byte)0x1),
+ LockTimeout( (byte)0x2),
+ LockLimit( (byte)0x3),
+ NoSpace( (byte)0x4),
+ Deadlock( (byte)0x5),
+ StaleFile( (byte)0x6),
+ MemoryLimit( (byte)0x7),
+ Undefined( (byte)0x7F);
+
+ private final byte value;
+
+ SubCode(final byte value) {
+ this.value = value;
+ }
+
+ public static SubCode getSubCode(final byte value) {
+ for (final SubCode subCode : SubCode.values()) {
+ if (subCode.value == value){
+ return subCode;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for SubCode (" + value + ").");
+ }
+
+ /**
+ * Returns the byte value of the enumerations value.
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java b/src/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java
new file mode 100644
index 000000000..ae525d4dc
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java
@@ -0,0 +1,24 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * StringAppendOperator is a merge operator that concatenates two strings,
+ * separated by a configurable delimiter (',' by default).
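+ *
+ * <p>A usage sketch (illustrative only):</p>
+ * <pre>{@code
+ * try (final StringAppendOperator stringAppend = new StringAppendOperator();
+ *      final Options options = new Options()
+ *          .setCreateIfMissing(true)
+ *          .setMergeOperator(stringAppend)) {
+ *   // merge() calls on a DB opened with these options will append
+ *   // values, separated by the delimiter
+ * }
+ * }</pre>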
+ */
+public class StringAppendOperator extends MergeOperator {
+ public StringAppendOperator() {
+ this(',');
+ }
+
+ public StringAppendOperator(char delim) {
+ super(newSharedStringAppendOperator(delim));
+ }
+
+ private native static long newSharedStringAppendOperator(final char delim);
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TableFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/TableFilter.java
new file mode 100644
index 000000000..a39a329fb
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TableFilter.java
@@ -0,0 +1,21 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+/**
+ * Filter for iterating a table.
+ */
+public interface TableFilter {
+
+ /**
+ * A callback to determine whether relevant keys for this scan exist in a
+ * given table based on the table's properties. The callback is passed the
+ * properties of each table during iteration. If the callback returns false,
+ * the table will not be scanned. This option only affects Iterators and has
+ * no impact on point lookups.
+ *
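+ * <p>A minimal implementation sketch that skips tables containing no
+ * entries; how the filter is installed (e.g. via a {@code ReadOptions}
+ * table-filter setter) depends on the RocksJava version:</p>
+ * <pre>{@code
+ * final TableFilter nonEmptyTables = new TableFilter() {
+ *   @Override
+ *   public boolean filter(final TableProperties tableProperties) {
+ *     return tableProperties.getNumEntries() > 0;
+ *   }
+ * };
+ * }</pre>
+ *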
+ * @param tableProperties the table properties.
+ *
+ * @return true if the table should be scanned, false otherwise.
+ */
+ boolean filter(final TableProperties tableProperties);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java
new file mode 100644
index 000000000..dbe524c42
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java
@@ -0,0 +1,22 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * TableFormatConfig is used to configure the internal table format of
+ * RocksDB. To make RocksDB use a specific table format, the associated
+ * TableFormatConfig should be properly set and passed to the Options via
+ * Options.setTableFormatConfig(), and the DB opened with those Options.
+ */
+public abstract class TableFormatConfig {
+ /**
+ * <p>This function should only be called by Options.setTableFormatConfig(),
+ * which will create a C++ shared-pointer to the C++ TableFactory
+ * that is associated with the Java TableFormatConfig.</p>
+ *
+ * @return native handle address to native table instance.
+ */
+ abstract protected long newTableFactoryHandle();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TableProperties.java b/src/rocksdb/java/src/main/java/org/rocksdb/TableProperties.java
new file mode 100644
index 000000000..8c0b7e370
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TableProperties.java
@@ -0,0 +1,366 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+import java.util.Map;
+
+/**
+ * TableProperties contains read-only properties of its associated
+ * table.
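+ *
+ * <p>Instances can be obtained, for example, from
+ * {@link SstFileReader#getTableProperties()} (a sketch; the reader must
+ * already have a file open):</p>
+ * <pre>{@code
+ * final TableProperties props = sstFileReader.getTableProperties();
+ * System.out.println(props.getNumEntries() + " entries, "
+ *     + props.getDataSize() + " bytes of data blocks");
+ * }</pre>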
+ */
+public class TableProperties {
+ private final long dataSize;
+ private final long indexSize;
+ private final long indexPartitions;
+ private final long topLevelIndexSize;
+ private final long indexKeyIsUserKey;
+ private final long indexValueIsDeltaEncoded;
+ private final long filterSize;
+ private final long rawKeySize;
+ private final long rawValueSize;
+ private final long numDataBlocks;
+ private final long numEntries;
+ private final long numDeletions;
+ private final long numMergeOperands;
+ private final long numRangeDeletions;
+ private final long formatVersion;
+ private final long fixedKeyLen;
+ private final long columnFamilyId;
+ private final long creationTime;
+ private final long oldestKeyTime;
+ private final byte[] columnFamilyName;
+ private final String filterPolicyName;
+ private final String comparatorName;
+ private final String mergeOperatorName;
+ private final String prefixExtractorName;
+ private final String propertyCollectorsNames;
+ private final String compressionName;
+ private final Map<String, String> userCollectedProperties;
+ private final Map<String, String> readableProperties;
+ private final Map<String, Long> propertiesOffsets;
+
+ /**
+ * Access is private as this will only be constructed from
+ * C++ via JNI.
+ */
+ private TableProperties(final long dataSize, final long indexSize,
+ final long indexPartitions, final long topLevelIndexSize,
+ final long indexKeyIsUserKey, final long indexValueIsDeltaEncoded,
+ final long filterSize, final long rawKeySize, final long rawValueSize,
+ final long numDataBlocks, final long numEntries, final long numDeletions,
+ final long numMergeOperands, final long numRangeDeletions,
+ final long formatVersion, final long fixedKeyLen,
+ final long columnFamilyId, final long creationTime,
+ final long oldestKeyTime, final byte[] columnFamilyName,
+ final String filterPolicyName, final String comparatorName,
+ final String mergeOperatorName, final String prefixExtractorName,
+ final String propertyCollectorsNames, final String compressionName,
+ final Map<String, String> userCollectedProperties,
+ final Map<String, String> readableProperties,
+ final Map<String, Long> propertiesOffsets) {
+ this.dataSize = dataSize;
+ this.indexSize = indexSize;
+ this.indexPartitions = indexPartitions;
+ this.topLevelIndexSize = topLevelIndexSize;
+ this.indexKeyIsUserKey = indexKeyIsUserKey;
+ this.indexValueIsDeltaEncoded = indexValueIsDeltaEncoded;
+ this.filterSize = filterSize;
+ this.rawKeySize = rawKeySize;
+ this.rawValueSize = rawValueSize;
+ this.numDataBlocks = numDataBlocks;
+ this.numEntries = numEntries;
+ this.numDeletions = numDeletions;
+ this.numMergeOperands = numMergeOperands;
+ this.numRangeDeletions = numRangeDeletions;
+ this.formatVersion = formatVersion;
+ this.fixedKeyLen = fixedKeyLen;
+ this.columnFamilyId = columnFamilyId;
+ this.creationTime = creationTime;
+ this.oldestKeyTime = oldestKeyTime;
+ this.columnFamilyName = columnFamilyName;
+ this.filterPolicyName = filterPolicyName;
+ this.comparatorName = comparatorName;
+ this.mergeOperatorName = mergeOperatorName;
+ this.prefixExtractorName = prefixExtractorName;
+ this.propertyCollectorsNames = propertyCollectorsNames;
+ this.compressionName = compressionName;
+ this.userCollectedProperties = userCollectedProperties;
+ this.readableProperties = readableProperties;
+ this.propertiesOffsets = propertiesOffsets;
+ }
+
+ /**
+ * Get the total size of all data blocks.
+ *
+ * @return the total size of all data blocks.
+ */
+ public long getDataSize() {
+ return dataSize;
+ }
+
+ /**
+ * Get the size of index block.
+ *
+ * @return the size of index block.
+ */
+ public long getIndexSize() {
+ return indexSize;
+ }
+
+ /**
+ * Get the total number of index partitions
+ * if {@link IndexType#kTwoLevelIndexSearch} is used.
+ *
+ * @return the total number of index partitions.
+ */
+ public long getIndexPartitions() {
+ return indexPartitions;
+ }
+
+ /**
+ * Size of the top-level index
+ * if {@link IndexType#kTwoLevelIndexSearch} is used.
+ *
+ * @return the size of the top-level index.
+ */
+ public long getTopLevelIndexSize() {
+ return topLevelIndexSize;
+ }
+
+ /**
+ * Whether the index key is the user key.
+ * Otherwise it includes the 8-byte sequence
+ * number added by the internal key format.
+ *
+ * @return whether the index key is the user key.
+ */
+ public long getIndexKeyIsUserKey() {
+ return indexKeyIsUserKey;
+ }
+
+ /**
+ * Whether delta encoding is used to encode the index values.
+ *
+ * @return whether delta encoding is used to encode the index values.
+ */
+ public long getIndexValueIsDeltaEncoded() {
+ return indexValueIsDeltaEncoded;
+ }
+
+ /**
+ * Get the size of filter block.
+ *
+ * @return the size of filter block.
+ */
+ public long getFilterSize() {
+ return filterSize;
+ }
+
+ /**
+ * Get the total raw key size.
+ *
+ * @return the total raw key size.
+ */
+ public long getRawKeySize() {
+ return rawKeySize;
+ }
+
+ /**
+ * Get the total raw value size.
+ *
+ * @return the total raw value size.
+ */
+ public long getRawValueSize() {
+ return rawValueSize;
+ }
+
+ /**
+ * Get the number of blocks in this table.
+ *
+ * @return the number of blocks in this table.
+ */
+ public long getNumDataBlocks() {
+ return numDataBlocks;
+ }
+
+ /**
+ * Get the number of entries in this table.
+ *
+ * @return the number of entries in this table.
+ */
+ public long getNumEntries() {
+ return numEntries;
+ }
+
+ /**
+ * Get the number of deletions in the table.
+ *
+ * @return the number of deletions in the table.
+ */
+ public long getNumDeletions() {
+ return numDeletions;
+ }
+
+ /**
+ * Get the number of merge operands in the table.
+ *
+ * @return the number of merge operands in the table.
+ */
+ public long getNumMergeOperands() {
+ return numMergeOperands;
+ }
+
+ /**
+ * Get the number of range deletions in this table.
+ *
+ * @return the number of range deletions in this table.
+ */
+ public long getNumRangeDeletions() {
+ return numRangeDeletions;
+ }
+
+ /**
+ * Get the format version, reserved for backward compatibility.
+ *
+ * @return the format version.
+ */
+ public long getFormatVersion() {
+ return formatVersion;
+ }
+
+ /**
+ * Get the length of the keys.
+ *
+ * @return 0 when the key is variable length, otherwise number of
+ * bytes for each key.
+ */
+ public long getFixedKeyLen() {
+ return fixedKeyLen;
+ }
+
+ /**
+ * Get the ID of column family for this SST file,
+ * corresponding to the column family identified by
+ * {@link #getColumnFamilyName()}.
+ *
+ * @return the id of the column family.
+ */
+ public long getColumnFamilyId() {
+ return columnFamilyId;
+ }
+
+ /**
+ * The time when the SST file was created.
+ * Since SST files are immutable, this is equivalent
+ * to last modified time.
+ *
+ * @return the creation time.
+ */
+ public long getCreationTime() {
+ return creationTime;
+ }
+
+ /**
+ * Get the timestamp of the earliest key.
+ *
+ * @return 0 means unknown, otherwise the timestamp.
+ */
+ public long getOldestKeyTime() {
+ return oldestKeyTime;
+ }
+
+ /**
+ * Get the name of the column family with which this
+ * SST file is associated.
+ *
+ * @return the name of the column family, or null if the
+ * column family is unknown.
+ */
+ /*@Nullable*/ public byte[] getColumnFamilyName() {
+ return columnFamilyName;
+ }
+
+ /**
+ * Get the name of the filter policy used in this table.
+ *
+ * @return the name of the filter policy, or null if
+ * no filter policy is used.
+ */
+ /*@Nullable*/ public String getFilterPolicyName() {
+ return filterPolicyName;
+ }
+
+ /**
+ * Get the name of the comparator used in this table.
+ *
+ * @return the name of the comparator.
+ */
+ public String getComparatorName() {
+ return comparatorName;
+ }
+
+ /**
+ * Get the name of the merge operator used in this table.
+ *
+ * @return the name of the merge operator, or null if no merge operator
+ * is used.
+ */
+ /*@Nullable*/ public String getMergeOperatorName() {
+ return mergeOperatorName;
+ }
+
+ /**
+ * Get the name of the prefix extractor used in this table.
+ *
+ * @return the name of the prefix extractor, or null if no prefix
+ * extractor is used.
+ */
+ /*@Nullable*/ public String getPrefixExtractorName() {
+ return prefixExtractorName;
+ }
+
+ /**
+ * Get the names of the property collectors factories used in this table.
+ *
+ * @return the names of the property collector factories separated
+ * by commas, e.g. {collector_name[1]},{collector_name[2]},...
+ */
+ public String getPropertyCollectorsNames() {
+ return propertyCollectorsNames;
+ }
+
+ /**
+ * Get the name of the compression algorithm used to compress the SST files.
+ *
+ * @return the name of the compression algorithm.
+ */
+ public String getCompressionName() {
+ return compressionName;
+ }
+
+ /**
+ * Get the user collected properties.
+ *
+ * @return the user collected properties.
+ */
+ public Map<String, String> getUserCollectedProperties() {
+ return userCollectedProperties;
+ }
+
+ /**
+ * Get the readable properties.
+ *
+ * @return the readable properties.
+ */
+ public Map<String, String> getReadableProperties() {
+ return readableProperties;
+ }
+
+ /**
+ * The offset of the value of each property in the file.
+ *
+ * @return the offset of each property.
+ */
+ public Map<String, Long> getPropertiesOffsets() {
+ return propertiesOffsets;
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ThreadStatus.java b/src/rocksdb/java/src/main/java/org/rocksdb/ThreadStatus.java
new file mode 100644
index 000000000..062df5889
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ThreadStatus.java
@@ -0,0 +1,224 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Map;
+
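+/**
+ * Describes the status of a RocksDB thread: its type, the database and
+ * column family it is operating on, and the operation and state it is
+ * currently engaged in.
+ *
+ * <p>A usage sketch; it assumes a RocksDB build where
+ * {@code Env#getThreadList()} is available:</p>
+ * <pre>{@code
+ * for (final ThreadStatus ts : Env.getDefault().getThreadList()) {
+ *   System.out.println(ThreadStatus.getThreadTypeName(ts.getThreadType())
+ *       + " running " + ThreadStatus.getOperationName(ts.getOperationType()));
+ * }
+ * }</pre>
+ */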
+public class ThreadStatus {
+ private final long threadId;
+ private final ThreadType threadType;
+ private final String dbName;
+ private final String cfName;
+ private final OperationType operationType;
+ private final long operationElapsedTime; // microseconds
+ private final OperationStage operationStage;
+ private final long[] operationProperties;
+ private final StateType stateType;
+
+ /**
+ * Invoked from C++ via JNI
+ */
+ private ThreadStatus(final long threadId,
+ final byte threadTypeValue,
+ final String dbName,
+ final String cfName,
+ final byte operationTypeValue,
+ final long operationElapsedTime,
+ final byte operationStageValue,
+ final long[] operationProperties,
+ final byte stateTypeValue) {
+ this.threadId = threadId;
+ this.threadType = ThreadType.fromValue(threadTypeValue);
+ this.dbName = dbName;
+ this.cfName = cfName;
+ this.operationType = OperationType.fromValue(operationTypeValue);
+ this.operationElapsedTime = operationElapsedTime;
+ this.operationStage = OperationStage.fromValue(operationStageValue);
+ this.operationProperties = operationProperties;
+ this.stateType = StateType.fromValue(stateTypeValue);
+ }
+
+ /**
+ * Get the unique ID of the thread.
+ *
+ * @return the thread id
+ */
+ public long getThreadId() {
+ return threadId;
+ }
+
+ /**
+ * Get the type of the thread.
+ *
+ * @return the type of the thread.
+ */
+ public ThreadType getThreadType() {
+ return threadType;
+ }
+
+ /**
+ * The name of the DB instance that the thread is currently
+ * involved with.
+ *
+ * @return the name of the db, or null if the thread is not involved
+ * in any DB operation.
+ */
+ /* @Nullable */ public String getDbName() {
+ return dbName;
+ }
+
+ /**
+ * The name of the Column Family that the thread is currently
+ * involved with.
+ *
+ * @return the name of the column family, or null if the thread is not
+ * involved in any column family operation.
+ */
+ /* @Nullable */ public String getCfName() {
+ return cfName;
+ }
+
+ /**
+ * Get the operation (high-level action) that the current thread is involved
+ * with.
+ *
+ * @return the operation
+ */
+ public OperationType getOperationType() {
+ return operationType;
+ }
+
+ /**
+ * Get the elapsed time of the current thread operation in microseconds.
+ *
+ * @return the elapsed time
+ */
+ public long getOperationElapsedTime() {
+ return operationElapsedTime;
+ }
+
+ /**
+ * Get the current stage where the thread is involved in the current
+ * operation.
+ *
+ * @return the current stage of the current operation
+ */
+ public OperationStage getOperationStage() {
+ return operationStage;
+ }
+
+ /**
+ * Get the list of properties that describe some details about the current
+ * operation.
+ *
+ * Each field might have a different meaning for different operations.
+ *
+ * @return the properties
+ */
+ public long[] getOperationProperties() {
+ return operationProperties;
+ }
+
+ /**
+ * Get the state (lower-level action) that the current thread is involved
+ * with.
+ *
+ * @return the state
+ */
+ public StateType getStateType() {
+ return stateType;
+ }
+
+ /**
+ * Get the name of the thread type.
+ *
+ * @param threadType the thread type
+ *
+ * @return the name of the thread type.
+ */
+ public static String getThreadTypeName(final ThreadType threadType) {
+ return getThreadTypeName(threadType.getValue());
+ }
+
+ /**
+ * Get the name of an operation given its type.
+ *
+ * @param operationType the type of operation.
+ *
+ * @return the name of the operation.
+ */
+ public static String getOperationName(final OperationType operationType) {
+ return getOperationName(operationType.getValue());
+ }
+
+ /**
+ * Obtain a human-readable string describing the specified elapsed time.
+ *
+ * @param operationElapsedTime the elapsed time of an operation,
+ * in microseconds.
+ *
+ * @return the human-readable description of the elapsed time.
+ */
+ public static String microsToString(final long operationElapsedTime) {
+ return microsToStringNative(operationElapsedTime);
+ }
+
+ /**
+ * Obtain a human-readable string describing the specified operation stage.
+ *
+ * @param operationStage the stage of the operation.
+ *
+ * @return the description of the operation stage.
+ */
+ public static String getOperationStageName(
+ final OperationStage operationStage) {
+ return getOperationStageName(operationStage.getValue());
+ }
+
+ /**
+ * Obtain the name of the "i"th operation property of the
+ * specified operation.
+ *
+ * @param operationType the operation type.
+ * @param i the index of the operation property.
+ *
+ * @return the name of the operation property
+ */
+ public static String getOperationPropertyName(
+ final OperationType operationType, final int i) {
+ return getOperationPropertyName(operationType.getValue(), i);
+ }
+
+ /**
+ * Translate the properties of the specified operation into a map
+ * from property name to property value.
+ *
+ * @param operationType the operation type.
+ * @param operationProperties the operation properties.
+ *
+ * @return a map from property name to property value.
+ */
+ public static Map<String, Long> interpretOperationProperties(
+ final OperationType operationType, final long[] operationProperties) {
+ return interpretOperationProperties(operationType.getValue(),
+ operationProperties);
+ }
+
+ /**
+ * Obtain the name of a state given its type.
+ *
+ * @param stateType the state type.
+ *
+ * @return the name of the state.
+ */
+ public static String getStateName(final StateType stateType) {
+ return getStateName(stateType.getValue());
+ }
+
+ private static native String getThreadTypeName(final byte threadTypeValue);
+ private static native String getOperationName(final byte operationTypeValue);
+ private static native String microsToStringNative(
+ final long operationElapsedTime);
+ private static native String getOperationStageName(
+ final byte operationStageTypeValue);
+ private static native String getOperationPropertyName(
+ final byte operationTypeValue, final int i);
+ private static native Map<String, Long> interpretOperationProperties(
+ final byte operationTypeValue, final long[] operationProperties);
+ private static native String getStateName(final byte stateTypeValue);
+}
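For context, a minimal sketch of how these status objects are typically consumed. It assumes Env#getThreadList() is the accessor that surfaces them (that accessor is not part of this file); dbName may be null for threads not currently involved with any DB.

import java.util.List;
import org.rocksdb.Env;
import org.rocksdb.RocksDB;
import org.rocksdb.ThreadStatus;

public class ThreadStatusExample {
  public static void main(final String[] args) throws Exception {
    RocksDB.loadLibrary();
    // Assumption: Env#getThreadList() returns the current ThreadStatus list.
    final List<ThreadStatus> threads = Env.getDefault().getThreadList();
    for (final ThreadStatus ts : threads) {
      System.out.printf("%d [%s] db=%s op=%s elapsed=%s%n",
          ts.getThreadId(),
          ThreadStatus.getThreadTypeName(ts.getThreadType()),
          ts.getDbName(),  // may be null
          ThreadStatus.getOperationName(ts.getOperationType()),
          ThreadStatus.microsToString(ts.getOperationElapsedTime()));
    }
  }
}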
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ThreadType.java b/src/rocksdb/java/src/main/java/org/rocksdb/ThreadType.java
new file mode 100644
index 000000000..cc329f442
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ThreadType.java
@@ -0,0 +1,65 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The type of a thread.
+ */
+public enum ThreadType {
+ /**
+ * RocksDB BG thread in high-pri thread pool.
+ */
+ HIGH_PRIORITY((byte)0x0),
+
+ /**
+ * RocksDB BG thread in low-pri thread pool.
+ */
+ LOW_PRIORITY((byte)0x1),
+
+ /**
+ * User thread (Non-RocksDB BG thread).
+ */
+ USER((byte)0x2),
+
+ /**
+ * RocksDB BG thread in bottom-pri thread pool
+ */
+ BOTTOM_PRIORITY((byte)0x3);
+
+ private final byte value;
+
+ ThreadType(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal representation value.
+ *
+ * @return the internal representation value.
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get the Thread type from the internal representation value.
+ *
+ * @param value the internal representation value.
+ *
+ * @return the thread type
+ *
+ * @throws IllegalArgumentException if the value does not match a ThreadType
+ */
+ static ThreadType fromValue(final byte value)
+ throws IllegalArgumentException {
+ for (final ThreadType threadType : ThreadType.values()) {
+ if (threadType.value == value) {
+ return threadType;
+ }
+ }
+ throw new IllegalArgumentException("Unknown value for ThreadType: " + value);
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TickerType.java b/src/rocksdb/java/src/main/java/org/rocksdb/TickerType.java
new file mode 100644
index 000000000..678733513
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TickerType.java
@@ -0,0 +1,760 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The logical mapping of tickers defined in rocksdb::Tickers.
+ *
+ * Java byte value mappings don't align 1:1 to the C++ values. The C++
+ * rocksdb::Tickers enumeration type is uint32_t, whereas Java's
+ * org.rocksdb.TickerType is byte; this causes mapping issues when a
+ * rocksdb::Tickers value is greater than 127 (0x7F), as the jbyte JNI
+ * interface cannot represent larger values. To avoid breaking the interface
+ * in minor versions, value mappings for org.rocksdb.TickerType leverage the
+ * full byte range [-128 (-0x80), 127 (0x7F)]. Newly added tickers should
+ * descend into negative values until TICKER_ENUM_MAX reaches -128 (-0x80).
+ */
+public enum TickerType {
+
+ /**
+ * total block cache misses
+ *
+ * REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
+ * BLOCK_CACHE_FILTER_MISS +
+ * BLOCK_CACHE_DATA_MISS;
+ */
+ BLOCK_CACHE_MISS((byte) 0x0),
+
+ /**
+ * total block cache hit
+ *
+ * REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
+ * BLOCK_CACHE_FILTER_HIT +
+ * BLOCK_CACHE_DATA_HIT;
+ */
+ BLOCK_CACHE_HIT((byte) 0x1),
+
+ BLOCK_CACHE_ADD((byte) 0x2),
+
+ /**
+ * # of failures when adding blocks to block cache.
+ */
+ BLOCK_CACHE_ADD_FAILURES((byte) 0x3),
+
+ /**
+ * # of times cache miss when accessing index block from block cache.
+ */
+ BLOCK_CACHE_INDEX_MISS((byte) 0x4),
+
+ /**
+ * # of times cache hit when accessing index block from block cache.
+ */
+ BLOCK_CACHE_INDEX_HIT((byte) 0x5),
+
+ /**
+ * # of index blocks added to block cache.
+ */
+ BLOCK_CACHE_INDEX_ADD((byte) 0x6),
+
+ /**
+ * # of bytes of index blocks inserted into cache
+ */
+ BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7),
+
+ /**
+ * # of bytes of index block erased from cache
+ */
+ BLOCK_CACHE_INDEX_BYTES_EVICT((byte) 0x8),
+
+ /**
+ * # of times cache miss when accessing filter block from block cache.
+ */
+ BLOCK_CACHE_FILTER_MISS((byte) 0x9),
+
+ /**
+ * # of times cache hit when accessing filter block from block cache.
+ */
+ BLOCK_CACHE_FILTER_HIT((byte) 0xA),
+
+ /**
+ * # of filter blocks added to block cache.
+ */
+ BLOCK_CACHE_FILTER_ADD((byte) 0xB),
+
+ /**
+ * # of bytes of bloom filter blocks inserted into cache
+ */
+ BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xC),
+
+ /**
+ * # of bytes of bloom filter block erased from cache
+ */
+ BLOCK_CACHE_FILTER_BYTES_EVICT((byte) 0xD),
+
+ /**
+ * # of times cache miss when accessing data block from block cache.
+ */
+ BLOCK_CACHE_DATA_MISS((byte) 0xE),
+
+ /**
+ * # of times cache hit when accessing data block from block cache.
+ */
+ BLOCK_CACHE_DATA_HIT((byte) 0xF),
+
+ /**
+ * # of data blocks added to block cache.
+ */
+ BLOCK_CACHE_DATA_ADD((byte) 0x10),
+
+ /**
+ * # of bytes of data blocks inserted into cache
+ */
+ BLOCK_CACHE_DATA_BYTES_INSERT((byte) 0x11),
+
+ /**
+ * # of bytes read from cache.
+ */
+ BLOCK_CACHE_BYTES_READ((byte) 0x12),
+
+ /**
+ * # of bytes written into cache.
+ */
+ BLOCK_CACHE_BYTES_WRITE((byte) 0x13),
+
+ /**
+ * # of times bloom filter has avoided file reads.
+ */
+ BLOOM_FILTER_USEFUL((byte) 0x14),
+
+ /**
+ * # persistent cache hit
+ */
+ PERSISTENT_CACHE_HIT((byte) 0x15),
+
+ /**
+ * # persistent cache miss
+ */
+ PERSISTENT_CACHE_MISS((byte) 0x16),
+
+ /**
+ * # total simulation block cache hits
+ */
+ SIM_BLOCK_CACHE_HIT((byte) 0x17),
+
+ /**
+ * # total simulation block cache misses
+ */
+ SIM_BLOCK_CACHE_MISS((byte) 0x18),
+
+ /**
+ * # of memtable hits.
+ */
+ MEMTABLE_HIT((byte) 0x19),
+
+ /**
+ * # of memtable misses.
+ */
+ MEMTABLE_MISS((byte) 0x1A),
+
+ /**
+ * # of Get() queries served by L0
+ */
+ GET_HIT_L0((byte) 0x1B),
+
+ /**
+ * # of Get() queries served by L1
+ */
+ GET_HIT_L1((byte) 0x1C),
+
+ /**
+ * # of Get() queries served by L2 and up
+ */
+ GET_HIT_L2_AND_UP((byte) 0x1D),
+
+ /**
+ * COMPACTION_KEY_DROP_* count the reasons for key drops during compaction.
+ * There are 4 reasons currently.
+ */
+
+ /**
+ * key was written with a newer value.
+ */
+ COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x1E),
+
+ /**
+ * Also includes keys dropped for range del.
+ * The key is obsolete.
+ */
+ COMPACTION_KEY_DROP_OBSOLETE((byte) 0x1F),
+
+ /**
+ * key was covered by a range tombstone.
+ */
+ COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x20),
+
+ /**
+ * User compaction function has dropped the key.
+ */
+ COMPACTION_KEY_DROP_USER((byte) 0x21),
+
+ /**
+ * all keys in range were deleted.
+ */
+ COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x22),
+
+ /**
+ * Number of keys written to the database via the Put and Write calls.
+ */
+ NUMBER_KEYS_WRITTEN((byte) 0x23),
+
+ /**
+ * Number of Keys read.
+ */
+ NUMBER_KEYS_READ((byte) 0x24),
+
+ /**
+ * Number of keys updated, if in-place update is enabled.
+ */
+ NUMBER_KEYS_UPDATED((byte) 0x25),
+
+ /**
+ * The number of uncompressed bytes issued by DB::Put(), DB::Delete(),
+ * DB::Merge(), and DB::Write().
+ */
+ BYTES_WRITTEN((byte) 0x26),
+
+ /**
+ * The number of uncompressed bytes read from DB::Get(). It could be
+ * either from memtables, cache, or table files.
+ *
+ * For the number of logical bytes read from DB::MultiGet(),
+ * please use {@link #NUMBER_MULTIGET_BYTES_READ}.
+ */
+ BYTES_READ((byte) 0x27),
+
+ /**
+ * The number of calls to seek.
+ */
+ NUMBER_DB_SEEK((byte) 0x28),
+
+ /**
+ * The number of calls to next.
+ */
+ NUMBER_DB_NEXT((byte) 0x29),
+
+ /**
+ * The number of calls to prev.
+ */
+ NUMBER_DB_PREV((byte) 0x2A),
+
+ /**
+ * The number of calls to seek that returned data.
+ */
+ NUMBER_DB_SEEK_FOUND((byte) 0x2B),
+
+ /**
+ * The number of calls to next that returned data.
+ */
+ NUMBER_DB_NEXT_FOUND((byte) 0x2C),
+
+ /**
+ * The number of calls to prev that returned data.
+ */
+ NUMBER_DB_PREV_FOUND((byte) 0x2D),
+
+ /**
+ * The number of uncompressed bytes read from an iterator.
+ * Includes size of key and value.
+ */
+ ITER_BYTES_READ((byte) 0x2E),
+
+ NO_FILE_CLOSES((byte) 0x2F),
+
+ NO_FILE_OPENS((byte) 0x30),
+
+ NO_FILE_ERRORS((byte) 0x31),
+
+ /**
+ * Time the system had to wait to do L0-L1 compactions.
+ *
+ * @deprecated
+ */
+ @Deprecated
+ STALL_L0_SLOWDOWN_MICROS((byte) 0x32),
+
+ /**
+ * Time the system had to wait to move a memtable to L1.
+ *
+ * @deprecated
+ */
+ @Deprecated
+ STALL_MEMTABLE_COMPACTION_MICROS((byte) 0x33),
+
+ /**
+ * write throttle because of too many files in L0.
+ *
+ * @deprecated
+ */
+ @Deprecated
+ STALL_L0_NUM_FILES_MICROS((byte) 0x34),
+
+ /**
+ * Writer has to wait for compaction or flush to finish.
+ */
+ STALL_MICROS((byte) 0x35),
+
+ /**
+ * The wait time for db mutex.
+ *
+ * Disabled by default. To enable it set stats level to {@link StatsLevel#ALL}
+ */
+ DB_MUTEX_WAIT_MICROS((byte) 0x36),
+
+ RATE_LIMIT_DELAY_MILLIS((byte) 0x37),
+
+ /**
+ * Number of iterators created.
+ */
+ NO_ITERATORS((byte) 0x38),
+
+ /**
+ * Number of MultiGet calls.
+ */
+ NUMBER_MULTIGET_CALLS((byte) 0x39),
+
+ /**
+ * Number of MultiGet keys read.
+ */
+ NUMBER_MULTIGET_KEYS_READ((byte) 0x3A),
+
+ /**
+ * Number of MultiGet bytes read.
+ */
+ NUMBER_MULTIGET_BYTES_READ((byte) 0x3B),
+
+ /**
+ * Number of delete records that were not required to be
+ * written to storage because the key did not exist.
+ */
+ NUMBER_FILTERED_DELETES((byte) 0x3C),
+ NUMBER_MERGE_FAILURES((byte) 0x3D),
+
+ /**
+ * Number of times bloom was checked before creating iterator on a
+ * file, and the number of times the check was useful in avoiding
+ * iterator creation (and thus likely IOPs).
+ */
+ BLOOM_FILTER_PREFIX_CHECKED((byte) 0x3E),
+ BLOOM_FILTER_PREFIX_USEFUL((byte) 0x3F),
+
+ /**
+ * Number of times we had to reseek inside an iteration to skip
+ * over a large number of keys with the same userkey.
+ */
+ NUMBER_OF_RESEEKS_IN_ITERATION((byte) 0x40),
+
+ /**
+ * Record the number of calls to {@link RocksDB#getUpdatesSince(long)}. Useful to keep track of
+ * transaction log iterator refreshes.
+ */
+ GET_UPDATES_SINCE_CALLS((byte) 0x41),
+
+ /**
+ * Miss in the compressed block cache.
+ */
+ BLOCK_CACHE_COMPRESSED_MISS((byte) 0x42),
+
+ /**
+ * Hit in the compressed block cache.
+ */
+ BLOCK_CACHE_COMPRESSED_HIT((byte) 0x43),
+
+ /**
+ * Number of blocks added to compressed block cache.
+ */
+ BLOCK_CACHE_COMPRESSED_ADD((byte) 0x44),
+
+ /**
+ * Number of failures when adding blocks to compressed block cache.
+ */
+ BLOCK_CACHE_COMPRESSED_ADD_FAILURES((byte) 0x45),
+
+ /**
+ * Number of times WAL sync is done.
+ */
+ WAL_FILE_SYNCED((byte) 0x46),
+
+ /**
+ * Number of bytes written to WAL.
+ */
+ WAL_FILE_BYTES((byte) 0x47),
+
+ /**
+ * Writes can be processed by the requesting thread or by the thread at
+ * the head of the writers queue.
+ */
+ WRITE_DONE_BY_SELF((byte) 0x48),
+
+ /**
+ * Equivalent to writes done for others.
+ */
+ WRITE_DONE_BY_OTHER((byte) 0x49),
+
+ /**
+ * Number of writes that ended up timing out.
+ */
+ WRITE_TIMEDOUT((byte) 0x4A),
+
+ /**
+ * Number of Write calls that request WAL.
+ */
+ WRITE_WITH_WAL((byte) 0x4B),
+
+ /**
+ * Bytes read during compaction.
+ */
+ COMPACT_READ_BYTES((byte) 0x4C),
+
+ /**
+ * Bytes written during compaction.
+ */
+ COMPACT_WRITE_BYTES((byte) 0x4D),
+
+ /**
+ * Bytes written during flush.
+ */
+ FLUSH_WRITE_BYTES((byte) 0x4E),
+
+ /**
+ * Number of table properties loaded directly from file, without creating
+ * a table reader object.
+ */
+ NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x4F),
+ NUMBER_SUPERVERSION_ACQUIRES((byte) 0x50),
+ NUMBER_SUPERVERSION_RELEASES((byte) 0x51),
+ NUMBER_SUPERVERSION_CLEANUPS((byte) 0x52),
+
+ /**
+ * # of compressions/decompressions executed
+ */
+ NUMBER_BLOCK_COMPRESSED((byte) 0x53),
+ NUMBER_BLOCK_DECOMPRESSED((byte) 0x54),
+
+ NUMBER_BLOCK_NOT_COMPRESSED((byte) 0x55),
+ MERGE_OPERATION_TOTAL_TIME((byte) 0x56),
+ FILTER_OPERATION_TOTAL_TIME((byte) 0x57),
+
+ /**
+ * Row cache.
+ */
+ ROW_CACHE_HIT((byte) 0x58),
+ ROW_CACHE_MISS((byte) 0x59),
+
+ /**
+ * Read amplification statistics.
+ *
+ * Read amplification can be calculated using this formula
+ * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
+ *
+ * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
+ */
+
+ /**
+ * Estimate of total bytes actually used.
+ */
+ READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x5A),
+
+ /**
+ * Total size of loaded data blocks.
+ */
+ READ_AMP_TOTAL_READ_BYTES((byte) 0x5B),
+
+ /**
+ * Number of refill intervals where rate limiter's bytes are fully consumed.
+ */
+ NUMBER_RATE_LIMITER_DRAINS((byte) 0x5C),
+
+ /**
+ * Number of internal keys skipped during iteration.
+ */
+ NUMBER_ITER_SKIP((byte) 0x5D),
+
+ /**
+ * Number of MultiGet keys found (vs number requested)
+ */
+ NUMBER_MULTIGET_KEYS_FOUND((byte) 0x5E),
+
+ // -0x01 to fixate the new value that incorrectly changed TICKER_ENUM_MAX
+ /**
+ * Number of iterators created.
+ */
+ NO_ITERATOR_CREATED((byte) -0x01),
+
+ /**
+ * Number of iterators deleted.
+ */
+ NO_ITERATOR_DELETED((byte) 0x60),
+
+ /**
+ * Deletions obsoleted before bottom level due to file gap optimization.
+ */
+ COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE((byte) 0x61),
+
+ /**
+ * If a compaction was cancelled by the SstFileManager (sfm) to prevent
+ * ENOSPC.
+ */
+ COMPACTION_CANCELLED((byte) 0x62),
+
+ /**
+ * # of times bloom FullFilter has not avoided the reads.
+ */
+ BLOOM_FILTER_FULL_POSITIVE((byte) 0x63),
+
+ /**
+ * # of times bloom FullFilter has not avoided the reads and the data
+ * actually exists.
+ */
+ BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x64),
+
+ /**
+ * BlobDB specific stats
+ * # of Put/PutTTL/PutUntil to BlobDB.
+ */
+ BLOB_DB_NUM_PUT((byte) 0x65),
+
+ /**
+ * # of Write to BlobDB.
+ */
+ BLOB_DB_NUM_WRITE((byte) 0x66),
+
+ /**
+ * # of Get to BlobDB.
+ */
+ BLOB_DB_NUM_GET((byte) 0x67),
+
+ /**
+ * # of MultiGet to BlobDB.
+ */
+ BLOB_DB_NUM_MULTIGET((byte) 0x68),
+
+ /**
+ * # of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator.
+ */
+ BLOB_DB_NUM_SEEK((byte) 0x69),
+
+ /**
+ * # of Next to BlobDB iterator.
+ */
+ BLOB_DB_NUM_NEXT((byte) 0x6A),
+
+ /**
+ * # of Prev to BlobDB iterator.
+ */
+ BLOB_DB_NUM_PREV((byte) 0x6B),
+
+ /**
+ * # of keys written to BlobDB.
+ */
+ BLOB_DB_NUM_KEYS_WRITTEN((byte) 0x6C),
+
+ /**
+ * # of keys read from BlobDB.
+ */
+ BLOB_DB_NUM_KEYS_READ((byte) 0x6D),
+
+ /**
+ * # of bytes (key + value) written to BlobDB.
+ */
+ BLOB_DB_BYTES_WRITTEN((byte) 0x6E),
+
+ /**
+ * # of bytes (keys + value) read from BlobDB.
+ */
+ BLOB_DB_BYTES_READ((byte) 0x6F),
+
+ /**
+ * # of keys written by BlobDB as non-TTL inlined value.
+ */
+ BLOB_DB_WRITE_INLINED((byte) 0x70),
+
+ /**
+ * # of keys written by BlobDB as TTL inlined value.
+ */
+ BLOB_DB_WRITE_INLINED_TTL((byte) 0x71),
+
+ /**
+ * # of keys written by BlobDB as non-TTL blob value.
+ */
+ BLOB_DB_WRITE_BLOB((byte) 0x72),
+
+ /**
+ * # of keys written by BlobDB as TTL blob value.
+ */
+ BLOB_DB_WRITE_BLOB_TTL((byte) 0x73),
+
+ /**
+ * # of bytes written to blob file.
+ */
+ BLOB_DB_BLOB_FILE_BYTES_WRITTEN((byte) 0x74),
+
+ /**
+ * # of bytes read from blob file.
+ */
+ BLOB_DB_BLOB_FILE_BYTES_READ((byte) 0x75),
+
+ /**
+ * # of times a blob file has been synced.
+ */
+ BLOB_DB_BLOB_FILE_SYNCED((byte) 0x76),
+
+ /**
+ * # of blob index evicted from base DB by BlobDB compaction filter because
+ * of expiration.
+ */
+ BLOB_DB_BLOB_INDEX_EXPIRED_COUNT((byte) 0x77),
+
+ /**
+ * Size of blob index evicted from base DB by BlobDB compaction filter
+ * because of expiration.
+ */
+ BLOB_DB_BLOB_INDEX_EXPIRED_SIZE((byte) 0x78),
+
+ /**
+ * # of blob index evicted from base DB by BlobDB compaction filter because
+ * of corresponding file deleted.
+ */
+ BLOB_DB_BLOB_INDEX_EVICTED_COUNT((byte) 0x79),
+
+ /**
+ * Size of blob index evicted from base DB by BlobDB compaction filter
+ * because of corresponding file deleted.
+ */
+ BLOB_DB_BLOB_INDEX_EVICTED_SIZE((byte) 0x7A),
+
+ /**
+ * # of blob files being garbage collected.
+ */
+ BLOB_DB_GC_NUM_FILES((byte) 0x7B),
+
+ /**
+ * # of blob files generated by garbage collection.
+ */
+ BLOB_DB_GC_NUM_NEW_FILES((byte) 0x7C),
+
+ /**
+ * # of BlobDB garbage collection failures.
+ */
+ BLOB_DB_GC_FAILURES((byte) 0x7D),
+
+ /**
+ * # of keys dropped by BlobDB garbage collection because they had been
+ * overwritten.
+ */
+ BLOB_DB_GC_NUM_KEYS_OVERWRITTEN((byte) 0x7E),
+
+ /**
+ * # of keys dropped by BlobDB garbage collection because of expiration.
+ */
+ BLOB_DB_GC_NUM_KEYS_EXPIRED((byte) 0x7F),
+
+ /**
+ * # of keys relocated to new blob file by garbage collection.
+ */
+ BLOB_DB_GC_NUM_KEYS_RELOCATED((byte) -0x02),
+
+ /**
+ * # of bytes dropped by BlobDB garbage collection because they had been
+ * overwritten.
+ */
+ BLOB_DB_GC_BYTES_OVERWRITTEN((byte) -0x03),
+
+ /**
+ * # of bytes dropped by BlobDB garbage collection because of expiration.
+ */
+ BLOB_DB_GC_BYTES_EXPIRED((byte) -0x04),
+
+ /**
+ * # of bytes relocated to new blob file by garbage collection.
+ */
+ BLOB_DB_GC_BYTES_RELOCATED((byte) -0x05),
+
+ /**
+ * # of blob files evicted because BlobDB is full.
+ */
+ BLOB_DB_FIFO_NUM_FILES_EVICTED((byte) -0x06),
+
+ /**
+ * # of keys in the blob files evicted because BlobDB is full.
+ */
+ BLOB_DB_FIFO_NUM_KEYS_EVICTED((byte) -0x07),
+
+ /**
+ * # of bytes in the blob files evicted because BlobDB is full.
+ */
+ BLOB_DB_FIFO_BYTES_EVICTED((byte) -0x08),
+
+ /**
+ * These counters indicate a performance issue in WritePrepared transactions.
+ * We should not see them ticking much.
+ * # of times prepare_mutex_ is acquired in the fast path.
+ */
+ TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x09),
+
+ /**
+ * # of times old_commit_map_mutex_ is acquired in the fast path.
+ */
+ TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x0A),
+
+ /**
+ * # of times we checked a batch for duplicate keys.
+ */
+ TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x0B),
+
+ /**
+ * # of times snapshot_mutex_ is acquired in the fast path.
+ */
+ TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x0C),
+
+ /**
+ * # of times ::Get returned TryAgain due to expired snapshot seq
+ */
+ TXN_GET_TRY_AGAIN((byte) -0x0D),
+
+ TICKER_ENUM_MAX((byte) 0x5F);
+
+ private final byte value;
+
+ TickerType(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Returns the byte value of this enumeration value.
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get Ticker type by byte value.
+ *
+ * @param value byte representation of TickerType.
+ *
+ * @return {@link org.rocksdb.TickerType} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ public static TickerType getTickerType(final byte value) {
+ for (final TickerType tickerType : TickerType.values()) {
+ if (tickerType.getValue() == value) {
+ return tickerType;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for TickerType.");
+ }
+}
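A minimal sketch of reading these tickers through the statistics object; it assumes the Statistics#getTickerCount(TickerType) accessor defined elsewhere in this API, and the database path is illustrative.

import org.rocksdb.*;

public class TickerExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Statistics stats = new Statistics();
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setStatistics(stats);
         final RocksDB db = RocksDB.open(options, "/tmp/ticker-example")) {
      db.put("key".getBytes(), "value".getBytes());
      db.get("key".getBytes());
      // Byte values round-trip through getValue()/getTickerType(), per the
      // byte-range mapping described in the class javadoc above.
      final TickerType t = TickerType.getTickerType(
          TickerType.BLOCK_CACHE_MISS.getValue());
      System.out.println(t + " = " + stats.getTickerCount(t));
    }
  }
}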
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TimedEnv.java b/src/rocksdb/java/src/main/java/org/rocksdb/TimedEnv.java
new file mode 100644
index 000000000..dc8b5d6ef
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TimedEnv.java
@@ -0,0 +1,30 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Timed environment.
+ */
+public class TimedEnv extends Env {
+
+ /**
+ * <p>Creates a new environment that measures function call times for
+ * filesystem operations, reporting results to variables in PerfContext.</p>
+ *
+ * <p>The caller must close the result when it is
+ * no longer needed.</p>
+ *
+ * @param baseEnv the base environment,
+ * must remain live while the result is in use.
+ */
+ public TimedEnv(final Env baseEnv) {
+ super(createTimedEnv(baseEnv.nativeHandle_));
+ }
+
+ private static native long createTimedEnv(final long baseEnvHandle);
+ @Override protected final native void disposeInternal(final long handle);
+}
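A minimal usage sketch: wrap the default environment so filesystem call timings are reported to PerfContext. The database path is illustrative.

import org.rocksdb.*;

public class TimedEnvExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final TimedEnv timedEnv = new TimedEnv(Env.getDefault());
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setEnv(timedEnv); // route filesystem calls through the timed wrapper
         final RocksDB db = RocksDB.open(options, "/tmp/timed-env-example")) {
      db.put("k".getBytes(), "v".getBytes());
    }
  }
}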
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TraceOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/TraceOptions.java
new file mode 100644
index 000000000..657b263c6
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TraceOptions.java
@@ -0,0 +1,32 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * TraceOptions is used for
+ * {@link RocksDB#startTrace(TraceOptions, AbstractTraceWriter)}.
+ */
+public class TraceOptions {
+ private final long maxTraceFileSize;
+
+ public TraceOptions() {
+ this.maxTraceFileSize = 64L * 1024 * 1024 * 1024; // 64 GB (long literal avoids int overflow)
+ }
+
+ public TraceOptions(final long maxTraceFileSize) {
+ this.maxTraceFileSize = maxTraceFileSize;
+ }
+
+ /**
+ * To avoid the trace file growing larger than the available storage
+ * space, the user can set the max trace file size in bytes.
+ * The default is 64 GB.
+ *
+ * @return the max trace size
+ */
+ public long getMaxTraceFileSize() {
+ return maxTraceFileSize;
+ }
+}
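A sketch of bounding a trace with these options. Note the long literal (1024L, as with 64L in the default above); plain int arithmetic would overflow. RocksDB#endTrace is assumed here as the counterpart of the startTrace method referenced in the javadoc.

import org.rocksdb.*;

public class TraceExample {
  // Sketch: trace a workload into a bounded file. Assumes `db` is open and
  // `writer` extends AbstractTraceWriter (see the TraceWriter sketch below).
  static void traceWorkload(final RocksDB db, final AbstractTraceWriter writer)
      throws RocksDBException {
    final TraceOptions traceOptions = new TraceOptions(1024L * 1024 * 1024); // cap at 1 GB
    db.startTrace(traceOptions, writer);
    db.put("k".getBytes(), "v".getBytes()); // traced operation
    db.endTrace(); // assumed counterpart to startTrace
  }
}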
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TraceWriter.java b/src/rocksdb/java/src/main/java/org/rocksdb/TraceWriter.java
new file mode 100644
index 000000000..cb0234e9b
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TraceWriter.java
@@ -0,0 +1,36 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * TraceWriter allows exporting RocksDB traces to any system,
+ * one operation at a time.
+ */
+public interface TraceWriter {
+
+ /**
+ * Write the data.
+ *
+ * @param data the data
+ *
+ * @throws RocksDBException if an error occurs whilst writing.
+ */
+ void write(final Slice data) throws RocksDBException;
+
+ /**
+ * Close the writer.
+ *
+ * @throws RocksDBException if an error occurs whilst closing the writer.
+ */
+ void closeWriter() throws RocksDBException;
+
+ /**
+ * Get the size of the file that this writer is writing to.
+ *
+ * @return the file size
+ */
+ long getFileSize();
+}
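A minimal file-backed implementation sketch. It assumes the AbstractTraceWriter base class provided elsewhere in this API, and the RocksDBException(Status) constructor; each trace record is written verbatim.

import java.io.FileOutputStream;
import java.io.IOException;
import org.rocksdb.AbstractTraceWriter;
import org.rocksdb.RocksDBException;
import org.rocksdb.Slice;
import org.rocksdb.Status;

public class FileTraceWriter extends AbstractTraceWriter {
  private final FileOutputStream out;
  private long fileSize = 0;

  public FileTraceWriter(final String path) throws IOException {
    this.out = new FileOutputStream(path);
  }

  @Override
  public void write(final Slice data) throws RocksDBException {
    try {
      final byte[] bytes = data.data(); // copy of the slice contents
      out.write(bytes);
      fileSize += bytes.length;
    } catch (final IOException e) {
      throw new RocksDBException(
          new Status(Status.Code.IOError, Status.SubCode.None, e.getMessage()));
    }
  }

  @Override
  public void closeWriter() throws RocksDBException {
    try {
      out.close();
    } catch (final IOException e) {
      throw new RocksDBException(
          new Status(Status.Code.IOError, Status.SubCode.None, e.getMessage()));
    }
  }

  @Override
  public long getFileSize() {
    return fileSize;
  }
}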
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Transaction.java b/src/rocksdb/java/src/main/java/org/rocksdb/Transaction.java
new file mode 100644
index 000000000..d59be4c80
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Transaction.java
@@ -0,0 +1,2012 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * Provides BEGIN/COMMIT/ROLLBACK transactions.
+ *
+ * To use transactions, you must first create either an
+ * {@link OptimisticTransactionDB} or a {@link TransactionDB}
+ *
+ * To create a transaction, use
+ * {@link OptimisticTransactionDB#beginTransaction(org.rocksdb.WriteOptions)} or
+ * {@link TransactionDB#beginTransaction(org.rocksdb.WriteOptions)}
+ *
+ * It is up to the caller to synchronize access to this object.
+ *
+ * See samples/src/main/java/OptimisticTransactionSample.java and
+ * samples/src/main/java/TransactionSample.java for some simple
+ * examples.
+ */
+public class Transaction extends RocksObject {
+
+ private final RocksDB parent;
+
+ /**
+ * Intentionally package private
+ * as this is called from
+ * {@link OptimisticTransactionDB#beginTransaction(org.rocksdb.WriteOptions)}
+ * or {@link TransactionDB#beginTransaction(org.rocksdb.WriteOptions)}
+ *
+ * @param parent This must be either {@link TransactionDB} or
+ * {@link OptimisticTransactionDB}
+ * @param transactionHandle The native handle to the underlying C++
+ * transaction object
+ */
+ Transaction(final RocksDB parent, final long transactionHandle) {
+ super(transactionHandle);
+ this.parent = parent;
+ }
+
+ /**
+ * If a transaction has a snapshot set, the transaction will ensure that
+ * any keys successfully written (or fetched via {@link #getForUpdate}) have
+ * not been modified outside of this transaction since the time the snapshot
+ * was set.
+ *
+ * If a snapshot has not been set, the transaction guarantees that keys have
+ * not been modified since the time each key was first written (or fetched via
+ * {@link #getForUpdate}).
+ *
+ * Using {@link #setSnapshot()} will provide stricter isolation guarantees
+ * at the expense of potentially more transaction failures due to conflicts
+ * with other writes.
+ *
+ * Calling {@link #setSnapshot()} has no effect on keys written before this
+ * function has been called.
+ *
+ * {@link #setSnapshot()} may be called multiple times if you would like to
+ * change the snapshot used for different operations in this transaction.
+ *
+ * Calling {@link #setSnapshot()} will not affect the version of data returned
+ * by get(...) methods. See {@link #get} for more details.
+ */
+ public void setSnapshot() {
+ assert(isOwningHandle());
+ setSnapshot(nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link #setSnapshot()}, but will not change the current snapshot
+ * until put/merge/delete/getForUpdate/multiGetForUpdate is called.
+ * By calling this function, the transaction will essentially call
+ * {@link #setSnapshot()} for you right before performing the next
+ * write/getForUpdate.
+ *
+ * Calling {@link #setSnapshotOnNextOperation()} will not affect what
+ * snapshot is returned by {@link #getSnapshot} until the next
+ * write/getForUpdate is executed.
+ *
+ * When the snapshot is created the notifier's snapshotCreated method will
+ * be called so that the caller can get access to the snapshot.
+ *
+ * This is an optimization to reduce the likelihood of conflicts that
+ * could occur in between the time {@link #setSnapshot()} is called and the
+ * first write/getForUpdate operation. i.e. this prevents the following
+ * race-condition:
+ *
+ * txn1-&gt;setSnapshot();
+ * txn2-&gt;put("A", ...);
+ * txn2-&gt;commit();
+ * txn1-&gt;getForUpdate(opts, "A", ...); * FAIL!
+ */
+ public void setSnapshotOnNextOperation() {
+ assert(isOwningHandle());
+ setSnapshotOnNextOperation(nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link #setSnapshot()}, but will not change the current snapshot
+ * until put/merge/delete/getForUpdate/multiGetForUpdate is called.
+ * By calling this function, the transaction will essentially call
+ * {@link #setSnapshot()} for you right before performing the next
+ * write/getForUpdate.
+ *
+ * Calling {@link #setSnapshotOnNextOperation()} will not affect what
+ * snapshot is returned by {@link #getSnapshot} until the next
+ * write/getForUpdate is executed.
+ *
+ * When the snapshot is created the
+ * {@link AbstractTransactionNotifier#snapshotCreated(Snapshot)} method will
+ * be called so that the caller can get access to the snapshot.
+ *
+ * This is an optimization to reduce the likelihood of conflicts that
+ * could occur in between the time {@link #setSnapshot()} is called and the
+ * first write/getForUpdate operation. i.e. this prevents the following
+ * race-condition:
+ *
+ * txn1-&gt;setSnapshot();
+ * txn2-&gt;put("A", ...);
+ * txn2-&gt;commit();
+ * txn1-&gt;getForUpdate(opts, "A", ...); * FAIL!
+ *
+ * @param transactionNotifier A handler for receiving snapshot notifications
+ * for the transaction
+ *
+ */
+ public void setSnapshotOnNextOperation(
+ final AbstractTransactionNotifier transactionNotifier) {
+ assert(isOwningHandle());
+ setSnapshotOnNextOperation(nativeHandle_, transactionNotifier.nativeHandle_);
+ }
+
+ /**
+ * Returns the Snapshot created by the last call to {@link #setSnapshot()}.
+ *
+ * REQUIRED: The returned Snapshot is only valid up until the next time
+ * {@link #setSnapshot()}/{@link #setSnapshotOnNextOperation()} is called,
+ * {@link #clearSnapshot()} is called, or the Transaction is deleted.
+ *
+ * @return The snapshot or null if there is no snapshot
+ */
+ public Snapshot getSnapshot() {
+ assert(isOwningHandle());
+ final long snapshotNativeHandle = getSnapshot(nativeHandle_);
+ if(snapshotNativeHandle == 0) {
+ return null;
+ } else {
+ return new Snapshot(snapshotNativeHandle);
+ }
+ }
+
+ /**
+ * Clears the current snapshot (i.e. no snapshot will be 'set')
+ *
+ * This removes any snapshot that currently exists or is set to be created
+ * on the next update operation ({@link #setSnapshotOnNextOperation()}).
+ *
+ * Calling {@link #clearSnapshot()} has no effect on keys written before this
+ * function has been called.
+ *
+ * If a reference to a snapshot was retrieved via {@link #getSnapshot()}, it
+ * will no longer be valid and should be discarded after a call to
+ * {@link #clearSnapshot()}.
+ */
+ public void clearSnapshot() {
+ assert(isOwningHandle());
+ clearSnapshot(nativeHandle_);
+ }
+
+ /**
+ * Prepare the current transaction for 2PC.
+ *
+ * @throws RocksDBException if an error occurs whilst preparing the
+ * transaction
+ */
+ void prepare() throws RocksDBException {
+ //TODO(AR) consider a Java'ish version of this function, which returns an AutoCloseable (commit)
+ assert(isOwningHandle());
+ prepare(nativeHandle_);
+ }
+
+ /**
+ * Write all batched keys to the db atomically.
+ *
+ * Returns OK on success.
+ *
+ * May return any error status that could be returned by DB:Write().
+ *
+ * If this transaction was created by an {@link OptimisticTransactionDB}
+ * Status::Busy() may be returned if the transaction could not guarantee
+ * that there are no write conflicts. Status::TryAgain() may be returned
+ * if the memtable history size is not large enough
+ * (See max_write_buffer_number_to_maintain).
+ *
+ * If this transaction was created by a {@link TransactionDB},
+ * Status::Expired() may be returned if this transaction has lived for
+ * longer than {@link TransactionOptions#getExpiration()}.
+ *
+ * @throws RocksDBException if an error occurs when committing the transaction
+ */
+ public void commit() throws RocksDBException {
+ assert(isOwningHandle());
+ commit(nativeHandle_);
+ }
+
+ /**
+ * Discard all batched writes in this transaction.
+ *
+ * @throws RocksDBException if an error occurs when rolling back the transaction
+ */
+ public void rollback() throws RocksDBException {
+ assert(isOwningHandle());
+ rollback(nativeHandle_);
+ }
+
+ /**
+ * Records the state of the transaction for future calls to
+ * {@link #rollbackToSavePoint()}.
+ *
+ * May be called multiple times to set multiple save points.
+ *
+ * @throws RocksDBException if an error occurs whilst setting a save point
+ */
+ public void setSavePoint() throws RocksDBException {
+ assert(isOwningHandle());
+ setSavePoint(nativeHandle_);
+ }
+
+ /**
+ * Undo all operations in this transaction (put, merge, delete, putLogData)
+ * since the most recent call to {@link #setSavePoint()} and removes the most
+ * recent {@link #setSavePoint()}.
+ *
+ * If there is no previous call to {@link #setSavePoint()},
+ * returns Status::NotFound()
+ *
+ * @throws RocksDBException if an error occurs when rolling back to a save point
+ */
+ public void rollbackToSavePoint() throws RocksDBException {
+ assert(isOwningHandle());
+ rollbackToSavePoint(nativeHandle_);
+ }
+
+ /**
+ * This function is similar to
+ * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])} except it will
+ * also read pending changes in this transaction.
+ * Currently, this function will return Status::MergeInProgress if the most
+ * recent write to the queried key in this batch is a Merge.
+ *
+ * If {@link ReadOptions#snapshot()} is not set, the current version of the
+ * key will be read. Calling {@link #setSnapshot()} does not affect the
+ * version of the data returned.
+ *
+ * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
+ * what is read from the DB but will NOT change which keys are read from this
+ * transaction (the keys in this transaction do not yet belong to any snapshot
+ * and will be fetched regardless).
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance
+ * @param readOptions Read options.
+ * @param key the key to retrieve the value for.
+ *
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying native
+ * library.
+ */
+ public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
+ final ReadOptions readOptions, final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ return get(nativeHandle_, readOptions.nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * This function is similar to
+ * {@link RocksDB#get(ReadOptions, byte[])} except it will
+ * also read pending changes in this transaction.
+ * Currently, this function will return Status::MergeInProgress if the most
+ * recent write to the queried key in this batch is a Merge.
+ *
+ * If {@link ReadOptions#snapshot()} is not set, the current version of the
+ * key will be read. Calling {@link #setSnapshot()} does not affect the
+ * version of the data returned.
+ *
+ * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
+ * what is read from the DB but will NOT change which keys are read from this
+ * transaction (the keys in this transaction do not yet belong to any snapshot
+ * and will be fetched regardless).
+ *
+ * @param readOptions Read options.
+ * @param key the key to retrieve the value for.
+ *
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying native
+ * library.
+ */
+ public byte[] get(final ReadOptions readOptions, final byte[] key)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ return get(nativeHandle_, readOptions.nativeHandle_, key, key.length);
+ }
+
+ /**
+ * This function is similar to
+ * {@link RocksDB#multiGet(ReadOptions, List, List)} except it will
+ * also read pending changes in this transaction.
+ * Currently, this function will return Status::MergeInProgress if the most
+ * recent write to the queried key in this batch is a Merge.
+ *
+ * If {@link ReadOptions#snapshot()} is not set, the current version of the
+ * key will be read. Calling {@link #setSnapshot()} does not affect the
+ * version of the data returned.
+ *
+ * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
+ * what is read from the DB but will NOT change which keys are read from this
+ * transaction (the keys in this transaction do not yet belong to any snapshot
+ * and will be fetched regardless).
+ *
+ * @param readOptions Read options.
+ * @param columnFamilyHandles {@link java.util.List} containing
+ * {@link org.rocksdb.ColumnFamilyHandle} instances.
+ * @param keys the keys for which values need to be retrieved.
+ *
+ * @return Array of values, one for each key
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IllegalArgumentException thrown if the number of passed keys is
+ * not equal to the number of passed column family handles.
+ */
+ public byte[][] multiGet(final ReadOptions readOptions,
+ final List<ColumnFamilyHandle> columnFamilyHandles,
+ final byte[][] keys) throws RocksDBException {
+ assert(isOwningHandle());
+ // Check that the number of keys matches the number of column family
+ // handles; a mismatch must throw an exception, since it would otherwise
+ // cause a segmentation fault in the native code.
+ if (keys.length != columnFamilyHandles.size()) {
+ throw new IllegalArgumentException(
+ "For each key there must be a ColumnFamilyHandle.");
+ }
+ if(keys.length == 0) {
+ return new byte[0][0];
+ }
+ final long[] cfHandles = new long[columnFamilyHandles.size()];
+ for (int i = 0; i < columnFamilyHandles.size(); i++) {
+ cfHandles[i] = columnFamilyHandles.get(i).nativeHandle_;
+ }
+
+ return multiGet(nativeHandle_, readOptions.nativeHandle_,
+ keys, cfHandles);
+ }
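A sketch of the column-family form of multiGet, wrapped in a helper; the transaction and column family handles are assumed to be supplied by the caller.

import java.util.Arrays;
import java.util.List;
import org.rocksdb.*;

public class MultiGetExample {
  // Sketch: read one key from each of two column families in a transaction.
  static byte[][] readPair(final Transaction txn,
      final ColumnFamilyHandle cfA, final ColumnFamilyHandle cfB)
      throws RocksDBException {
    final List<ColumnFamilyHandle> handles = Arrays.asList(cfA, cfB);
    final byte[][] keys = {"k1".getBytes(), "k2".getBytes()};
    try (final ReadOptions readOptions = new ReadOptions()) {
      // keys.length must equal handles.size(), else IllegalArgumentException;
      // entries of the result are null where the key was not found.
      return txn.multiGet(readOptions, handles, keys);
    }
  }
}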
+
+ /**
+ * This function is similar to
+ * {@link RocksDB#multiGet(ReadOptions, List)} except it will
+ * also read pending changes in this transaction.
+ * Currently, this function will return Status::MergeInProgress if the most
+ * recent write to the queried key in this batch is a Merge.
+ *
+ * If {@link ReadOptions#snapshot()} is not set, the current version of the
+ * key will be read. Calling {@link #setSnapshot()} does not affect the
+ * version of the data returned.
+ *
+ * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
+ * what is read from the DB but will NOT change which keys are read from this
+ * transaction (the keys in this transaction do not yet belong to any snapshot
+ * and will be fetched regardless).
+ *
+ * @param readOptions Read options.
+ * @param keys the keys for which values need to be retrieved.
+ *
+ * @return Array of values, one for each key
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[][] multiGet(final ReadOptions readOptions,
+ final byte[][] keys) throws RocksDBException {
+ assert(isOwningHandle());
+ if(keys.length == 0) {
+ return new byte[0][0];
+ }
+
+ return multiGet(nativeHandle_, readOptions.nativeHandle_,
+ keys);
+ }
+
+ /**
+ * Read this key and ensure that this transaction will only
+ * be able to be committed if this key is not written outside this
+ * transaction after it has first been read (or after the snapshot if a
+ * snapshot is set in this transaction). The transaction behavior is the
+ * same regardless of whether the key exists or not.
+ *
+ * Note: Currently, this function will return Status::MergeInProgress
+ * if the most recent write to the queried key in this batch is a Merge.
+ *
+ * The values returned by this function are similar to
+ * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])}.
+ * If value==nullptr, then this function will not read any data, but will
+ * still ensure that this key cannot be written to by outside of this
+ * transaction.
+ *
+ * If this transaction was created by an {@link OptimisticTransactionDB},
+ * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}
+ * could cause {@link #commit()} to fail. Otherwise, it could return any error
+ * that could be returned by
+ * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])}.
+ *
+ * If this transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ * {@link Status.Code#MergeInProgress} if merge operations cannot be
+ * resolved.
+ *
+ * @param readOptions Read options.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the key to retrieve the value for.
+ * @param exclusive true if the transaction should have exclusive access to
+ * the key, otherwise false for shared access.
+ * @param doValidate true if it should validate the snapshot before doing the read
+ *
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] getForUpdate(final ReadOptions readOptions,
+ final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final boolean exclusive,
+ final boolean doValidate) throws RocksDBException {
+ assert (isOwningHandle());
+ return getForUpdate(nativeHandle_, readOptions.nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_, exclusive, doValidate);
+ }
+
+ /**
+ * Same as
+ * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean, boolean)}
+ * with doValidate=true.
+ *
+ * @param readOptions Read options.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the key to retrieve the value for.
+ * @param exclusive true if the transaction should have exclusive access to
+ * the key, otherwise false for shared access.
+ *
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] getForUpdate(final ReadOptions readOptions,
+ final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ final boolean exclusive) throws RocksDBException {
+ assert(isOwningHandle());
+ return getForUpdate(nativeHandle_, readOptions.nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_, exclusive, true /*doValidate*/);
+ }
+
+ /**
+ * Read this key and ensure that this transaction will only
+ * be able to be committed if this key is not written outside this
+ * transaction after it has first been read (or after the snapshot if a
+ * snapshot is set in this transaction). The transaction behavior is the
+ * same regardless of whether the key exists or not.
+ *
+ * Note: Currently, this function will return Status::MergeInProgress
+ * if the most recent write to the queried key in this batch is a Merge.
+ *
+ * The values returned by this function are similar to
+ * {@link RocksDB#get(ReadOptions, byte[])}.
+ * If value==nullptr, then this function will not read any data, but will
+ * still ensure that this key cannot be written to by outside of this
+ * transaction.
+ *
+ * If this transaction was created on an {@link OptimisticTransactionDB},
+ * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}
+ * could cause {@link #commit()} to fail. Otherwise, it could return any error
+ * that could be returned by
+ * {@link RocksDB#get(ReadOptions, byte[])}.
+ *
+ * If this transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ * {@link Status.Code#MergeInProgress} if merge operations cannot be
+ * resolved.
+ *
+ * @param readOptions Read options.
+ * @param key the key to retrieve the value for.
+ * @param exclusive true if the transaction should have exclusive access to
+ * the key, otherwise false for shared access.
+ *
+ * @return a byte array storing the value associated with the input key if
+ * any. null if it does not find the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[] getForUpdate(final ReadOptions readOptions, final byte[] key,
+ final boolean exclusive) throws RocksDBException {
+ assert(isOwningHandle());
+ return getForUpdate(
+ nativeHandle_, readOptions.nativeHandle_, key, key.length, exclusive, true /*doValidate*/);
+ }
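A sketch of the read-modify-write pattern that getForUpdate enables; the `increment` helper is hypothetical, and the conflict behaviour described in the comments assumes a pessimistic TransactionDB.

import org.rocksdb.*;

public class GetForUpdateExample {
  // Sketch: lock a key, update it, and commit; roll back on failure.
  static void incrementUnderLock(final Transaction txn, final byte[] key)
      throws RocksDBException {
    try (final ReadOptions readOptions = new ReadOptions()) {
      // Lock the key so an external write causes getForUpdate or commit()
      // to fail (e.g. Status.Code.Busy / TimedOut on a TransactionDB).
      final byte[] current = txn.getForUpdate(readOptions, key, true /*exclusive*/);
      txn.put(key, increment(current));
      txn.commit();
    } catch (final RocksDBException e) {
      txn.rollback(); // release locks and discard batched writes
      throw e;
    }
  }

  private static byte[] increment(final byte[] value) {
    // hypothetical domain logic; treats a missing value as zero
    final long n = value == null ? 0 : Long.parseLong(new String(value));
    return String.valueOf(n + 1).getBytes();
  }
}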
+
+ /**
+ * A multi-key version of
+ * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}.
+ *
+ * @param readOptions Read options.
+ * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle}
+ * instances
+ * @param keys the keys to retrieve the values for.
+ *
+ * @return Array of values, one for each key
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[][] multiGetForUpdate(final ReadOptions readOptions,
+ final List<ColumnFamilyHandle> columnFamilyHandles,
+ final byte[][] keys) throws RocksDBException {
+ assert(isOwningHandle());
+ // Check that the number of keys matches the number of column family
+ // handles; a mismatch must throw an exception, since it would otherwise
+ // cause a segmentation fault in the native code.
+ if (keys.length != columnFamilyHandles.size()){
+ throw new IllegalArgumentException(
+ "For each key there must be a ColumnFamilyHandle.");
+ }
+ if(keys.length == 0) {
+ return new byte[0][0];
+ }
+ final long[] cfHandles = new long[columnFamilyHandles.size()];
+ for (int i = 0; i < columnFamilyHandles.size(); i++) {
+ cfHandles[i] = columnFamilyHandles.get(i).nativeHandle_;
+ }
+ return multiGetForUpdate(nativeHandle_, readOptions.nativeHandle_,
+ keys, cfHandles);
+ }
+
+ /**
+ * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}.
+ *
+ * @param readOptions Read options.
+ * @param keys the keys to retrieve the values for.
+ *
+ * @return Array of values, one for each key
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public byte[][] multiGetForUpdate(final ReadOptions readOptions,
+ final byte[][] keys) throws RocksDBException {
+ assert(isOwningHandle());
+ if(keys.length == 0) {
+ return new byte[0][0];
+ }
+
+ return multiGetForUpdate(nativeHandle_,
+ readOptions.nativeHandle_, keys);
+ }
+
+ /**
+ * Returns an iterator that will iterate on all keys in the default
+ * column family including both keys in the DB and uncommitted keys in this
+ * transaction.
+ *
+ * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read
+ * from the DB but will NOT change which keys are read from this transaction
+ * (the keys in this transaction do not yet belong to any snapshot and will be
+ * fetched regardless).
+ *
+ * Caller is responsible for deleting the returned Iterator.
+ *
+ * The returned iterator is only valid until {@link #commit()},
+ * {@link #rollback()}, or {@link #rollbackToSavePoint()} is called.
+ *
+ * @param readOptions Read options.
+ *
+ * @return instance of iterator object.
+ */
+ public RocksIterator getIterator(final ReadOptions readOptions) {
+ assert(isOwningHandle());
+ return new RocksIterator(parent, getIterator(nativeHandle_,
+ readOptions.nativeHandle_));
+ }
+
+ /**
+ * Returns an iterator that will iterate on all keys in the default
+ * column family including both keys in the DB and uncommitted keys in this
+ * transaction.
+ *
+ * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read
+ * from the DB but will NOT change which keys are read from this transaction
+ * (the keys in this transaction do not yet belong to any snapshot and will be
+ * fetched regardless).
+ *
+ * Caller is responsible for calling {@link RocksIterator#close()} on
+ * the returned Iterator.
+ *
+ * The returned iterator is only valid until {@link #commit()},
+ * {@link #rollback()}, or {@link #rollbackToSavePoint()} is called.
+ *
+ * @param readOptions Read options.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ *
+ * @return instance of iterator object.
+ */
+ public RocksIterator getIterator(final ReadOptions readOptions,
+ final ColumnFamilyHandle columnFamilyHandle) {
+ assert(isOwningHandle());
+ return new RocksIterator(parent, getIterator(nativeHandle_,
+ readOptions.nativeHandle_, columnFamilyHandle.nativeHandle_));
+ }
+
+ /**
+ * Similar to {@link RocksDB#put(ColumnFamilyHandle, byte[], byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means that the key was previously tracked
+ * in the same savepoint, with the same exclusive flag, and at a lower
+ * sequence number. If valid then it skips ValidateSnapshot,
+ * throws an error otherwise.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ final byte[] value, final boolean assumeTracked) throws RocksDBException {
+ assert (isOwningHandle());
+ put(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_, assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #put(ColumnFamilyHandle, byte[], byte[], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ final byte[] value) throws RocksDBException {
+ assert(isOwningHandle());
+ put(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_, false);
+ }
+
+ /**
+ * Similar to {@link RocksDB#put(byte[], byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void put(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ put(nativeHandle_, key, key.length, value, value.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #put(ColumnFamilyHandle, byte[], byte[])} but allows
+ * you to specify the key and value in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param keyParts the specified key to be inserted.
+ * @param valueParts the value associated with the specified key.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means that the key was previously tracked
+ * in the same savepoint, with the same exclusive flag, and at a lower
+ * sequence number. If valid then it skips ValidateSnapshot,
+ * throws an error otherwise.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void put(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts, final byte[][] valueParts,
+ final boolean assumeTracked) throws RocksDBException {
+ assert (isOwningHandle());
+ put(nativeHandle_, keyParts, keyParts.length, valueParts, valueParts.length,
+ columnFamilyHandle.nativeHandle_, assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #put(ColumnFamilyHandle, byte[][], byte[][], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Allows you to specify the key and value in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param keyParts the specified key to be inserted.
+ * @param valueParts the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void put(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts, final byte[][] valueParts)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ put(nativeHandle_, keyParts, keyParts.length, valueParts, valueParts.length,
+ columnFamilyHandle.nativeHandle_, false);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #put(byte[], byte[])} but allows
+ * you to specify the key and value in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be inserted.
+ * @param valueParts the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void put(final byte[][] keyParts, final byte[][] valueParts)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ put(nativeHandle_, keyParts, keyParts.length, valueParts,
+ valueParts.length);
+ }
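+
+ // Illustrative sketch: the key and value parts are concatenated before
+ // being written, so (assuming byte[] locals part1, part2, val1 and val2)
+ // the multi-part call below should behave like a single put of the
+ // concatenated key and value:
+ //
+ //   txn.put(new byte[][]{part1, part2}, new byte[][]{val1, val2});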
+
+ /**
+ * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to merge the key/value into
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means that the key was previously
+ * tracked in the same savepoint, with the same exclusive flag, and at a
+ * lower sequence number. If valid, ValidateSnapshot is skipped;
+ * otherwise an error is thrown.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void merge(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value, final boolean assumeTracked)
+ throws RocksDBException {
+ assert (isOwningHandle());
+ merge(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_, assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #merge(ColumnFamilyHandle, byte[], byte[], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to merge the key/value into
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void merge(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value) throws RocksDBException {
+ assert(isOwningHandle());
+ merge(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_, false);
+ }
+
+ /**
+ * Similar to {@link RocksDB#merge(byte[], byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void merge(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ merge(nativeHandle_, key, key.length, value, value.length);
+ }
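+
+ // Illustrative sketch: merge is only meaningful when a merge operator has
+ // been configured on the database options; one hedged example, assuming
+ // the built-in string-append operator:
+ //
+ //   options.setMergeOperator(new StringAppendOperator());
+ //   ...
+ //   txn.merge(key, value); // conflict-checked; combined on read/compaction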
+
+ /**
+ * Similar to {@link RocksDB#delete(ColumnFamilyHandle, byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means that the key was previously
+ * tracked in the same savepoint, with the same exclusive flag, and at a
+ * lower sequence number. If valid, ValidateSnapshot is skipped;
+ * otherwise an error is thrown.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final boolean assumeTracked) throws RocksDBException {
+ assert (isOwningHandle());
+ delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_,
+ assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #delete(ColumnFamilyHandle, byte[], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_,
+ /*assumeTracked*/ false);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ delete(nativeHandle_, key, key.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #delete(ColumnFamilyHandle, byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means that the key was previously
+ * tracked in the same savepoint, with the same exclusive flag, and at a
+ * lower sequence number. If valid, ValidateSnapshot is skipped;
+ * otherwise an error is thrown.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts, final boolean assumeTracked)
+ throws RocksDBException {
+ assert (isOwningHandle());
+ delete(nativeHandle_, keyParts, keyParts.length,
+ columnFamilyHandle.nativeHandle_, assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #delete(ColumnFamilyHandle, byte[][], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Allows you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ delete(nativeHandle_, keyParts, keyParts.length,
+ columnFamilyHandle.nativeHandle_, false);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #delete(byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be deleted
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ delete(nativeHandle_, keyParts, keyParts.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#singleDelete(ColumnFamilyHandle, byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means that the key was previously
+ * tracked in the same savepoint, with the same exclusive flag, and at a
+ * lower sequence number. If valid, ValidateSnapshot is skipped;
+ * otherwise an error is thrown.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final boolean assumeTracked) throws RocksDBException {
+ assert (isOwningHandle());
+ singleDelete(nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_, assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #singleDelete(ColumnFamilyHandle, byte[], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ singleDelete(nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_, false);
+ }
+
+ /**
+ * Similar to {@link RocksDB#singleDelete(byte[])}, but
+ * will also perform conflict checking on the keys being written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ singleDelete(nativeHandle_, key, key.length);
+ }
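+
+ // Illustrative note: per the RocksDB SingleDelete contract, this is only
+ // well-defined when the key has been put at most once since the last
+ // delete; a hedged sketch:
+ //
+ //   txn.put(key, value);   // exactly one put for this key
+ //   txn.singleDelete(key); // removes that single version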
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #singleDelete(ColumnFamilyHandle, byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means that the key was previously
+ * tracked in the same savepoint, with the same exclusive flag, and at a
+ * lower sequence number. If valid, ValidateSnapshot is skipped;
+ * otherwise an error is thrown.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts, final boolean assumeTracked)
+ throws RocksDBException {
+ assert (isOwningHandle());
+ singleDelete(nativeHandle_, keyParts, keyParts.length,
+ columnFamilyHandle.nativeHandle_, assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #singleDelete(ColumnFamilyHandle, byte[][], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Allows you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ singleDelete(nativeHandle_, keyParts, keyParts.length,
+ columnFamilyHandle.nativeHandle_, false);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #singleDelete(byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ singleDelete(nativeHandle_, keyParts, keyParts.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#put(ColumnFamilyHandle, byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #put(ColumnFamilyHandle, byte[], byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value) throws RocksDBException {
+ assert(isOwningHandle());
+ putUntracked(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#put(byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #put(byte[], byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ putUntracked(nativeHandle_, key, key.length, value, value.length);
+ }
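+
+ // Illustrative sketch: untracked writes skip conflict checking, which may
+ // be appropriate for keys known to be written only by this transaction
+ // (an assumption of the example, not something the API enforces):
+ //
+ //   txn.putUntracked(key, value); // no conflict tracking recorded for key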
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #putUntracked(ColumnFamilyHandle, byte[], byte[])} but
+ * allows you to specify the key and value in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param keyParts the specified key to be inserted.
+ * @param valueParts the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts, final byte[][] valueParts)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ putUntracked(nativeHandle_, keyParts, keyParts.length, valueParts,
+ valueParts.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #putUntracked(byte[], byte[])} but
+ * allows you to specify the key and value in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be inserted.
+ * @param valueParts the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final byte[][] keyParts, final byte[][] valueParts)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ putUntracked(nativeHandle_, keyParts, keyParts.length, valueParts,
+ valueParts.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #merge(ColumnFamilyHandle, byte[], byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param columnFamilyHandle The column family to merge the key/value into
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void mergeUntracked(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value) throws RocksDBException {
+ assert(isOwningHandle());
+ mergeUntracked(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#merge(byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #merge(byte[], byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void mergeUntracked(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ mergeUntracked(nativeHandle_, key, key.length, value, value.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(ColumnFamilyHandle, byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #delete(ColumnFamilyHandle, byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void deleteUntracked(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ deleteUntracked(nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #delete(byte[])} no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void deleteUntracked(final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ deleteUntracked(nativeHandle_, key, key.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #deleteUntracked(ColumnFamilyHandle, byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void deleteUntracked(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ deleteUntracked(nativeHandle_, keyParts, keyParts.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #deleteUntracked(byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void deleteUntracked(final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ deleteUntracked(nativeHandle_, keyParts, keyParts.length);
+ }
+
+ /**
+ * Similar to {@link WriteBatch#putLogData(byte[])}
+ *
+ * @param blob binary object to be inserted
+ */
+ public void putLogData(final byte[] blob) {
+ assert(isOwningHandle());
+ putLogData(nativeHandle_, blob, blob.length);
+ }
+
+ /**
+ * By default, all put/merge/delete operations will be indexed in the
+ * transaction so that get/getForUpdate/getIterator can search for these
+ * keys.
+ *
+ * If the caller does not want to fetch the keys about to be written,
+ * they may want to avoid indexing as a performance optimization.
+ * Calling {@link #disableIndexing()} will turn off indexing for all future
+ * put/merge/delete operations until {@link #enableIndexing()} is called.
+ *
+ * If a key is put/merge/deleted after {@link #disableIndexing()} is called
+ * and then is fetched via get/getForUpdate/getIterator, the result of the
+ * fetch is undefined.
+ */
+ public void disableIndexing() {
+ assert(isOwningHandle());
+ disableIndexing(nativeHandle_);
+ }
+
+ /**
+ * Re-enables indexing after a previous call to {@link #disableIndexing()}
+ */
+ public void enableIndexing() {
+ assert(isOwningHandle());
+ enableIndexing(nativeHandle_);
+ }
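+
+ // Illustrative sketch: wrapping a write-only batch in disable/enable
+ // indexing as a performance optimization ("batch" is an assumed local;
+ // the keys must not be read back within this transaction):
+ //
+ //   txn.disableIndexing();
+ //   for (final byte[][] kv : batch) {
+ //     txn.put(kv[0], kv[1]);
+ //   }
+ //   txn.enableIndexing();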
+
+ /**
+ * Returns the number of distinct Keys being tracked by this transaction.
+ * If this transaction was created by a {@link TransactionDB}, this is the
+ * number of keys that are currently locked by this transaction.
+ * If this transaction was created by an {@link OptimisticTransactionDB},
+ * this is the number of keys that need to be checked for conflicts at commit
+ * time.
+ *
+ * @return the number of distinct Keys being tracked by this transaction
+ */
+ public long getNumKeys() {
+ assert(isOwningHandle());
+ return getNumKeys(nativeHandle_);
+ }
+
+ /**
+ * Returns the number of puts that have been applied to this
+ * transaction so far.
+ *
+ * @return the number of puts that have been applied to this transaction
+ */
+ public long getNumPuts() {
+ assert(isOwningHandle());
+ return getNumPuts(nativeHandle_);
+ }
+
+ /**
+ * Returns the number of deletes that have been applied to this
+ * transaction so far.
+ *
+ * @return the number of deletes that have been applied to this transaction
+ */
+ public long getNumDeletes() {
+ assert(isOwningHandle());
+ return getNumDeletes(nativeHandle_);
+ }
+
+ /**
+ * Returns the number of merges that have been applied to this
+ * transaction so far.
+ *
+ * @return the number of merges that have been applied to this transaction
+ */
+ public long getNumMerges() {
+ assert(isOwningHandle());
+ return getNumMerges(nativeHandle_);
+ }
+
+ /**
+ * Returns the elapsed time in milliseconds since this Transaction began.
+ *
+ * @return the elapsed time in milliseconds since this transaction began.
+ */
+ public long getElapsedTime() {
+ assert(isOwningHandle());
+ return getElapsedTime(nativeHandle_);
+ }
+
+ /**
+ * Fetch the underlying write batch that contains all pending changes to be
+ * committed.
+ *
+ * Note: You should not write or delete anything from the batch directly and
+ * should only use the functions in the {@link Transaction} class to
+ * write to this transaction.
+ *
+ * @return The write batch
+ */
+ public WriteBatchWithIndex getWriteBatch() {
+ assert(isOwningHandle());
+ final WriteBatchWithIndex writeBatchWithIndex =
+ new WriteBatchWithIndex(getWriteBatch(nativeHandle_));
+ return writeBatchWithIndex;
+ }
+
+ /**
+ * Change the value of {@link TransactionOptions#getLockTimeout()}
+ * (in milliseconds) for this transaction.
+ *
+ * Has no effect on OptimisticTransactions.
+ *
+ * @param lockTimeout the timeout (in milliseconds) for locks used by this
+ * transaction.
+ */
+ public void setLockTimeout(final long lockTimeout) {
+ assert(isOwningHandle());
+ setLockTimeout(nativeHandle_, lockTimeout);
+ }
+
+ /**
+ * Return the WriteOptions that will be used during {@link #commit()}.
+ *
+ * @return the WriteOptions that will be used
+ */
+ public WriteOptions getWriteOptions() {
+ assert(isOwningHandle());
+ final WriteOptions writeOptions =
+ new WriteOptions(getWriteOptions(nativeHandle_));
+ return writeOptions;
+ }
+
+ /**
+ * Reset the WriteOptions that will be used during {@link #commit()}.
+ *
+ * @param writeOptions The new WriteOptions
+ */
+ public void setWriteOptions(final WriteOptions writeOptions) {
+ assert(isOwningHandle());
+ setWriteOptions(nativeHandle_, writeOptions.nativeHandle_);
+ }
+
+ /**
+ * If this key was previously fetched in this transaction using
+ * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}/
+ * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, calling
+ * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will tell
+ * the transaction that it no longer needs to do any conflict checking
+ * for this key.
+ *
+ * If a key has been fetched N times via
+ * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}/
+ * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, then
+ * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will only have an
+ * effect if it is also called N times. If this key has been written to in
+ * this transaction, {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])}
+ * will have no effect.
+ *
+ * If {@link #setSavePoint()} has been called after the
+ * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)},
+ * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will not have any
+ * effect.
+ *
+ * If this Transaction was created by an {@link OptimisticTransactionDB},
+ * calling {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} can affect
+ * whether this key is conflict checked at commit time.
+ * If this Transaction was created by a {@link TransactionDB},
+ * calling {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} may release
+ * any held locks for this key.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the key for which to undo conflict checking.
+ */
+ public void undoGetForUpdate(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) {
+ assert(isOwningHandle());
+ undoGetForUpdate(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * If this key was previously fetched in this transaction using
+ * {@link #getForUpdate(ReadOptions, byte[], boolean)}/
+ * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, calling
+ * {@link #undoGetForUpdate(byte[])} will tell
+ * the transaction that it no longer needs to do any conflict checking
+ * for this key.
+ *
+ * If a key has been fetched N times via
+ * {@link #getForUpdate(ReadOptions, byte[], boolean)}/
+ * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, then
+ * {@link #undoGetForUpdate(byte[])} will only have an
+ * effect if it is also called N times. If this key has been written to in
+ * this transaction, {@link #undoGetForUpdate(byte[])}
+ * will have no effect.
+ *
+ * If {@link #setSavePoint()} has been called after the
+ * {@link #getForUpdate(ReadOptions, byte[], boolean)},
+ * {@link #undoGetForUpdate(byte[])} will not have any
+ * effect.
+ *
+ * If this Transaction was created by an {@link OptimisticTransactionDB},
+ * calling {@link #undoGetForUpdate(byte[])} can affect
+ * whether this key is conflict checked at commit time.
+ * If this Transaction was created by a {@link TransactionDB},
+ * calling {@link #undoGetForUpdate(byte[])} may release
+ * any held locks for this key.
+ *
+ * @param key the key for which to undo conflict checking.
+ */
+ public void undoGetForUpdate(final byte[] key) {
+ assert(isOwningHandle());
+ undoGetForUpdate(nativeHandle_, key, key.length);
+ }
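+
+ // Illustrative sketch of pairing getForUpdate with undoGetForUpdate
+ // (readOptions is an assumed local and needsUpdate a hypothetical helper):
+ //
+ //   final byte[] v = txn.getForUpdate(readOptions, key, true /*exclusive*/);
+ //   if (!needsUpdate(v)) {
+ //     txn.undoGetForUpdate(key); // stop conflict checking for this key
+ //   }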
+
+ /**
+ * Adds the keys from the WriteBatch to the transaction
+ *
+ * @param writeBatch The write batch to read from
+ *
+ * @throws RocksDBException if an error occurs whilst rebuilding from the
+ * write batch.
+ */
+ public void rebuildFromWriteBatch(final WriteBatch writeBatch)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ rebuildFromWriteBatch(nativeHandle_, writeBatch.nativeHandle_);
+ }
+
+ /**
+ * Get the Commit time Write Batch.
+ *
+ * @return the commit time write batch.
+ */
+ public WriteBatch getCommitTimeWriteBatch() {
+ assert(isOwningHandle());
+ final WriteBatch writeBatch =
+ new WriteBatch(getCommitTimeWriteBatch(nativeHandle_));
+ return writeBatch;
+ }
+
+ /**
+ * Set the log number.
+ *
+ * @param logNumber the log number
+ */
+ public void setLogNumber(final long logNumber) {
+ assert(isOwningHandle());
+ setLogNumber(nativeHandle_, logNumber);
+ }
+
+ /**
+ * Get the log number.
+ *
+ * @return the log number
+ */
+ public long getLogNumber() {
+ assert(isOwningHandle());
+ return getLogNumber(nativeHandle_);
+ }
+
+ /**
+ * Set the name of the transaction.
+ *
+ * @param transactionName the name of the transaction
+ *
+ * @throws RocksDBException if an error occurs when setting the transaction
+ * name.
+ */
+ public void setName(final String transactionName) throws RocksDBException {
+ assert(isOwningHandle());
+ setName(nativeHandle_, transactionName);
+ }
+
+ /**
+ * Get the name of the transaction.
+ *
+ * @return the name of the transaction
+ */
+ public String getName() {
+ assert(isOwningHandle());
+ return getName(nativeHandle_);
+ }
+
+ /**
+ * Get the ID of the transaction.
+ *
+ * @return the ID of the transaction.
+ */
+ public long getID() {
+ assert(isOwningHandle());
+ return getID(nativeHandle_);
+ }
+
+ /**
+ * Determine if a deadlock has been detected.
+ *
+ * @return true if a deadlock has been detected.
+ */
+ public boolean isDeadlockDetect() {
+ assert(isOwningHandle());
+ return isDeadlockDetect(nativeHandle_);
+ }
+
+ /**
+ * Get the list of waiting transactions.
+ *
+ * @return The list of waiting transactions.
+ */
+ public WaitingTransactions getWaitingTxns() {
+ assert(isOwningHandle());
+ return getWaitingTxns(nativeHandle_);
+ }
+
+ /**
+ * Get the execution status of the transaction.
+ *
+ * NOTE: The execution status of an Optimistic Transaction
+ * never changes. This is only useful for non-optimistic transactions!
+ *
+ * @return The execution status of the transaction
+ */
+ public TransactionState getState() {
+ assert(isOwningHandle());
+ return TransactionState.getTransactionState(
+ getState(nativeHandle_));
+ }
+
+ /**
+ * The globally unique id with which the transaction is identified. This id
+ * might or might not be set, depending on the implementation. Similarly, the
+ * implementation decides at which point in the lifetime of a transaction it
+ * assigns the id. Although this is currently the case, the id is not
+ * guaranteed to remain the same across restarts.
+ *
+ * @return the transaction id.
+ */
+ @Experimental("NOTE: Experimental feature")
+ public long getId() {
+ assert(isOwningHandle());
+ return getId(nativeHandle_);
+ }
+
+ public enum TransactionState {
+ STARTED((byte)0),
+ AWAITING_PREPARE((byte)1),
+ PREPARED((byte)2),
+ AWAITING_COMMIT((byte)3),
+ COMMITED((byte)4),
+ AWAITING_ROLLBACK((byte)5),
+ ROLLEDBACK((byte)6),
+ LOCKS_STOLEN((byte)7);
+
+ private final byte value;
+
+ TransactionState(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get TransactionState by byte value.
+ *
+ * @param value byte representation of TransactionState.
+ *
+ * @return the corresponding {@link org.rocksdb.Transaction.TransactionState} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ public static TransactionState getTransactionState(final byte value) {
+ for (final TransactionState transactionState : TransactionState.values()) {
+ if (transactionState.value == value){
+ return transactionState;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for TransactionState.");
+ }
+ }
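+
+ // Illustrative sketch: observing the state of a named two-phase
+ // transaction (hedged; state transitions are only meaningful for
+ // pessimistic transactions, see getState()):
+ //
+ //   txn.setName("txn-1");
+ //   txn.prepare();
+ //   assert txn.getState() == TransactionState.PREPARED;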
+
+ /**
+ * Called from C++ native method {@link #getWaitingTxns(long)}
+ * to construct a WaitingTransactions object.
+ *
+ * @param columnFamilyId The id of the {@link ColumnFamilyHandle}
+ * @param key The key
+ * @param transactionIds The transaction ids
+ *
+ * @return The waiting transactions
+ */
+ private WaitingTransactions newWaitingTransactions(
+ final long columnFamilyId, final String key,
+ final long[] transactionIds) {
+ return new WaitingTransactions(columnFamilyId, key, transactionIds);
+ }
+
+ public static class WaitingTransactions {
+ private final long columnFamilyId;
+ private final String key;
+ private final long[] transactionIds;
+
+ private WaitingTransactions(final long columnFamilyId, final String key,
+ final long[] transactionIds) {
+ this.columnFamilyId = columnFamilyId;
+ this.key = key;
+ this.transactionIds = transactionIds;
+ }
+
+ /**
+ * Get the Column Family ID.
+ *
+ * @return The column family ID
+ */
+ public long getColumnFamilyId() {
+ return columnFamilyId;
+ }
+
+ /**
+ * Get the key on which the transactions are waiting.
+ *
+ * @return The key
+ */
+ public String getKey() {
+ return key;
+ }
+
+ /**
+ * Get the IDs of the waiting transactions.
+ *
+ * @return The IDs of the waiting transactions
+ */
+ public long[] getTransactionIds() {
+ return transactionIds;
+ }
+ }
+
+ private native void setSnapshot(final long handle);
+ private native void setSnapshotOnNextOperation(final long handle);
+ private native void setSnapshotOnNextOperation(final long handle,
+ final long transactionNotifierHandle);
+ private native long getSnapshot(final long handle);
+ private native void clearSnapshot(final long handle);
+ private native void prepare(final long handle) throws RocksDBException;
+ private native void commit(final long handle) throws RocksDBException;
+ private native void rollback(final long handle) throws RocksDBException;
+ private native void setSavePoint(final long handle) throws RocksDBException;
+ private native void rollbackToSavePoint(final long handle)
+ throws RocksDBException;
+ private native byte[] get(final long handle, final long readOptionsHandle,
+ final byte key[], final int keyLength, final long columnFamilyHandle)
+ throws RocksDBException;
+ private native byte[] get(final long handle, final long readOptionsHandle,
+ final byte key[], final int keyLen) throws RocksDBException;
+ private native byte[][] multiGet(final long handle,
+ final long readOptionsHandle, final byte[][] keys,
+ final long[] columnFamilyHandles) throws RocksDBException;
+ private native byte[][] multiGet(final long handle,
+ final long readOptionsHandle, final byte[][] keys)
+ throws RocksDBException;
+ private native byte[] getForUpdate(final long handle, final long readOptionsHandle,
+ final byte key[], final int keyLength, final long columnFamilyHandle, final boolean exclusive,
+ final boolean doValidate) throws RocksDBException;
+ private native byte[] getForUpdate(final long handle, final long readOptionsHandle,
+ final byte key[], final int keyLen, final boolean exclusive, final boolean doValidate)
+ throws RocksDBException;
+ private native byte[][] multiGetForUpdate(final long handle,
+ final long readOptionsHandle, final byte[][] keys,
+ final long[] columnFamilyHandles) throws RocksDBException;
+ private native byte[][] multiGetForUpdate(final long handle,
+ final long readOptionsHandle, final byte[][] keys)
+ throws RocksDBException;
+ private native long getIterator(final long handle,
+ final long readOptionsHandle);
+ private native long getIterator(final long handle,
+ final long readOptionsHandle, final long columnFamilyHandle);
+ private native void put(final long handle, final byte[] key, final int keyLength,
+ final byte[] value, final int valueLength, final long columnFamilyHandle,
+ final boolean assumeTracked) throws RocksDBException;
+ private native void put(final long handle, final byte[] key,
+ final int keyLength, final byte[] value, final int valueLength)
+ throws RocksDBException;
+ private native void put(final long handle, final byte[][] keys, final int keysLength,
+ final byte[][] values, final int valuesLength, final long columnFamilyHandle,
+ final boolean assumeTracked) throws RocksDBException;
+ private native void put(final long handle, final byte[][] keys,
+ final int keysLength, final byte[][] values, final int valuesLength)
+ throws RocksDBException;
+ private native void merge(final long handle, final byte[] key, final int keyLength,
+ final byte[] value, final int valueLength, final long columnFamilyHandle,
+ final boolean assumeTracked) throws RocksDBException;
+ private native void merge(final long handle, final byte[] key,
+ final int keyLength, final byte[] value, final int valueLength)
+ throws RocksDBException;
+ private native void delete(final long handle, final byte[] key, final int keyLength,
+ final long columnFamilyHandle, final boolean assumeTracked) throws RocksDBException;
+ private native void delete(final long handle, final byte[] key,
+ final int keyLength) throws RocksDBException;
+ private native void delete(final long handle, final byte[][] keys, final int keysLength,
+ final long columnFamilyHandle, final boolean assumeTracked) throws RocksDBException;
+ private native void delete(final long handle, final byte[][] keys,
+ final int keysLength) throws RocksDBException;
+ private native void singleDelete(final long handle, final byte[] key, final int keyLength,
+ final long columnFamilyHandle, final boolean assumeTracked) throws RocksDBException;
+ private native void singleDelete(final long handle, final byte[] key,
+ final int keyLength) throws RocksDBException;
+ private native void singleDelete(final long handle, final byte[][] keys, final int keysLength,
+ final long columnFamilyHandle, final boolean assumeTracked) throws RocksDBException;
+ private native void singleDelete(final long handle, final byte[][] keys,
+ final int keysLength) throws RocksDBException;
+ private native void putUntracked(final long handle, final byte[] key,
+ final int keyLength, final byte[] value, final int valueLength,
+ final long columnFamilyHandle) throws RocksDBException;
+ private native void putUntracked(final long handle, final byte[] key,
+ final int keyLength, final byte[] value, final int valueLength)
+ throws RocksDBException;
+ private native void putUntracked(final long handle, final byte[][] keys,
+ final int keysLength, final byte[][] values, final int valuesLength,
+ final long columnFamilyHandle) throws RocksDBException;
+ private native void putUntracked(final long handle, final byte[][] keys,
+ final int keysLength, final byte[][] values, final int valuesLength)
+ throws RocksDBException;
+ private native void mergeUntracked(final long handle, final byte[] key,
+ final int keyLength, final byte[] value, final int valueLength,
+ final long columnFamilyHandle) throws RocksDBException;
+ private native void mergeUntracked(final long handle, final byte[] key,
+ final int keyLength, final byte[] value, final int valueLength)
+ throws RocksDBException;
+ private native void deleteUntracked(final long handle, final byte[] key,
+ final int keyLength, final long columnFamilyHandle)
+ throws RocksDBException;
+ private native void deleteUntracked(final long handle, final byte[] key,
+ final int keyLength) throws RocksDBException;
+ private native void deleteUntracked(final long handle, final byte[][] keys,
+ final int keysLength, final long columnFamilyHandle)
+ throws RocksDBException;
+ private native void deleteUntracked(final long handle, final byte[][] keys,
+ final int keysLength) throws RocksDBException;
+ private native void putLogData(final long handle, final byte[] blob,
+ final int blobLength);
+ private native void disableIndexing(final long handle);
+ private native void enableIndexing(final long handle);
+ private native long getNumKeys(final long handle);
+ private native long getNumPuts(final long handle);
+ private native long getNumDeletes(final long handle);
+ private native long getNumMerges(final long handle);
+ private native long getElapsedTime(final long handle);
+ private native long getWriteBatch(final long handle);
+ private native void setLockTimeout(final long handle, final long lockTimeout);
+ private native long getWriteOptions(final long handle);
+ private native void setWriteOptions(final long handle,
+ final long writeOptionsHandle);
+ private native void undoGetForUpdate(final long handle, final byte[] key,
+ final int keyLength, final long columnFamilyHandle);
+ private native void undoGetForUpdate(final long handle, final byte[] key,
+ final int keyLength);
+ private native void rebuildFromWriteBatch(final long handle,
+ final long writeBatchHandle) throws RocksDBException;
+ private native long getCommitTimeWriteBatch(final long handle);
+ private native void setLogNumber(final long handle, final long logNumber);
+ private native long getLogNumber(final long handle);
+ private native void setName(final long handle, final String name)
+ throws RocksDBException;
+ private native String getName(final long handle);
+ private native long getID(final long handle);
+ private native boolean isDeadlockDetect(final long handle);
+ private native WaitingTransactions getWaitingTxns(final long handle);
+ private native byte getState(final long handle);
+ private native long getId(final long handle);
+
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDB.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDB.java
new file mode 100644
index 000000000..7a6259975
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDB.java
@@ -0,0 +1,404 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Database with Transaction support
+ */
+public class TransactionDB extends RocksDB
+ implements TransactionalDB<TransactionOptions> {
+
+ private TransactionDBOptions transactionDbOptions_;
+
+ /**
+ * Private constructor.
+ *
+ * @param nativeHandle The native handle of the C++ TransactionDB object
+ */
+ private TransactionDB(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Open a TransactionDB, similar to {@link RocksDB#open(Options, String)}.
+ *
+ * @param options {@link org.rocksdb.Options} instance.
+ * @param transactionDbOptions {@link org.rocksdb.TransactionDBOptions}
+ * instance.
+ * @param path the path to the rocksdb.
+ *
+ * @return a {@link TransactionDB} instance on success, null if the specified
+ * {@link TransactionDB} can not be opened.
+ *
+ * @throws RocksDBException if an error occurs whilst opening the database.
+ */
+ public static TransactionDB open(final Options options,
+ final TransactionDBOptions transactionDbOptions, final String path)
+ throws RocksDBException {
+ final TransactionDB tdb = new TransactionDB(open(options.nativeHandle_,
+ transactionDbOptions.nativeHandle_, path));
+
+ // when a non-default Options is used, keeping an Options reference
+ // in RocksDB prevents Java from garbage collecting it during the
+ // lifetime of the currently-created RocksDB.
+ tdb.storeOptionsInstance(options);
+ tdb.storeTransactionDbOptions(transactionDbOptions);
+
+ return tdb;
+ }
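+
+ // Illustrative sketch of opening a TransactionDB (the path and option
+ // values are assumptions for the example):
+ //
+ //   try (final Options options = new Options().setCreateIfMissing(true);
+ //        final TransactionDBOptions txnDbOptions =
+ //            new TransactionDBOptions();
+ //        final TransactionDB txnDb =
+ //            TransactionDB.open(options, txnDbOptions, "/tmp/txn_db")) {
+ //     // ... begin transactions, read/write, commit ...
+ //   }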
+
+ /**
+ * Open a TransactionDB, similar to
+ * {@link RocksDB#open(DBOptions, String, List, List)}.
+ *
+ * @param dbOptions {@link org.rocksdb.DBOptions} instance.
+ * @param transactionDbOptions {@link org.rocksdb.TransactionDBOptions}
+ * instance.
+ * @param path the path to the rocksdb.
+ * @param columnFamilyDescriptors list of column family descriptors
+ * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
+ *
+ * @return a {@link TransactionDB} instance on success, null if the specified
+ * {@link TransactionDB} can not be opened.
+ *
+ * @throws RocksDBException if an error occurs whilst opening the database.
+ */
+ public static TransactionDB open(final DBOptions dbOptions,
+ final TransactionDBOptions transactionDbOptions,
+ final String path,
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
+ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+
+ final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
+ final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
+ for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
+ final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
+ .get(i);
+ cfNames[i] = cfDescriptor.getName();
+ cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_;
+ }
+
+ final long[] handles = open(dbOptions.nativeHandle_,
+ transactionDbOptions.nativeHandle_, path, cfNames, cfOptionHandles);
+ final TransactionDB tdb = new TransactionDB(handles[0]);
+
+ // when a non-default Options is used, keeping an Options reference
+ // in RocksDB prevents Java from garbage collecting it during the
+ // lifetime of the currently-created RocksDB.
+ tdb.storeOptionsInstance(dbOptions);
+ tdb.storeTransactionDbOptions(transactionDbOptions);
+
+ for (int i = 1; i < handles.length; i++) {
+ columnFamilyHandles.add(new ColumnFamilyHandle(tdb, handles[i]));
+ }
+
+ return tdb;
+ }
+
+ /**
+ * This is similar to {@link #close()} except that it
+ * throws an exception if any error occurs.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ *
+ * @throws RocksDBException if an error occurs whilst closing.
+ */
+ public void closeE() throws RocksDBException {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
+
+ /**
+ * This is similar to {@link #closeE()} except that it
+ * silently ignores any errors.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ */
+ @Override
+ public void close() {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } catch (final RocksDBException e) {
+ // silently ignore the error report
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions) {
+ return new Transaction(this, beginTransaction(nativeHandle_,
+ writeOptions.nativeHandle_));
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+ final TransactionOptions transactionOptions) {
+ return new Transaction(this, beginTransaction(nativeHandle_,
+ writeOptions.nativeHandle_, transactionOptions.nativeHandle_));
+ }
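+
+ // Illustrative sketch: supplying a per-transaction lock timeout through
+ // TransactionOptions (the 100 ms value is an assumption):
+ //
+ //   try (final TransactionOptions txnOptions =
+ //            new TransactionOptions().setLockTimeout(100);
+ //        final Transaction txn =
+ //            txnDb.beginTransaction(writeOptions, txnOptions)) {
+ //     // ... conflict-checked reads and writes ...
+ //     txn.commit();
+ //   }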
+
+ // TODO(AR) consider having beginTransaction(... oldTransaction) set a
+ // reference count inside Transaction, so that we can always call
+ // Transaction#close but the object is only disposed when there are as many
+ // closes as beginTransaction calls. This makes the try-with-resources
+ // paradigm easier for Java developers.
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+ final Transaction oldTransaction) {
+ final long jtxnHandle = beginTransaction_withOld(nativeHandle_,
+ writeOptions.nativeHandle_, oldTransaction.nativeHandle_);
+
+ // RocksJava relies on the assumption that
+ // we do not allocate a new Transaction object
+ // when providing an old_txn
+ assert(jtxnHandle == oldTransaction.nativeHandle_);
+
+ return oldTransaction;
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+ final TransactionOptions transactionOptions,
+ final Transaction oldTransaction) {
+ final long jtxn_handle = beginTransaction_withOld(nativeHandle_,
+ writeOptions.nativeHandle_, transactionOptions.nativeHandle_,
+ oldTransaction.nativeHandle_);
+
+ // RocksJava relies on the assumption that
+ // we do not allocate a new Transaction object
+ // when providing an old_txn
+ assert(jtxn_handle == oldTransaction.nativeHandle_);
+
+ return oldTransaction;
+ }
+
+ public Transaction getTransactionByName(final String transactionName) {
+ final long jtxnHandle = getTransactionByName(nativeHandle_, transactionName);
+ if (jtxnHandle == 0) {
+ return null;
+ }
+
+ final Transaction txn = new Transaction(this, jtxnHandle);
+
+ // this instance doesn't own the underlying C++ object
+ txn.disOwnNativeHandle();
+
+ return txn;
+ }
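+
+ // Illustrative sketch: recovering a named, prepared transaction after a
+ // restart (hedged; "txn-1" is an assumed name):
+ //
+ //   final Transaction recovered = txnDb.getTransactionByName("txn-1");
+ //   if (recovered != null) {
+ //     recovered.rollback(); // or commit(), per the coordinator's decision
+ //   }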
+
+ public List<Transaction> getAllPreparedTransactions() {
+ final long[] jtxnHandles = getAllPreparedTransactions(nativeHandle_);
+
+ final List<Transaction> txns = new ArrayList<>();
+ for (final long jtxnHandle : jtxnHandles) {
+ final Transaction txn = new Transaction(this, jtxnHandle);
+
+ // this instance doesn't own the underlying C++ object
+ txn.disOwnNativeHandle();
+
+ txns.add(txn);
+ }
+ return txns;
+ }
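+
+  // Usage sketch (illustrative): resolving two-phase-commit transactions that
+  // were prepared before a restart. As noted above, the returned Transaction
+  // objects do not own their underlying native handles.
+  //
+  //   for (final Transaction prepared : txnDb.getAllPreparedTransactions()) {
+  //     // application-specific decision: commit() or rollback()
+  //     prepared.rollback();
+  //   }
+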
+
+ public static class KeyLockInfo {
+ private final String key;
+ private final long[] transactionIDs;
+ private final boolean exclusive;
+
+    public KeyLockInfo(final String key, final long[] transactionIDs,
+ final boolean exclusive) {
+ this.key = key;
+ this.transactionIDs = transactionIDs;
+ this.exclusive = exclusive;
+ }
+
+ /**
+ * Get the key.
+ *
+ * @return the key
+ */
+ public String getKey() {
+ return key;
+ }
+
+ /**
+ * Get the Transaction IDs.
+ *
+ * @return the Transaction IDs.
+ */
+ public long[] getTransactionIDs() {
+ return transactionIDs;
+ }
+
+ /**
+ * Get the Lock status.
+ *
+ * @return true if the lock is exclusive, false if the lock is shared.
+ */
+ public boolean isExclusive() {
+ return exclusive;
+ }
+ }
+
+ /**
+ * Returns map of all locks held.
+ *
+ * @return a map of all the locks held.
+ */
+ public Map<Long, KeyLockInfo> getLockStatusData() {
+ return getLockStatusData(nativeHandle_);
+ }
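+
+  // Usage sketch (illustrative): dumping the held locks. The map key is
+  // assumed to be the column family id, mirroring the native
+  // GetLockStatusData() API.
+  //
+  //   for (final Map.Entry<Long, KeyLockInfo> e :
+  //       txnDb.getLockStatusData().entrySet()) {
+  //     System.out.println("cf=" + e.getKey() + " key=" + e.getValue().getKey()
+  //         + " exclusive=" + e.getValue().isExclusive());
+  //   }
+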
+
+ /**
+ * Called from C++ native method {@link #getDeadlockInfoBuffer(long)}
+ * to construct a DeadlockInfo object.
+ *
+ * @param transactionID The transaction id
+ * @param columnFamilyId The id of the {@link ColumnFamilyHandle}
+ * @param waitingKey the key that we are waiting on
+ * @param exclusive true if the lock is exclusive, false if the lock is shared
+ *
+ * @return The waiting transactions
+ */
+ private DeadlockInfo newDeadlockInfo(
+ final long transactionID, final long columnFamilyId,
+ final String waitingKey, final boolean exclusive) {
+ return new DeadlockInfo(transactionID, columnFamilyId,
+ waitingKey, exclusive);
+ }
+
+ public static class DeadlockInfo {
+ private final long transactionID;
+ private final long columnFamilyId;
+ private final String waitingKey;
+ private final boolean exclusive;
+
+ private DeadlockInfo(final long transactionID, final long columnFamilyId,
+ final String waitingKey, final boolean exclusive) {
+ this.transactionID = transactionID;
+ this.columnFamilyId = columnFamilyId;
+ this.waitingKey = waitingKey;
+ this.exclusive = exclusive;
+ }
+
+ /**
+ * Get the Transaction ID.
+ *
+ * @return the transaction ID
+ */
+ public long getTransactionID() {
+ return transactionID;
+ }
+
+ /**
+ * Get the Column Family ID.
+ *
+ * @return The column family ID
+ */
+ public long getColumnFamilyId() {
+ return columnFamilyId;
+ }
+
+ /**
+ * Get the key that we are waiting on.
+ *
+ * @return the key that we are waiting on
+ */
+ public String getWaitingKey() {
+ return waitingKey;
+ }
+
+ /**
+ * Get the Lock status.
+ *
+ * @return true if the lock is exclusive, false if the lock is shared.
+ */
+ public boolean isExclusive() {
+ return exclusive;
+ }
+ }
+
+ public static class DeadlockPath {
+ final DeadlockInfo[] path;
+ final boolean limitExceeded;
+
+ public DeadlockPath(final DeadlockInfo[] path, final boolean limitExceeded) {
+ this.path = path;
+ this.limitExceeded = limitExceeded;
+ }
+
+ public boolean isEmpty() {
+ return path.length == 0 && !limitExceeded;
+ }
+ }
+
+ public DeadlockPath[] getDeadlockInfoBuffer() {
+ return getDeadlockInfoBuffer(nativeHandle_);
+ }
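+
+  // Usage sketch (illustrative): inspecting the recorded deadlock paths.
+  //
+  //   for (final DeadlockPath path : txnDb.getDeadlockInfoBuffer()) {
+  //     if (!path.isEmpty()) {
+  //       for (final DeadlockInfo info : path.path) {
+  //         System.out.println("txn=" + info.getTransactionID()
+  //             + " waits on key=" + info.getWaitingKey());
+  //       }
+  //     }
+  //   }
+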
+
+ public void setDeadlockInfoBufferSize(final int targetSize) {
+ setDeadlockInfoBufferSize(nativeHandle_, targetSize);
+ }
+
+ private void storeTransactionDbOptions(
+ final TransactionDBOptions transactionDbOptions) {
+ this.transactionDbOptions_ = transactionDbOptions;
+ }
+
+ @Override protected final native void disposeInternal(final long handle);
+
+ private static native long open(final long optionsHandle,
+ final long transactionDbOptionsHandle, final String path)
+ throws RocksDBException;
+ private static native long[] open(final long dbOptionsHandle,
+ final long transactionDbOptionsHandle, final String path,
+ final byte[][] columnFamilyNames, final long[] columnFamilyOptions);
+ private native static void closeDatabase(final long handle)
+ throws RocksDBException;
+ private native long beginTransaction(final long handle,
+ final long writeOptionsHandle);
+ private native long beginTransaction(final long handle,
+ final long writeOptionsHandle, final long transactionOptionsHandle);
+ private native long beginTransaction_withOld(final long handle,
+ final long writeOptionsHandle, final long oldTransactionHandle);
+ private native long beginTransaction_withOld(final long handle,
+ final long writeOptionsHandle, final long transactionOptionsHandle,
+ final long oldTransactionHandle);
+ private native long getTransactionByName(final long handle,
+ final String name);
+ private native long[] getAllPreparedTransactions(final long handle);
+ private native Map<Long, KeyLockInfo> getLockStatusData(
+ final long handle);
+ private native DeadlockPath[] getDeadlockInfoBuffer(final long handle);
+ private native void setDeadlockInfoBufferSize(final long handle,
+ final int targetSize);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDBOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDBOptions.java
new file mode 100644
index 000000000..7f4296a7c
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDBOptions.java
@@ -0,0 +1,217 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class TransactionDBOptions extends RocksObject {
+
+ public TransactionDBOptions() {
+ super(newTransactionDBOptions());
+ }
+
+ /**
+   * Gets the maximum number of keys that can be locked at the same time
+ * per column family.
+ *
+ * If the number of locked keys is greater than {@link #getMaxNumLocks()},
+ * transaction writes (or GetForUpdate) will return an error.
+ *
+ * @return The maximum number of keys that can be locked
+ */
+ public long getMaxNumLocks() {
+ assert(isOwningHandle());
+ return getMaxNumLocks(nativeHandle_);
+ }
+
+ /**
+ * Specifies the maximum number of keys that can be locked at the same time
+ * per column family.
+ *
+ * If the number of locked keys is greater than {@link #getMaxNumLocks()},
+ * transaction writes (or GetForUpdate) will return an error.
+ *
+ * @param maxNumLocks The maximum number of keys that can be locked;
+ * If this value is not positive, no limit will be enforced.
+ *
+ * @return this TransactionDBOptions instance
+ */
+ public TransactionDBOptions setMaxNumLocks(final long maxNumLocks) {
+ assert(isOwningHandle());
+ setMaxNumLocks(nativeHandle_, maxNumLocks);
+ return this;
+ }
+
+ /**
+ * The number of sub-tables per lock table (per column family)
+ *
+ * @return The number of sub-tables
+ */
+ public long getNumStripes() {
+ assert(isOwningHandle());
+ return getNumStripes(nativeHandle_);
+ }
+
+ /**
+ * Increasing this value will increase the concurrency by dividing the lock
+ * table (per column family) into more sub-tables, each with their own
+ * separate mutex.
+ *
+ * Default: 16
+ *
+ * @param numStripes The number of sub-tables
+ *
+ * @return this TransactionDBOptions instance
+ */
+ public TransactionDBOptions setNumStripes(final long numStripes) {
+ assert(isOwningHandle());
+ setNumStripes(nativeHandle_, numStripes);
+ return this;
+ }
+
+ /**
+ * The default wait timeout in milliseconds when
+ * a transaction attempts to lock a key if not specified by
+ * {@link TransactionOptions#setLockTimeout(long)}
+ *
+ * If 0, no waiting is done if a lock cannot instantly be acquired.
+ * If negative, there is no timeout.
+ *
+ * @return the default wait timeout in milliseconds
+ */
+ public long getTransactionLockTimeout() {
+ assert(isOwningHandle());
+ return getTransactionLockTimeout(nativeHandle_);
+ }
+
+ /**
+ * If positive, specifies the default wait timeout in milliseconds when
+ * a transaction attempts to lock a key if not specified by
+ * {@link TransactionOptions#setLockTimeout(long)}
+ *
+ * If 0, no waiting is done if a lock cannot instantly be acquired.
+ * If negative, there is no timeout. Not using a timeout is not recommended
+ * as it can lead to deadlocks. Currently, there is no deadlock-detection to
+ * recover from a deadlock.
+ *
+ * Default: 1000
+ *
+ * @param transactionLockTimeout the default wait timeout in milliseconds
+ *
+ * @return this TransactionDBOptions instance
+ */
+ public TransactionDBOptions setTransactionLockTimeout(
+ final long transactionLockTimeout) {
+ assert(isOwningHandle());
+ setTransactionLockTimeout(nativeHandle_, transactionLockTimeout);
+ return this;
+ }
+
+ /**
+ * The wait timeout in milliseconds when writing a key
+   * OUTSIDE of a transaction (i.e. by calling {@link RocksDB#put},
+ * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write}
+ * directly).
+ *
+ * If 0, no waiting is done if a lock cannot instantly be acquired.
+   * If negative, there is no timeout and the write will block indefinitely
+   * when acquiring a lock.
+ *
+ * @return the timeout in milliseconds when writing a key OUTSIDE of a
+ * transaction
+ */
+ public long getDefaultLockTimeout() {
+ assert(isOwningHandle());
+ return getDefaultLockTimeout(nativeHandle_);
+ }
+
+ /**
+ * If positive, specifies the wait timeout in milliseconds when writing a key
+   * OUTSIDE of a transaction (i.e. by calling {@link RocksDB#put},
+ * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write}
+ * directly).
+ *
+ * If 0, no waiting is done if a lock cannot instantly be acquired.
+   * If negative, there is no timeout and the write will block indefinitely
+   * when acquiring a lock.
+ *
+ * Not using a timeout can lead to deadlocks. Currently, there
+ * is no deadlock-detection to recover from a deadlock. While DB writes
+ * cannot deadlock with other DB writes, they can deadlock with a transaction.
+ * A negative timeout should only be used if all transactions have a small
+ * expiration set.
+ *
+ * Default: 1000
+ *
+ * @param defaultLockTimeout the timeout in milliseconds when writing a key
+ * OUTSIDE of a transaction
+ * @return this TransactionDBOptions instance
+ */
+ public TransactionDBOptions setDefaultLockTimeout(
+ final long defaultLockTimeout) {
+ assert(isOwningHandle());
+ setDefaultLockTimeout(nativeHandle_, defaultLockTimeout);
+ return this;
+ }
+
+// /**
+// * If set, the {@link TransactionDB} will use this implementation of a mutex
+// * and condition variable for all transaction locking instead of the default
+// * mutex/condvar implementation.
+// *
+// * @param transactionDbMutexFactory the mutex factory for the transactions
+// *
+// * @return this TransactionDBOptions instance
+// */
+// public TransactionDBOptions setCustomMutexFactory(
+// final TransactionDBMutexFactory transactionDbMutexFactory) {
+//
+// }
+
+ /**
+ * The policy for when to write the data into the DB. The default policy is to
+ * write only the committed data {@link TxnDBWritePolicy#WRITE_COMMITTED}.
+ * The data could be written before the commit phase. The DB then needs to
+ * provide the mechanisms to tell apart committed from uncommitted data.
+ *
+ * @return The write policy.
+ */
+ public TxnDBWritePolicy getWritePolicy() {
+ assert(isOwningHandle());
+ return TxnDBWritePolicy.getTxnDBWritePolicy(getWritePolicy(nativeHandle_));
+ }
+
+ /**
+ * The policy for when to write the data into the DB. The default policy is to
+ * write only the committed data {@link TxnDBWritePolicy#WRITE_COMMITTED}.
+ * The data could be written before the commit phase. The DB then needs to
+ * provide the mechanisms to tell apart committed from uncommitted data.
+ *
+ * @param writePolicy The write policy.
+ *
+ * @return this TransactionDBOptions instance
+ */
+ public TransactionDBOptions setWritePolicy(
+ final TxnDBWritePolicy writePolicy) {
+ assert(isOwningHandle());
+ setWritePolicy(nativeHandle_, writePolicy.getValue());
+ return this;
+ }
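+
+  // Usage sketch (illustrative): a typical configuration using the setters
+  // above. The path "/tmp/txndb" is a placeholder.
+  //
+  //   try (final Options options = new Options().setCreateIfMissing(true);
+  //        final TransactionDBOptions txnDbOptions = new TransactionDBOptions()
+  //            .setTransactionLockTimeout(1000)
+  //            .setNumStripes(32);
+  //        final TransactionDB txnDb =
+  //            TransactionDB.open(options, txnDbOptions, "/tmp/txndb")) {
+  //     // use txnDb ...
+  //   }
+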
+
+ private native static long newTransactionDBOptions();
+ private native long getMaxNumLocks(final long handle);
+ private native void setMaxNumLocks(final long handle,
+ final long maxNumLocks);
+ private native long getNumStripes(final long handle);
+ private native void setNumStripes(final long handle, final long numStripes);
+ private native long getTransactionLockTimeout(final long handle);
+ private native void setTransactionLockTimeout(final long handle,
+ final long transactionLockTimeout);
+ private native long getDefaultLockTimeout(final long handle);
+ private native void setDefaultLockTimeout(final long handle,
+ final long transactionLockTimeout);
+ private native byte getWritePolicy(final long handle);
+ private native void setWritePolicy(final long handle, final byte writePolicy);
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java
new file mode 100644
index 000000000..5d9ec58d7
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java
@@ -0,0 +1,112 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+/**
+ * <p>A TransactionLogIterator is used to iterate over the transactions in a db.
+ * One run of the iterator is continuous, i.e. the iterator will stop at the
+ * beginning of any gap in sequences.</p>
+ */
+public class TransactionLogIterator extends RocksObject {
+
+ /**
+ * <p>An iterator is either positioned at a WriteBatch
+ * or not valid. This method returns true if the iterator
+   * is valid. Data can only be read from a valid iterator.</p>
+ *
+ * @return true if iterator position is valid.
+ */
+ public boolean isValid() {
+ return isValid(nativeHandle_);
+ }
+
+ /**
+ * <p>Moves the iterator to the next WriteBatch.
+ * <strong>REQUIRES</strong>: Valid() to be true.</p>
+ */
+ public void next() {
+ next(nativeHandle_);
+ }
+
+ /**
+   * <p>Checks the status of the iterator; throws a RocksDBException
+   * if something went wrong.</p>
+ *
+ * @throws org.rocksdb.RocksDBException if something went
+ * wrong in the underlying C++ code.
+ */
+ public void status() throws RocksDBException {
+ status(nativeHandle_);
+ }
+
+ /**
+ * <p>If iterator position is valid, return the current
+ * write_batch and the sequence number of the earliest
+ * transaction contained in the batch.</p>
+ *
+ * <p>ONLY use if Valid() is true and status() is OK.</p>
+ *
+ * @return {@link org.rocksdb.TransactionLogIterator.BatchResult}
+ * instance.
+ */
+ public BatchResult getBatch() {
+ assert(isValid());
+ return getBatch(nativeHandle_);
+ }
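+
+  // Usage sketch (illustrative): replaying all updates from a given sequence
+  // number. Assumes `db` is an open RocksDB; RocksDB#getUpdatesSince returns
+  // a TransactionLogIterator over the WAL.
+  //
+  //   try (final TransactionLogIterator it = db.getUpdatesSince(seqNumber)) {
+  //     while (it.isValid()) {
+  //       it.status(); // throws if the underlying iteration failed
+  //       final BatchResult result = it.getBatch();
+  //       System.out.println("batch at seq=" + result.sequenceNumber());
+  //       it.next();
+  //     }
+  //   }
+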
+
+ /**
+ * <p>TransactionLogIterator constructor.</p>
+ *
+   * @param nativeHandle address of the native instance.
+ */
+ TransactionLogIterator(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * <p>BatchResult represents a data structure returned
+ * by a TransactionLogIterator containing a sequence
+ * number and a {@link WriteBatch} instance.</p>
+ */
+ public static final class BatchResult {
+ /**
+ * <p>Constructor of BatchResult class.</p>
+ *
+ * @param sequenceNumber related to this BatchResult instance.
+     * @param nativeHandle address of the {@link org.rocksdb.WriteBatch}
+ * native instance.
+ */
+ public BatchResult(final long sequenceNumber,
+ final long nativeHandle) {
+ sequenceNumber_ = sequenceNumber;
+ writeBatch_ = new WriteBatch(nativeHandle, true);
+ }
+
+ /**
+ * <p>Return sequence number related to this BatchResult.</p>
+ *
+ * @return Sequence number.
+ */
+ public long sequenceNumber() {
+ return sequenceNumber_;
+ }
+
+ /**
+ * <p>Return contained {@link org.rocksdb.WriteBatch}
+ * instance</p>
+ *
+ * @return {@link org.rocksdb.WriteBatch} instance.
+ */
+ public WriteBatch writeBatch() {
+ return writeBatch_;
+ }
+
+ private final long sequenceNumber_;
+ private final WriteBatch writeBatch_;
+ }
+
+ @Override protected final native void disposeInternal(final long handle);
+ private native boolean isValid(long handle);
+ private native void next(long handle);
+ private native void status(long handle)
+ throws RocksDBException;
+ private native BatchResult getBatch(long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionOptions.java
new file mode 100644
index 000000000..195fc85e4
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionOptions.java
@@ -0,0 +1,189 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class TransactionOptions extends RocksObject
+ implements TransactionalOptions<TransactionOptions> {
+
+ public TransactionOptions() {
+ super(newTransactionOptions());
+ }
+
+ @Override
+ public boolean isSetSnapshot() {
+ assert(isOwningHandle());
+ return isSetSnapshot(nativeHandle_);
+ }
+
+ @Override
+ public TransactionOptions setSetSnapshot(final boolean setSnapshot) {
+ assert(isOwningHandle());
+ setSetSnapshot(nativeHandle_, setSnapshot);
+ return this;
+ }
+
+ /**
+ * True means that before acquiring locks, this transaction will
+ * check if doing so will cause a deadlock. If so, it will return with
+ * {@link Status.Code#Busy}. The user should retry their transaction.
+ *
+ * @return true if a deadlock is detected.
+ */
+ public boolean isDeadlockDetect() {
+ assert(isOwningHandle());
+ return isDeadlockDetect(nativeHandle_);
+ }
+
+ /**
+ * Setting to true means that before acquiring locks, this transaction will
+ * check if doing so will cause a deadlock. If so, it will return with
+ * {@link Status.Code#Busy}. The user should retry their transaction.
+ *
+ * @param deadlockDetect true if we should detect deadlocks.
+ *
+ * @return this TransactionOptions instance
+ */
+ public TransactionOptions setDeadlockDetect(final boolean deadlockDetect) {
+ assert(isOwningHandle());
+ setDeadlockDetect(nativeHandle_, deadlockDetect);
+ return this;
+ }
+
+ /**
+ * The wait timeout in milliseconds when a transaction attempts to lock a key.
+ *
+ * If 0, no waiting is done if a lock cannot instantly be acquired.
+   * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()}
+   * will be used.
+ *
+ * @return the lock timeout in milliseconds
+ */
+ public long getLockTimeout() {
+ assert(isOwningHandle());
+ return getLockTimeout(nativeHandle_);
+ }
+
+ /**
+ * If positive, specifies the wait timeout in milliseconds when
+ * a transaction attempts to lock a key.
+ *
+ * If 0, no waiting is done if a lock cannot instantly be acquired.
+   * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()}
+   * will be used.
+ *
+ * Default: -1
+ *
+ * @param lockTimeout the lock timeout in milliseconds
+ *
+ * @return this TransactionOptions instance
+ */
+ public TransactionOptions setLockTimeout(final long lockTimeout) {
+ assert(isOwningHandle());
+ setLockTimeout(nativeHandle_, lockTimeout);
+ return this;
+ }
+
+ /**
+ * Expiration duration in milliseconds.
+ *
+ * If non-negative, transactions that last longer than this many milliseconds
+ * will fail to commit. If not set, a forgotten transaction that is never
+ * committed, rolled back, or deleted will never relinquish any locks it
+ * holds. This could prevent keys from being written by other writers.
+ *
+ * @return expiration the expiration duration in milliseconds
+ */
+ public long getExpiration() {
+ assert(isOwningHandle());
+ return getExpiration(nativeHandle_);
+ }
+
+ /**
+ * Expiration duration in milliseconds.
+ *
+ * If non-negative, transactions that last longer than this many milliseconds
+ * will fail to commit. If not set, a forgotten transaction that is never
+ * committed, rolled back, or deleted will never relinquish any locks it
+ * holds. This could prevent keys from being written by other writers.
+ *
+ * Default: -1
+ *
+ * @param expiration the expiration duration in milliseconds
+ *
+ * @return this TransactionOptions instance
+ */
+ public TransactionOptions setExpiration(final long expiration) {
+ assert(isOwningHandle());
+ setExpiration(nativeHandle_, expiration);
+ return this;
+ }
+
+ /**
+ * Gets the number of traversals to make during deadlock detection.
+ *
+ * @return the number of traversals to make during
+ * deadlock detection
+ */
+ public long getDeadlockDetectDepth() {
+ return getDeadlockDetectDepth(nativeHandle_);
+ }
+
+ /**
+ * Sets the number of traversals to make during deadlock detection.
+ *
+ * Default: 50
+ *
+ * @param deadlockDetectDepth the number of traversals to make during
+ * deadlock detection
+ *
+ * @return this TransactionOptions instance
+ */
+ public TransactionOptions setDeadlockDetectDepth(
+ final long deadlockDetectDepth) {
+ setDeadlockDetectDepth(nativeHandle_, deadlockDetectDepth);
+ return this;
+ }
+
+ /**
+ * Get the maximum number of bytes that may be used for the write batch.
+ *
+ * @return the maximum number of bytes, 0 means no limit.
+ */
+ public long getMaxWriteBatchSize() {
+ return getMaxWriteBatchSize(nativeHandle_);
+ }
+
+ /**
+ * Set the maximum number of bytes that may be used for the write batch.
+ *
+ * @param maxWriteBatchSize the maximum number of bytes, 0 means no limit.
+ *
+ * @return this TransactionOptions instance
+ */
+ public TransactionOptions setMaxWriteBatchSize(final long maxWriteBatchSize) {
+ setMaxWriteBatchSize(nativeHandle_, maxWriteBatchSize);
+ return this;
+ }
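+
+  // Usage sketch (illustrative): beginning a transaction with these options.
+  // Assumes `txnDb` and `writeOptions` exist; exception handling is elided.
+  //
+  //   try (final TransactionOptions txnOptions = new TransactionOptions()
+  //            .setSetSnapshot(true)
+  //            .setDeadlockDetect(true)
+  //            .setLockTimeout(500);
+  //        final Transaction txn =
+  //            txnDb.beginTransaction(writeOptions, txnOptions)) {
+  //     txn.put("k".getBytes(), "v".getBytes());
+  //     txn.commit();
+  //   }
+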
+
+ private native static long newTransactionOptions();
+ private native boolean isSetSnapshot(final long handle);
+ private native void setSetSnapshot(final long handle,
+ final boolean setSnapshot);
+ private native boolean isDeadlockDetect(final long handle);
+ private native void setDeadlockDetect(final long handle,
+ final boolean deadlockDetect);
+ private native long getLockTimeout(final long handle);
+ private native void setLockTimeout(final long handle, final long lockTimeout);
+ private native long getExpiration(final long handle);
+ private native void setExpiration(final long handle, final long expiration);
+ private native long getDeadlockDetectDepth(final long handle);
+ private native void setDeadlockDetectDepth(final long handle,
+ final long deadlockDetectDepth);
+ private native long getMaxWriteBatchSize(final long handle);
+ private native void setMaxWriteBatchSize(final long handle,
+ final long maxWriteBatchSize);
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalDB.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalDB.java
new file mode 100644
index 000000000..3f0eceda8
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalDB.java
@@ -0,0 +1,68 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+
+interface TransactionalDB<T extends TransactionalOptions>
+ extends AutoCloseable {
+
+ /**
+ * Starts a new Transaction.
+ *
+ * Caller is responsible for calling {@link #close()} on the returned
+ * transaction when it is no longer needed.
+ *
+ * @param writeOptions Any write options for the transaction
+ * @return a new transaction
+ */
+ Transaction beginTransaction(final WriteOptions writeOptions);
+
+ /**
+ * Starts a new Transaction.
+ *
+ * Caller is responsible for calling {@link #close()} on the returned
+ * transaction when it is no longer needed.
+ *
+ * @param writeOptions Any write options for the transaction
+ * @param transactionOptions Any options for the transaction
+ * @return a new transaction
+ */
+ Transaction beginTransaction(final WriteOptions writeOptions,
+ final T transactionOptions);
+
+ /**
+ * Starts a new Transaction.
+ *
+ * Caller is responsible for calling {@link #close()} on the returned
+ * transaction when it is no longer needed.
+ *
+ * @param writeOptions Any write options for the transaction
+ * @param oldTransaction this Transaction will be reused instead of allocating
+ * a new one. This is an optimization to avoid extra allocations
+ * when repeatedly creating transactions.
+ * @return The oldTransaction which has been reinitialized as a new
+ * transaction
+ */
+ Transaction beginTransaction(final WriteOptions writeOptions,
+ final Transaction oldTransaction);
+
+ /**
+ * Starts a new Transaction.
+ *
+ * Caller is responsible for calling {@link #close()} on the returned
+ * transaction when it is no longer needed.
+ *
+ * @param writeOptions Any write options for the transaction
+ * @param transactionOptions Any options for the transaction
+ * @param oldTransaction this Transaction will be reused instead of allocating
+ * a new one. This is an optimization to avoid extra allocations
+ * when repeatedly creating transactions.
+ * @return The oldTransaction which has been reinitialized as a new
+ * transaction
+ */
+ Transaction beginTransaction(final WriteOptions writeOptions,
+ final T transactionOptions, final Transaction oldTransaction);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalOptions.java
new file mode 100644
index 000000000..d55ee900c
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalOptions.java
@@ -0,0 +1,31 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+
+interface TransactionalOptions<T extends TransactionalOptions<T>>
+ extends AutoCloseable {
+
+ /**
+   * True indicates snapshots will be set, as if
+   * {@link Transaction#setSnapshot()} had been called.
+ *
+ * @return whether a snapshot will be set
+ */
+ boolean isSetSnapshot();
+
+ /**
+   * Setting setSnapshot to true is the same as calling
+ * {@link Transaction#setSnapshot()}.
+ *
+ * Default: false
+ *
+ * @param setSnapshot Whether to set a snapshot
+ *
+ * @return this TransactionalOptions instance
+ */
+ T setSetSnapshot(final boolean setSnapshot);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java b/src/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java
new file mode 100644
index 000000000..c1e3bb473
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java
@@ -0,0 +1,245 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * Database with TTL support.
+ *
+ * <p><strong>Use case</strong></p>
+ * <p>This API should be used to open the db when key-values inserted are
+ * meant to be removed from the db in a non-strict 'ttl' amount of time.
+ * It therefore guarantees that key-values inserted will remain in the
+ * db for &gt;= ttl amount of time and the db will make efforts to remove the
+ * key-values as soon as possible after ttl seconds of their insertion.
+ * </p>
+ *
+ * <p><strong>Behaviour</strong></p>
+ * <ul>
+ * <li>TTL is accepted in seconds.</li>
+ * <li>(int32_t)Timestamp(creation) is suffixed to values in Put internally.</li>
+ * <li>Expired TTL values are deleted in compaction only:
+ * (Timestamp + ttl &lt; time_now).</li>
+ * <li>Get/Iterator may return expired entries (compaction has not run on
+ * them yet).</li>
+ * <li>Different TTL may be used during different Opens.</li>
+ * </ul>
+ *
+ * <p><strong>Example</strong></p>
+ * <ul>
+ * <li>Open1 at t=0 with ttl=4 and insert k1,k2, close at t=2</li>
+ * <li>Open2 at t=3 with ttl=5. Now k1,k2 should be deleted at t&gt;=5</li>
+ * </ul>
+ *
+ * <p>
+ * read_only=true opens in the usual read-only mode. Compactions will not be
+ * triggered (neither manual nor automatic), so no expired entries are removed.
+ * </p>
+ *
+ * <p><strong>Constraints</strong></p>
+ * <p>Not specifying a TTL, or passing a non-positive TTL, behaves
+ * like TTL = infinity.</p>
+ *
+ * <p><strong>!!!WARNING!!!</strong></p>
+ * <p>Calling DB::Open directly to re-open a db created by this API will return
+ * corrupt values (timestamp suffixed) and no ttl effect will apply
+ * during the second Open, so use this API consistently to open the db.
+ * Be careful when passing ttl with a small positive value because the
+ * whole database may be deleted in a small amount of time.</p>
+ */
+public class TtlDB extends RocksDB {
+
+ /**
+ * <p>Opens a TtlDB.</p>
+ *
+ * <p>Database is opened in read-write mode without default TTL.</p>
+ *
+ * @param options {@link org.rocksdb.Options} instance.
+ * @param db_path path to database.
+ *
+ * @return TtlDB instance.
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public static TtlDB open(final Options options, final String db_path)
+ throws RocksDBException {
+ return open(options, db_path, 0, false);
+ }
+
+ /**
+ * <p>Opens a TtlDB.</p>
+ *
+ * @param options {@link org.rocksdb.Options} instance.
+ * @param db_path path to database.
+ * @param ttl time to live for new entries.
+   * @param readOnly boolean value indicating if the db is
+ * opened read-only.
+ *
+ * @return TtlDB instance.
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public static TtlDB open(final Options options, final String db_path,
+ final int ttl, final boolean readOnly) throws RocksDBException {
+ return new TtlDB(open(options.nativeHandle_, db_path, ttl, readOnly));
+ }
+
+ /**
+ * <p>Opens a TtlDB.</p>
+ *
+ * @param options {@link org.rocksdb.Options} instance.
+ * @param db_path path to database.
+ * @param columnFamilyDescriptors list of column family descriptors
+ * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
+ * on open.
+ * @param ttlValues time to live values per column family handle
+   * @param readOnly boolean value indicating if the db is
+ * opened read-only.
+ *
+ * @return TtlDB instance.
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ * @throws java.lang.IllegalArgumentException when there is not a ttl value
+ * per given column family handle.
+ */
+ public static TtlDB open(final DBOptions options, final String db_path,
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
+ final List<ColumnFamilyHandle> columnFamilyHandles,
+ final List<Integer> ttlValues, final boolean readOnly)
+ throws RocksDBException {
+ if (columnFamilyDescriptors.size() != ttlValues.size()) {
+ throw new IllegalArgumentException("There must be a ttl value per column"
+ + "family handle.");
+ }
+
+ final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
+ final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
+ for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
+ final ColumnFamilyDescriptor cfDescriptor =
+ columnFamilyDescriptors.get(i);
+ cfNames[i] = cfDescriptor.getName();
+ cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_;
+ }
+
+    final int[] ttlVals = new int[ttlValues.size()];
+ for(int i = 0; i < ttlValues.size(); i++) {
+ ttlVals[i] = ttlValues.get(i);
+ }
+ final long[] handles = openCF(options.nativeHandle_, db_path,
+ cfNames, cfOptionHandles, ttlVals, readOnly);
+
+ final TtlDB ttlDB = new TtlDB(handles[0]);
+ for (int i = 1; i < handles.length; i++) {
+ columnFamilyHandles.add(new ColumnFamilyHandle(ttlDB, handles[i]));
+ }
+ return ttlDB;
+ }
+
+ /**
+ * <p>Close the TtlDB instance and release resource.</p>
+ *
+ * This is similar to {@link #close()} except that it
+ * throws an exception if any error occurs.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ *
+ * @throws RocksDBException if an error occurs whilst closing.
+ */
+ public void closeE() throws RocksDBException {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
+
+ /**
+ * <p>Close the TtlDB instance and release resource.</p>
+ *
+   * This is similar to {@link #closeE()} except that it
+   * silently ignores any errors.
+   *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ */
+ @Override
+ public void close() {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } catch (final RocksDBException e) {
+ // silently ignore the error report
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
+
+ /**
+ * <p>Creates a new ttl based column family with a name defined
+ * in given ColumnFamilyDescriptor and allocates a
+ * ColumnFamilyHandle within an internal structure.</p>
+ *
+ * <p>The ColumnFamilyHandle is automatically disposed with DB
+ * disposal.</p>
+ *
+ * @param columnFamilyDescriptor column family to be created.
+ * @param ttl TTL to set for this column family.
+ *
+ * @return {@link org.rocksdb.ColumnFamilyHandle} instance.
+ *
+   * @throws RocksDBException thrown if an error happens in the underlying
+ * native library.
+ */
+ public ColumnFamilyHandle createColumnFamilyWithTtl(
+ final ColumnFamilyDescriptor columnFamilyDescriptor,
+ final int ttl) throws RocksDBException {
+ return new ColumnFamilyHandle(this,
+ createColumnFamilyWithTtl(nativeHandle_,
+ columnFamilyDescriptor.getName(),
+ columnFamilyDescriptor.getOptions().nativeHandle_, ttl));
+ }
+
+ /**
+ * <p>A protected constructor that will be used in the static
+ * factory method
+ * {@link #open(Options, String, int, boolean)}
+ * and
+ * {@link #open(DBOptions, String, java.util.List, java.util.List,
+ * java.util.List, boolean)}.
+ * </p>
+ *
+ * @param nativeHandle The native handle of the C++ TtlDB object
+ */
+ protected TtlDB(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ @Override protected native void disposeInternal(final long handle);
+
+ private native static long open(final long optionsHandle,
+ final String db_path, final int ttl, final boolean readOnly)
+ throws RocksDBException;
+ private native static long[] openCF(final long optionsHandle,
+ final String db_path, final byte[][] columnFamilyNames,
+ final long[] columnFamilyOptions, final int[] ttlValues,
+ final boolean readOnly) throws RocksDBException;
+ private native long createColumnFamilyWithTtl(final long handle,
+ final byte[] columnFamilyName, final long columnFamilyOptions, int ttl)
+ throws RocksDBException;
+ private native static void closeDatabase(final long handle)
+ throws RocksDBException;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java b/src/rocksdb/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java
new file mode 100644
index 000000000..837ce6157
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java
@@ -0,0 +1,62 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * The transaction db write policy.
+ */
+public enum TxnDBWritePolicy {
+ /**
+ * Write only the committed data.
+ */
+ WRITE_COMMITTED((byte)0x00),
+
+ /**
+ * Write data after the prepare phase of 2pc.
+ */
+ WRITE_PREPARED((byte)0x1),
+
+ /**
+ * Write data before the prepare phase of 2pc.
+ */
+ WRITE_UNPREPARED((byte)0x2);
+
+ private byte value;
+
+ TxnDBWritePolicy(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * <p>Returns the byte value of the enumerations value.</p>
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * <p>Get the TxnDBWritePolicy enumeration value by
+ * passing the byte identifier to this method.</p>
+ *
+ * @param byteIdentifier of TxnDBWritePolicy.
+ *
+ * @return TxnDBWritePolicy instance.
+ *
+ * @throws IllegalArgumentException If TxnDBWritePolicy cannot be found for
+ * the provided byteIdentifier
+ */
+ public static TxnDBWritePolicy getTxnDBWritePolicy(final byte byteIdentifier) {
+ for (final TxnDBWritePolicy txnDBWritePolicy : TxnDBWritePolicy.values()) {
+ if (txnDBWritePolicy.getValue() == byteIdentifier) {
+ return txnDBWritePolicy;
+ }
+ }
+
+ throw new IllegalArgumentException(
+ "Illegal value provided for TxnDBWritePolicy.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/src/rocksdb/java/src/main/java/org/rocksdb/UInt64AddOperator.java
new file mode 100644
index 000000000..cce9b298d
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/UInt64AddOperator.java
@@ -0,0 +1,19 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * UInt64AddOperator is a merge operator that accumulates a long
+ * integer value.
+ */
+public class UInt64AddOperator extends MergeOperator {
+ public UInt64AddOperator() {
+ super(newSharedUInt64AddOperator());
+ }
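+
+  // Usage sketch (illustrative): attaching the operator and merging counter
+  // increments. The operand encoding as an 8-byte little-endian integer is
+  // an assumption about the native operator; verify before relying on it.
+  //
+  //   final byte[] one = java.nio.ByteBuffer.allocate(8)
+  //       .order(java.nio.ByteOrder.LITTLE_ENDIAN).putLong(1).array();
+  //   try (final UInt64AddOperator addOp = new UInt64AddOperator();
+  //        final Options options = new Options()
+  //            .setCreateIfMissing(true).setMergeOperator(addOp);
+  //        final RocksDB db = RocksDB.open(options, "/tmp/counterdb")) {
+  //     db.merge("hits".getBytes(), one); // counter = 1
+  //     db.merge("hits".getBytes(), one); // counter = 2
+  //   }
+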
+
+ private native static long newSharedUInt64AddOperator();
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
new file mode 100644
index 000000000..fb1e7a948
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
@@ -0,0 +1,46 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+/**
+ * The config for vector memtable representation.
+ */
+public class VectorMemTableConfig extends MemTableConfig {
+ public static final int DEFAULT_RESERVED_SIZE = 0;
+
+ /**
+ * VectorMemTableConfig constructor
+ */
+ public VectorMemTableConfig() {
+ reservedSize_ = DEFAULT_RESERVED_SIZE;
+ }
+
+ /**
+ * Set the initial size of the vector that will be used
+ * by the memtable created based on this config.
+ *
+ * @param size the initial size of the vector.
+ * @return the reference to the current config.
+ */
+ public VectorMemTableConfig setReservedSize(final int size) {
+ reservedSize_ = size;
+ return this;
+ }
+
+ /**
+ * Returns the initial size of the vector used by the memtable
+ * created based on this config.
+ *
+ * @return the initial size of the vector.
+ */
+ public int reservedSize() {
+ return reservedSize_;
+ }
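+
+  // Usage sketch (illustrative): selecting the vector memtable via Options.
+  //
+  //   final Options options = new Options().setMemTableConfig(
+  //       new VectorMemTableConfig().setReservedSize(10000));
+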
+
+ @Override protected long newMemTableFactoryHandle() {
+ return newMemTableFactoryHandle(reservedSize_);
+ }
+
+ private native long newMemTableFactoryHandle(long reservedSize)
+ throws IllegalArgumentException;
+ private int reservedSize_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java b/src/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java
new file mode 100644
index 000000000..d8b9eeced
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java
@@ -0,0 +1,83 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The WAL Recovery Mode.
+ */
+public enum WALRecoveryMode {
+
+ /**
+   * Original LevelDB recovery.
+   *
+   * We tolerate incomplete records in trailing data on all logs.
+   * Use case: This is legacy behavior (default).
+ */
+ TolerateCorruptedTailRecords((byte)0x00),
+
+ /**
+   * Recover from a clean shutdown.
+   *
+   * We don't expect to find any corruption in the WAL.
+   * Use case: This is ideal for unit tests and rare applications that
+   * require a high consistency guarantee.
+ */
+ AbsoluteConsistency((byte)0x01),
+
+ /**
+   * Recover to point-in-time consistency.
+   *
+   * We stop the WAL playback on discovering a WAL inconsistency.
+   * Use case: Ideal for systems that have a disk controller cache, like
+   * hard disks, or SSDs without a super capacitor, that store related data.
+ */
+ PointInTimeRecovery((byte)0x02),
+
+ /**
+   * Recovery after a disaster.
+   *
+   * We ignore any corruption in the WAL and try to salvage as much data as
+   * possible.
+   * Use case: Ideal for a last-ditch effort to recover data, or for systems
+   * that operate with low-grade, unrelated data.
+ */
+ SkipAnyCorruptedRecords((byte)0x03);
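+
+  // Usage sketch (illustrative): selecting a recovery mode on the options
+  // used to open the database.
+  //
+  //   final Options options = new Options()
+  //       .setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery);
+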
+
+ private byte value;
+
+ WALRecoveryMode(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * <p>Returns the byte value of the enumerations value.</p>
+ *
+ * @return byte representation
+ */
+ public byte getValue() {
+ return value;
+ }
+
+ /**
+ * <p>Get the WALRecoveryMode enumeration value by
+ * passing the byte identifier to this method.</p>
+ *
+ * @param byteIdentifier of WALRecoveryMode.
+ *
+ * @return WALRecoveryMode instance.
+ *
+ * @throws IllegalArgumentException If WALRecoveryMode cannot be found for the
+ * provided byteIdentifier
+ */
+ public static WALRecoveryMode getWALRecoveryMode(final byte byteIdentifier) {
+ for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
+ if (walRecoveryMode.getValue() == byteIdentifier) {
+ return walRecoveryMode;
+ }
+ }
+
+ throw new IllegalArgumentException(
+ "Illegal value provided for WALRecoveryMode.");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/src/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
new file mode 100644
index 000000000..60922ae4b
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
@@ -0,0 +1,197 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+public class WBWIRocksIterator
+ extends AbstractRocksIterator<WriteBatchWithIndex> {
+ private final WriteEntry entry = new WriteEntry();
+
+ protected WBWIRocksIterator(final WriteBatchWithIndex wbwi,
+ final long nativeHandle) {
+ super(wbwi, nativeHandle);
+ }
+
+ /**
+ * Get the current entry
+ *
+ * The WriteEntry is only valid
+ * until the iterator is repositioned.
+ * If you want to keep the WriteEntry across iterator
+ * movements, you must make a copy of its data!
+ *
+ * Note - This method is not thread-safe with respect to the WriteEntry
+ * as it performs a non-atomic update across the fields of the WriteEntry
+ *
+ * @return The WriteEntry of the current entry
+ */
+ public WriteEntry entry() {
+ assert(isOwningHandle());
+    final long[] ptrs = entry1(nativeHandle_);
+
+ entry.type = WriteType.fromId((byte)ptrs[0]);
+ entry.key.resetNativeHandle(ptrs[1], ptrs[1] != 0);
+ entry.value.resetNativeHandle(ptrs[2], ptrs[2] != 0);
+
+ return entry;
+ }
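+
+  // Usage sketch (illustrative): copying the key out of the WriteEntry before
+  // repositioning the iterator, since the entry is only valid until the next
+  // move. Assumes `iter` is a WBWIRocksIterator positioned on a valid entry.
+  //
+  //   final WriteEntry we = iter.entry();
+  //   final byte[] keyCopy = new byte[we.getKey().data().remaining()];
+  //   we.getKey().data().get(keyCopy); // copy before calling iter.next()
+  //   iter.next();
+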
+
+ @Override protected final native void disposeInternal(final long handle);
+ @Override final native boolean isValid0(long handle);
+ @Override final native void seekToFirst0(long handle);
+ @Override final native void seekToLast0(long handle);
+ @Override final native void next0(long handle);
+ @Override final native void prev0(long handle);
+ @Override final native void seek0(long handle, byte[] target, int targetLen);
+ @Override final native void seekForPrev0(long handle, byte[] target, int targetLen);
+ @Override final native void status0(long handle) throws RocksDBException;
+ @Override
+ final native void seekDirect0(long handle, ByteBuffer target, int targetOffset, int targetLen);
+
+ private native long[] entry1(final long handle);
+
+ /**
+ * Enumeration of the Write operation
+ * that created the record in the Write Batch
+ */
+ public enum WriteType {
+ PUT((byte)0x0),
+ MERGE((byte)0x1),
+ DELETE((byte)0x2),
+ SINGLE_DELETE((byte)0x3),
+ DELETE_RANGE((byte)0x4),
+ LOG((byte)0x5),
+ XID((byte)0x6);
+
+ final byte id;
+ WriteType(final byte id) {
+ this.id = id;
+ }
+
+ public static WriteType fromId(final byte id) {
+ for(final WriteType wt : WriteType.values()) {
+ if(id == wt.id) {
+ return wt;
+ }
+ }
+ throw new IllegalArgumentException("No WriteType with id=" + id);
+ }
+ }
+
+ @Override
+ public void close() {
+ entry.close();
+ super.close();
+ }
+
+ /**
+ * Represents an entry returned by
+ * {@link org.rocksdb.WBWIRocksIterator#entry()}
+ *
+ * It is worth noting that a WriteEntry with
+ * the type {@link org.rocksdb.WBWIRocksIterator.WriteType#DELETE}
+ * or {@link org.rocksdb.WBWIRocksIterator.WriteType#LOG}
+ * will not have a value.
+ */
+ public static class WriteEntry implements AutoCloseable {
+ WriteType type = null;
+ final DirectSlice key;
+ final DirectSlice value;
+
+ /**
+ * Intentionally private as this
+ * should only be instantiated in
+ * this manner by the outer WBWIRocksIterator
+ * class; The class members are then modified
+ * by calling {@link org.rocksdb.WBWIRocksIterator#entry()}
+ */
+ private WriteEntry() {
+ key = new DirectSlice();
+ value = new DirectSlice();
+ }
+
+ public WriteEntry(final WriteType type, final DirectSlice key,
+ final DirectSlice value) {
+ this.type = type;
+ this.key = key;
+ this.value = value;
+ }
+
+ /**
+ * Returns the type of the Write Entry
+ *
+ * @return the WriteType of the WriteEntry
+ */
+ public WriteType getType() {
+ return type;
+ }
+
+ /**
+ * Returns the key of the Write Entry
+ *
+ * @return The slice containing the key
+ * of the WriteEntry
+ */
+ public DirectSlice getKey() {
+ return key;
+ }
+
+ /**
+ * Returns the value of the Write Entry
+ *
+ * @return The slice containing the value of
+ * the WriteEntry or null if the WriteEntry has
+ * no value
+ */
+ public DirectSlice getValue() {
+ if(!value.isOwningHandle()) {
+ return null; //TODO(AR) migrate to JDK8 java.util.Optional#empty()
+ } else {
+ return value;
+ }
+ }
+
+ /**
+ * Generates a hash code for the Write Entry. NOTE: The hash code is based
+ * on the string representation of the key, so it may not work correctly
+ * with exotic custom comparators.
+ *
+ * @return The hash code for the Write Entry
+ */
+ @Override
+ public int hashCode() {
+ return (key == null) ? 0 : key.hashCode();
+ }
+
+ @Override
+ public boolean equals(final Object other) {
+ if(other == null) {
+ return false;
+ } else if (this == other) {
+ return true;
+ } else if(other instanceof WriteEntry) {
+ final WriteEntry otherWriteEntry = (WriteEntry)other;
+ return type.equals(otherWriteEntry.type)
+ && key.equals(otherWriteEntry.key)
+ && value.equals(otherWriteEntry.value);
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public void close() {
+ value.close();
+ key.close();
+ }
+ }
+
+ @Override
+ void seekForPrevDirect0(long handle, ByteBuffer target, int targetOffset, int targetLen) {
+ throw new IllegalAccessError("Not implemented");
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WalFileType.java b/src/rocksdb/java/src/main/java/org/rocksdb/WalFileType.java
new file mode 100644
index 000000000..fed27ed11
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WalFileType.java
@@ -0,0 +1,55 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public enum WalFileType {
+ /**
+   * Indicates that the WAL file is in the archive directory. WAL files are
+   * moved from the main db directory to the archive directory once they are
+   * no longer live, and stay there until cleaned up. Files are cleaned
+   * depending on archive size (Options::WAL_size_limit_MB) and time since
+   * last cleaning (Options::WAL_ttl_seconds).
+ */
+ kArchivedLogFile((byte)0x0),
+
+ /**
+   * Indicates that the WAL file is live and resides in the main db directory.
+ */
+ kAliveLogFile((byte)0x1);
+
+ private final byte value;
+
+ WalFileType(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal representation value.
+ *
+ * @return the internal representation value
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get the WalFileType from the internal representation value.
+ *
+ * @return the wal file type.
+ *
+ * @throws IllegalArgumentException if the value is unknown.
+ */
+ static WalFileType fromValue(final byte value) {
+ for (final WalFileType walFileType : WalFileType.values()) {
+ if(walFileType.value == value) {
+ return walFileType;
+ }
+ }
+
+ throw new IllegalArgumentException(
+ "Illegal value provided for WalFileType: " + value);
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WalFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/WalFilter.java
new file mode 100644
index 000000000..37e36213a
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WalFilter.java
@@ -0,0 +1,87 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Map;
+
+/**
+ * WalFilter allows an application to inspect write-ahead-log (WAL)
+ * records or modify their processing on recovery.
+ */
+public interface WalFilter {
+
+ /**
+   * Provide the ColumnFamily-&gt;LogNumber map to the filter
+   * so that the filter can determine whether a log number applies to a given
+   * column family (i.e. that log hasn't been flushed to SST already for the
+   * column family).
+   *
+   * We also pass in a name-&gt;id map, as only the name is known during
+   * recovery (handles are opened post-recovery), while write batch
+   * callbacks happen in terms of column family id.
+ *
+ * @param cfLognumber column_family_id to lognumber map
+ * @param cfNameId column_family_name to column_family_id map
+ */
+ void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
+ final Map<String, Integer> cfNameId);
+
+ /**
+   * logRecordFound is invoked for each log record encountered in all the logs
+   * during replay on recovery. This method can be used to:
+   * * inspect the record (using the batch parameter)
+   * * ignore the current record
+   * (by returning WalProcessingOption::kIgnoreCurrentRecord)
+   * * report a corrupted record
+   * (by returning WalProcessingOption::kCorruptedRecord)
+   * * stop log replay
+   * (by returning WalProcessingOption::kStopReplay) - please note that this
+   * implies discarding the logs from the current record onwards.
+ *
+ * @param logNumber log number of the current log.
+ * Filter might use this to determine if the log
+ * record is applicable to a certain column family.
+ * @param logFileName log file name - only for informational purposes
+ * @param batch batch encountered in the log during recovery
+ * @param newBatch new batch to populate if filter wants to change
+ * the batch (for example to filter some records out, or alter some
+ * records). Please note that the new batch MUST NOT contain
+   *     more records than the original, otherwise recovery will fail.
+ *
+ * @return Processing option for the current record.
+ */
+ LogRecordFoundResult logRecordFound(final long logNumber,
+ final String logFileName, final WriteBatch batch,
+ final WriteBatch newBatch);
+
+ class LogRecordFoundResult {
+ public static LogRecordFoundResult CONTINUE_UNCHANGED =
+ new LogRecordFoundResult(WalProcessingOption.CONTINUE_PROCESSING, false);
+
+ final WalProcessingOption walProcessingOption;
+ final boolean batchChanged;
+
+ /**
+ * @param walProcessingOption the processing option
+ * @param batchChanged Whether batch was changed by the filter.
+ * It must be set to true if newBatch was populated,
+ * else newBatch has no effect.
+ */
+ public LogRecordFoundResult(final WalProcessingOption walProcessingOption,
+ final boolean batchChanged) {
+ this.walProcessingOption = walProcessingOption;
+ this.batchChanged = batchChanged;
+ }
+ }
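+
+  // Implementation sketch (illustrative): a minimal filter that counts every
+  // record and leaves processing unchanged.
+  //
+  //   class CountingWalFilter implements WalFilter {
+  //     int records = 0;
+  //
+  //     @Override
+  //     public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
+  //         final Map<String, Integer> cfNameId) { /* no-op */ }
+  //
+  //     @Override
+  //     public LogRecordFoundResult logRecordFound(final long logNumber,
+  //         final String logFileName, final WriteBatch batch,
+  //         final WriteBatch newBatch) {
+  //       records++;
+  //       return LogRecordFoundResult.CONTINUE_UNCHANGED;
+  //     }
+  //
+  //     @Override
+  //     public String name() { return "counting-wal-filter"; }
+  //   }
+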
+
+ /**
+ * Returns a name that identifies this WAL filter.
+ * The name will be printed to LOG file on start up for diagnosis.
+ *
+ * @return the name
+ */
+ String name();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WalProcessingOption.java b/src/rocksdb/java/src/main/java/org/rocksdb/WalProcessingOption.java
new file mode 100644
index 000000000..889602edc
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WalProcessingOption.java
@@ -0,0 +1,54 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public enum WalProcessingOption {
+ /**
+ * Continue processing as usual.
+ */
+ CONTINUE_PROCESSING((byte)0x0),
+
+ /**
+ * Ignore the current record but continue processing of log(s).
+ */
+ IGNORE_CURRENT_RECORD((byte)0x1),
+
+ /**
+ * Stop replay of logs and discard logs.
+ * Logs won't be replayed on subsequent recovery.
+ */
+ STOP_REPLAY((byte)0x2),
+
+ /**
+ * Corrupted record detected by filter.
+ */
+ CORRUPTED_RECORD((byte)0x3);
+
+ private final byte value;
+
+ WalProcessingOption(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal representation.
+ *
+ * @return the internal representation.
+ */
+ byte getValue() {
+ return value;
+ }
+
+ public static WalProcessingOption fromValue(final byte value) {
+ for (final WalProcessingOption walProcessingOption : WalProcessingOption.values()) {
+ if (walProcessingOption.value == value) {
+ return walProcessingOption;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for WalProcessingOption: " + value);
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java
new file mode 100644
index 000000000..01dbe5a5a
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java
@@ -0,0 +1,394 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * WriteBatch holds a collection of updates to apply atomically to a DB.
+ *
+ * The updates are applied in the order in which they are added
+ * to the WriteBatch. For example, the value of "key" will be "v3"
+ * after the following batch is written:
+ *
+ * batch.put("key", "v1");
+ * batch.remove("key");
+ * batch.put("key", "v2");
+ * batch.put("key", "v3");
+ *
+ * Multiple threads can invoke const methods on a WriteBatch without
+ * external synchronization, but if any of the threads may call a
+ * non-const method, all threads accessing the same WriteBatch must use
+ * external synchronization.
+ */
+public class WriteBatch extends AbstractWriteBatch {
+ /**
+ * Constructs a WriteBatch instance.
+ */
+ public WriteBatch() {
+ this(0);
+ }
+
+ /**
+ * Constructs a WriteBatch instance with a given size.
+ *
+ * @param reserved_bytes reserved size for WriteBatch
+ */
+ public WriteBatch(final int reserved_bytes) {
+ super(newWriteBatch(reserved_bytes));
+ }
+
+ /**
+ * Constructs a WriteBatch instance from a serialized representation
+ * as returned by {@link #data()}.
+ *
+ * @param serialized the serialized representation.
+ */
+ public WriteBatch(final byte[] serialized) {
+ super(newWriteBatch(serialized, serialized.length));
+ }
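+
+  // Usage sketch (illustrative): round-tripping a batch through the
+  // serialized form returned by data(). Exception handling is elided.
+  //
+  //   try (final WriteBatch original = new WriteBatch()) {
+  //     original.put("k".getBytes(), "v".getBytes());
+  //     final byte[] serialized = original.data();
+  //     try (final WriteBatch restored = new WriteBatch(serialized)) {
+  //       assert restored.count() == original.count();
+  //     }
+  //   }
+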
+
+ /**
+ * Support for iterating over the contents of a batch.
+ *
+ * @param handler A handler that is called back for each
+ * update present in the batch
+ *
+ * @throws RocksDBException If we cannot iterate over the batch
+ */
+ public void iterate(final Handler handler) throws RocksDBException {
+ iterate(nativeHandle_, handler.nativeHandle_);
+ }
+
+ /**
+ * Retrieve the serialized version of this batch.
+ *
+ * @return the serialized representation of this write batch.
+ *
+ * @throws RocksDBException if an error occurs whilst retrieving
+ * the serialized batch data.
+ */
+ public byte[] data() throws RocksDBException {
+ return data(nativeHandle_);
+ }
+
+ /**
+ * Retrieve data size of the batch.
+ *
+ * @return the serialized data size of the batch.
+ */
+ public long getDataSize() {
+ return getDataSize(nativeHandle_);
+ }
+
+ /**
+ * Returns true if Put will be called during Iterate.
+ *
+ * @return true if Put will be called during Iterate.
+ */
+ public boolean hasPut() {
+ return hasPut(nativeHandle_);
+ }
+
+ /**
+ * Returns true if Delete will be called during Iterate.
+ *
+ * @return true if Delete will be called during Iterate.
+ */
+ public boolean hasDelete() {
+ return hasDelete(nativeHandle_);
+ }
+
+ /**
+ * Returns true if SingleDelete will be called during Iterate.
+ *
+ * @return true if SingleDelete will be called during Iterate.
+ */
+ public boolean hasSingleDelete() {
+ return hasSingleDelete(nativeHandle_);
+ }
+
+ /**
+ * Returns true if DeleteRange will be called during Iterate.
+ *
+ * @return true if DeleteRange will be called during Iterate.
+ */
+ public boolean hasDeleteRange() {
+ return hasDeleteRange(nativeHandle_);
+ }
+
+ /**
+ * Returns true if Merge will be called during Iterate.
+ *
+ * @return true if Merge will be called during Iterate.
+ */
+ public boolean hasMerge() {
+ return hasMerge(nativeHandle_);
+ }
+
+ /**
+ * Returns true if MarkBeginPrepare will be called during Iterate.
+ *
+ * @return true if MarkBeginPrepare will be called during Iterate.
+ */
+ public boolean hasBeginPrepare() {
+ return hasBeginPrepare(nativeHandle_);
+ }
+
+ /**
+ * Returns true if MarkEndPrepare will be called during Iterate.
+ *
+ * @return true if MarkEndPrepare will be called during Iterate.
+ */
+ public boolean hasEndPrepare() {
+ return hasEndPrepare(nativeHandle_);
+ }
+
+ /**
+ * Returns true if MarkCommit will be called during Iterate.
+ *
+ * @return true if MarkCommit will be called during Iterate.
+ */
+ public boolean hasCommit() {
+ return hasCommit(nativeHandle_);
+ }
+
+ /**
+ * Returns true if MarkRollback will be called during Iterate.
+ *
+ * @return true if MarkRollback will be called during Iterate.
+ */
+ public boolean hasRollback() {
+ return hasRollback(nativeHandle_);
+ }
+
+ @Override
+ public WriteBatch getWriteBatch() {
+ return this;
+ }
+
+ /**
+ * Marks this point in the WriteBatch as the last record to
+ * be inserted into the WAL, provided the WAL is enabled.
+ */
+ public void markWalTerminationPoint() {
+ markWalTerminationPoint(nativeHandle_);
+ }
+
+ /**
+ * Gets the WAL termination point.
+ *
+ * See {@link #markWalTerminationPoint()}
+ *
+ * @return the WAL termination point
+ */
+ public SavePoint getWalTerminationPoint() {
+ return getWalTerminationPoint(nativeHandle_);
+ }
+
+ @Override
+ WriteBatch getWriteBatch(final long handle) {
+ return this;
+ }
+
+ /**
+ * <p>Private WriteBatch constructor which is used to construct
+ * WriteBatch instances from the C++ side. As the reference to this
+ * object is also managed from the C++ side, the handle will be disowned.</p>
+ *
+ * @param nativeHandle address of native instance.
+ */
+ WriteBatch(final long nativeHandle) {
+ this(nativeHandle, false);
+ }
+
+ /**
+ * <p>Private WriteBatch constructor which is used to construct
+ * WriteBatch instances. </p>
+ *
+ * @param nativeHandle address of native instance.
+ * @param owningNativeHandle whether to own this reference from the C++ side or not
+ */
+ WriteBatch(final long nativeHandle, final boolean owningNativeHandle) {
+ super(nativeHandle);
+ if (!owningNativeHandle) {
+ disOwnNativeHandle();
+ }
+ }
+
+ @Override protected final native void disposeInternal(final long handle);
+ @Override final native int count0(final long handle);
+ @Override final native void put(final long handle, final byte[] key,
+ final int keyLen, final byte[] value, final int valueLen);
+ @Override final native void put(final long handle, final byte[] key,
+ final int keyLen, final byte[] value, final int valueLen,
+ final long cfHandle);
+ @Override
+ final native void putDirect(final long handle, final ByteBuffer key, final int keyOffset,
+ final int keyLength, final ByteBuffer value, final int valueOffset, final int valueLength,
+ final long cfHandle);
+ @Override final native void merge(final long handle, final byte[] key,
+ final int keyLen, final byte[] value, final int valueLen);
+ @Override final native void merge(final long handle, final byte[] key,
+ final int keyLen, final byte[] value, final int valueLen,
+ final long cfHandle);
+ @Override final native void delete(final long handle, final byte[] key,
+ final int keyLen) throws RocksDBException;
+ @Override final native void delete(final long handle, final byte[] key,
+ final int keyLen, final long cfHandle) throws RocksDBException;
+ @Override final native void singleDelete(final long handle, final byte[] key,
+ final int keyLen) throws RocksDBException;
+ @Override final native void singleDelete(final long handle, final byte[] key,
+ final int keyLen, final long cfHandle) throws RocksDBException;
+ @Override
+ final native void removeDirect(final long handle, final ByteBuffer key, final int keyOffset,
+ final int keyLength, final long cfHandle) throws RocksDBException;
+ @Override
+ final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
+ final byte[] endKey, final int endKeyLen);
+ @Override
+ final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
+ final byte[] endKey, final int endKeyLen, final long cfHandle);
+ @Override final native void putLogData(final long handle,
+ final byte[] blob, final int blobLen) throws RocksDBException;
+ @Override final native void clear0(final long handle);
+ @Override final native void setSavePoint0(final long handle);
+ @Override final native void rollbackToSavePoint0(final long handle);
+ @Override final native void popSavePoint(final long handle) throws RocksDBException;
+ @Override final native void setMaxBytes(final long nativeHandle,
+ final long maxBytes);
+
+ private native static long newWriteBatch(final int reserved_bytes);
+ private native static long newWriteBatch(final byte[] serialized,
+ final int serializedLength);
+ private native void iterate(final long handle, final long handlerHandle)
+ throws RocksDBException;
+ private native byte[] data(final long nativeHandle) throws RocksDBException;
+ private native long getDataSize(final long nativeHandle);
+ private native boolean hasPut(final long nativeHandle);
+ private native boolean hasDelete(final long nativeHandle);
+ private native boolean hasSingleDelete(final long nativeHandle);
+ private native boolean hasDeleteRange(final long nativeHandle);
+ private native boolean hasMerge(final long nativeHandle);
+ private native boolean hasBeginPrepare(final long nativeHandle);
+ private native boolean hasEndPrepare(final long nativeHandle);
+ private native boolean hasCommit(final long nativeHandle);
+ private native boolean hasRollback(final long nativeHandle);
+ private native void markWalTerminationPoint(final long nativeHandle);
+ private native SavePoint getWalTerminationPoint(final long nativeHandle);
+
+ /**
+ * Handler callback for iterating over the contents of a batch.
+ */
+ public static abstract class Handler
+ extends RocksCallbackObject {
+ public Handler() {
+ super(null);
+ }
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return createNewHandler0();
+ }
+
+ public abstract void put(final int columnFamilyId, final byte[] key,
+ final byte[] value) throws RocksDBException;
+ public abstract void put(final byte[] key, final byte[] value);
+ public abstract void merge(final int columnFamilyId, final byte[] key,
+ final byte[] value) throws RocksDBException;
+ public abstract void merge(final byte[] key, final byte[] value);
+ public abstract void delete(final int columnFamilyId, final byte[] key)
+ throws RocksDBException;
+ public abstract void delete(final byte[] key);
+ public abstract void singleDelete(final int columnFamilyId,
+ final byte[] key) throws RocksDBException;
+ public abstract void singleDelete(final byte[] key);
+ public abstract void deleteRange(final int columnFamilyId,
+ final byte[] beginKey, final byte[] endKey) throws RocksDBException;
+ public abstract void deleteRange(final byte[] beginKey,
+ final byte[] endKey);
+ public abstract void logData(final byte[] blob);
+ public abstract void putBlobIndex(final int columnFamilyId,
+ final byte[] key, final byte[] value) throws RocksDBException;
+ public abstract void markBeginPrepare() throws RocksDBException;
+ public abstract void markEndPrepare(final byte[] xid)
+ throws RocksDBException;
+ public abstract void markNoop(final boolean emptyBatch)
+ throws RocksDBException;
+ public abstract void markRollback(final byte[] xid)
+ throws RocksDBException;
+ public abstract void markCommit(final byte[] xid)
+ throws RocksDBException;
+
+ /**
+ * shouldContinue is called during iteration by
+ * {@link WriteBatch#iterate(Handler)}. If it returns false,
+ * iteration is halted; otherwise iteration continues.
+ * The default implementation always returns true.
+ *
+ * @return true to continue iterating, false to halt iteration.
+ */
+ public boolean shouldContinue() {
+ return true;
+ }
+
+ private native long createNewHandler0();
+ }
+
+ /**
+ * A structure for describing the save point in the Write Batch.
+ */
+ public static class SavePoint {
+ private long size;
+ private long count;
+ private long contentFlags;
+
+ public SavePoint(final long size, final long count,
+ final long contentFlags) {
+ this.size = size;
+ this.count = count;
+ this.contentFlags = contentFlags;
+ }
+
+ public void clear() {
+ this.size = 0;
+ this.count = 0;
+ this.contentFlags = 0;
+ }
+
+ /**
+ * Get the size of the serialized representation.
+ *
+ * @return the size of the serialized representation.
+ */
+ public long getSize() {
+ return size;
+ }
+
+ /**
+ * Get the number of elements.
+ *
+ * @return the number of elements.
+ */
+ public long getCount() {
+ return count;
+ }
+
+ /**
+ * Get the content flags.
+ *
+ * @return the content flags.
+ */
+ public long getContentFlags() {
+ return contentFlags;
+ }
+
+ public boolean isCleared() {
+ return (size | count | contentFlags) == 0;
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java
new file mode 100644
index 000000000..1f1ddc4ad
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java
@@ -0,0 +1,305 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * <p>Defines the interface for a Write Batch which
+ * holds a collection of updates to apply atomically to a DB.</p>
+ */
+public interface WriteBatchInterface {
+
+ /**
+ * Returns the number of updates in the batch.
+ *
+ * @return number of items in WriteBatch
+ */
+ int count();
+
+ /**
+ * <p>Store the mapping "key-&gt;value" in the database.</p>
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void put(byte[] key, byte[] value) throws RocksDBException;
+
+ /**
+ * <p>Store the mapping "key-&gt;value" within given column
+ * family.</p>
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void put(ColumnFamilyHandle columnFamilyHandle,
+ byte[] key, byte[] value) throws RocksDBException;
+
+ /**
+ * <p>Store the mapping "key-&gt;value" in the database.</p>
+ *
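+ * <p>A direct-buffer sketch (assumes a {@code batch} instance and
+ * {@code UTF_8} from {@code StandardCharsets}):</p>
+ * <pre>{@code
+ * final ByteBuffer key = ByteBuffer.allocateDirect(16);
+ * key.put("k1".getBytes(UTF_8)).flip();
+ * final ByteBuffer value = ByteBuffer.allocateDirect(16);
+ * value.put("v1".getBytes(UTF_8)).flip();
+ * batch.put(key, value);
+ * }</pre>
+ *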
+ * @param key the specified key to be inserted. The buffer's position and
+ * limit are used. Supports direct buffers only.
+ * @param value the value associated with the specified key. The buffer's
+ * position and limit are used. Supports direct buffers only.
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void put(ByteBuffer key, ByteBuffer value) throws RocksDBException;
+
+ /**
+ * <p>Store the mapping "key-&gt;value" within given column
+ * family.</p>
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key the specified key to be inserted. The buffer's position and
+ * limit are used. Supports direct buffers only.
+ * @param value the value associated with the specified key. The buffer's
+ * position and limit are used. Supports direct buffers only.
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void put(ColumnFamilyHandle columnFamilyHandle, ByteBuffer key, ByteBuffer value)
+ throws RocksDBException;
+
+ /**
+ * <p>Merge "value" with the existing value of "key" in the database.
+ * "key-&gt;merge(existing, value)"</p>
+ *
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void merge(byte[] key, byte[] value) throws RocksDBException;
+
+ /**
+ * <p>Merge "value" with the existing value of "key" in given column family.
+ * "key-&gt;merge(existing, value)"</p>
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void merge(ColumnFamilyHandle columnFamilyHandle,
+ byte[] key, byte[] value) throws RocksDBException;
+
+ /**
+ * <p>If the database contains a mapping for "key", erase it. Else do nothing.</p>
+ *
+ * @param key Key to delete within database
+ *
+ * @deprecated Use {@link #delete(byte[])}
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ @Deprecated
+ void remove(byte[] key) throws RocksDBException;
+
+ /**
+ * <p>If column family contains a mapping for "key", erase it. Else do nothing.</p>
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key Key to delete within database
+ *
+ * @deprecated Use {@link #delete(ColumnFamilyHandle, byte[])}
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ @Deprecated
+ void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key)
+ throws RocksDBException;
+
+ /**
+ * <p>If the database contains a mapping for "key", erase it. Else do nothing.</p>
+ *
+ * @param key Key to delete within database
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void delete(byte[] key) throws RocksDBException;
+
+ /**
+ * <p>If column family contains a mapping for "key", erase it. Else do nothing.</p>
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key Key to delete within database
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void delete(ColumnFamilyHandle columnFamilyHandle, byte[] key)
+ throws RocksDBException;
+
+ /**
+ * Remove the database entry for {@code key}. Requires that the key exists
+ * and was not overwritten. It is not an error if the key did not exist
+ * in the database.
+ *
+ * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+ * times), then the result of calling SingleDelete() on this key is undefined.
+ * SingleDelete() only behaves correctly if there has been only one Put()
+ * for this key since the previous call to SingleDelete() for this key.
+ *
+ * This feature is currently an experimental performance optimization
+ * for a very specific workload. It is up to the caller to ensure that
+ * SingleDelete is only used for a key that is not deleted using Delete() or
+ * written using Merge(). Mixing SingleDelete operations with Deletes and
+ * Merges can result in undefined behavior.
+ *
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ void singleDelete(final byte[] key) throws RocksDBException;
+
+ /**
+ * Remove the database entry for {@code key}. Requires that the key exists
+ * and was not overwritten. It is not an error if the key did not exist
+ * in the database.
+ *
+ * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+ * times), then the result of calling SingleDelete() on this key is undefined.
+ * SingleDelete() only behaves correctly if there has been only one Put()
+ * for this key since the previous call to SingleDelete() for this key.
+ *
+ * This feature is currently an experimental performance optimization
+ * for a very specific workload. It is up to the caller to ensure that
+ * SingleDelete is only used for a key that is not deleted using Delete() or
+ * written using Merge(). Mixing SingleDelete operations with Deletes and
+ * Merges can result in undefined behavior.
+ *
+ * @param columnFamilyHandle The column family to delete the key from
+ * @param key Key to delete within database
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException;
+
+ /**
+ * <p>If the database contains a mapping for "key", erase it. Else do nothing.</p>
+ *
+ * @param key Key to delete within database. The buffer's position and limit
+ * are used. Supports direct buffers only.
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void remove(ByteBuffer key) throws RocksDBException;
+
+ /**
+ * <p>If column family contains a mapping for "key", erase it. Else do nothing.</p>
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key Key to delete within database. The buffer's position and limit
+ * are used. Supports direct buffers only.
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void remove(ColumnFamilyHandle columnFamilyHandle, ByteBuffer key) throws RocksDBException;
+
+ /**
+ * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+ * including "beginKey" and excluding "endKey". It is not an error if no
+ * keys exist in the range ["beginKey", "endKey").
+ *
+ * @param beginKey
+ * First key to delete within database (included)
+ * @param endKey
+ * Last key to delete within database (excluded)
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void deleteRange(byte[] beginKey, byte[] endKey) throws RocksDBException;
+
+ /**
+ * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+ * including "beginKey" and excluding "endKey". It is not an error if no
+ * keys exist in the range ["beginKey", "endKey").
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param beginKey
+ * First key to delete within database (included)
+ * @param endKey
+ * Last key to delete within database (excluded)
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void deleteRange(ColumnFamilyHandle columnFamilyHandle, byte[] beginKey,
+ byte[] endKey) throws RocksDBException;
+
+ /**
+ * Append a blob of arbitrary size to the records in this batch. The blob will
+ * be stored in the transaction log but not in any other file. In particular,
+ * it will not be persisted to the SST files. When iterating over this
+ * WriteBatch, WriteBatch::Handler::LogData will be called with the contents
+ * of the blob as it is encountered. Blobs, puts, deletes, and merges will be
+ * encountered in the same order in which they were inserted. The blob will
+ * NOT consume sequence number(s) and will NOT increase the count of the batch.
+ *
+ * Example application: add timestamps to the transaction log for use in
+ * replication.
+ *
+ * @param blob binary object to be inserted
+ * @throws RocksDBException thrown if error happens in underlying native library.
+ */
+ void putLogData(byte[] blob) throws RocksDBException;
+
+ /**
+ * Clear all updates buffered in this batch.
+ */
+ void clear();
+
+ /**
+ * Records the state of the batch for future calls to RollbackToSavePoint().
+ * May be called multiple times to set multiple save points.
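+ *
+ * <p>A save point sketch (assumes a {@code batch} instance, {@code UTF_8}
+ * from {@code StandardCharsets}, and a context that handles
+ * {@code RocksDBException}):</p>
+ * <pre>{@code
+ * batch.put("k".getBytes(UTF_8), "v1".getBytes(UTF_8));
+ * batch.setSavePoint();
+ * batch.put("k".getBytes(UTF_8), "v2".getBytes(UTF_8));
+ * batch.rollbackToSavePoint(); // the batch again maps "k" to "v1"
+ * }</pre>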
+ */
+ void setSavePoint();
+
+ /**
+ * Removes all entries in this batch (Put, Merge, Delete, PutLogData) since
+ * the most recent call to SetSavePoint(), and removes the most recent save
+ * point.
+ *
+ * @throws RocksDBException if there is no previous call to SetSavePoint()
+ */
+ void rollbackToSavePoint() throws RocksDBException;
+
+ /**
+ * Pop the most recent save point.
+ *
+ * That is to say that it removes the last save point,
+ * which was set by {@link #setSavePoint()}.
+ *
+ * @throws RocksDBException If there is no previous call to
+ * {@link #setSavePoint()}, an exception with
+ * {@link Status.Code#NotFound} will be thrown.
+ */
+ void popSavePoint() throws RocksDBException;
+
+ /**
+ * Set the maximum size of the write batch.
+ *
+ * @param maxBytes the maximum size in bytes.
+ */
+ void setMaxBytes(long maxBytes);
+
+ /**
+ * Get the underlying Write Batch.
+ *
+ * @return the underlying WriteBatch.
+ */
+ WriteBatch getWriteBatch();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
new file mode 100644
index 000000000..3831f85ba
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
@@ -0,0 +1,318 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Similar to {@link org.rocksdb.WriteBatch} but with a binary searchable
+ * index built for all the keys inserted.
+ *
+ * Calling put, merge, remove or putLogData calls the same function
+ * as with {@link org.rocksdb.WriteBatch} whilst also building an index.
+ *
+ * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to
+ * create an iterator over the write batch or
+ * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)}
+ * to get an iterator for the database with Read-Your-Own-Writes like capability
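+ *
+ * <p>A read-your-own-writes sketch (illustrative; assumes an already-open
+ * {@code RocksDB db} and {@code UTF_8} from {@code StandardCharsets}):</p>
+ * <pre>{@code
+ * try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex();
+ *      final ReadOptions readOpts = new ReadOptions()) {
+ *   wbwi.put("k".getBytes(UTF_8), "new".getBytes(UTF_8));
+ *   // sees "new" even though the batch has not yet been written to db
+ *   final byte[] value =
+ *       wbwi.getFromBatchAndDB(db, readOpts, "k".getBytes(UTF_8));
+ * }
+ * }</pre>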
+ */
+public class WriteBatchWithIndex extends AbstractWriteBatch {
+ /**
+ * Creates a WriteBatchWithIndex where no bytes
+ * are reserved up-front, bytewise comparison is
+ * used for fallback key comparisons,
+ * and duplicate keys operations are retained
+ */
+ public WriteBatchWithIndex() {
+ super(newWriteBatchWithIndex());
+ }
+
+
+ /**
+ * Creates a WriteBatchWithIndex where no bytes
+ * are reserved up-front, bytewise comparison is
+ * used for fallback key comparisons, and duplicate key
+ * assignment is determined by the constructor argument
+ *
+ * @param overwriteKey if true, overwrite the key in the index when
+ * inserting a duplicate key; in this way an iterator will never
+ * show two entries with the same key.
+ */
+ public WriteBatchWithIndex(final boolean overwriteKey) {
+ super(newWriteBatchWithIndex(overwriteKey));
+ }
+
+ /**
+ * Creates a WriteBatchWithIndex
+ *
+ * @param fallbackIndexComparator We fall back to this comparator
+ * to compare keys within a column family if we cannot determine
+ * the column family, and so cannot look up its comparator.
+ *
+ * @param reservedBytes reserved bytes in underlying WriteBatch
+ *
+ * @param overwriteKey if true, overwrite the key in the index when
+ * inserting a duplicate key; in this way an iterator will never
+ * show two entries with the same key.
+ */
+ public WriteBatchWithIndex(
+ final AbstractComparator
+ fallbackIndexComparator, final int reservedBytes,
+ final boolean overwriteKey) {
+ super(newWriteBatchWithIndex(fallbackIndexComparator.nativeHandle_,
+ fallbackIndexComparator.getComparatorType().getValue(), reservedBytes,
+ overwriteKey));
+ }
+
+ /**
+ * <p>Private WriteBatchWithIndex constructor which is used to construct
+ * WriteBatchWithIndex instances from the C++ side. As the reference to this
+ * object is also managed from the C++ side, the handle will be disowned.</p>
+ *
+ * @param nativeHandle address of native instance.
+ */
+ WriteBatchWithIndex(final long nativeHandle) {
+ super(nativeHandle);
+ disOwnNativeHandle();
+ }
+
+ /**
+ * Create an iterator of a column family. User can call
+ * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to
+ * seek to the next entry at or after a key. Keys will be iterated in the
+ * order given by index_comparator. For multiple updates on the same key,
+ * each update will be returned as a separate entry, in the order of update
+ * time.
+ *
+ * @param columnFamilyHandle The column family to iterate over
+ * @return An iterator for the Write Batch contents, restricted to the column
+ * family
+ */
+ public WBWIRocksIterator newIterator(
+ final ColumnFamilyHandle columnFamilyHandle) {
+ return new WBWIRocksIterator(this, iterator1(nativeHandle_,
+ columnFamilyHandle.nativeHandle_));
+ }
+
+ /**
+ * Create an iterator of the default column family. User can call
+ * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to
+ * seek to the next entry at or after a key. Keys will be iterated in the
+ * order given by index_comparator. For multiple updates on the same key,
+ * each update will be returned as a separate entry, in the order of update
+ * time.
+ *
+ * @return An iterator for the Write Batch contents
+ */
+ public WBWIRocksIterator newIterator() {
+ return new WBWIRocksIterator(this, iterator0(nativeHandle_));
+ }
+
+ /**
+ * Provides Read-Your-Own-Writes like functionality by
+ * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
+ * as a delta and baseIterator as a base
+ *
+ * Updating write batch with the current key of the iterator is not safe.
+ * We strongly recommend users not to do it. It will invalidate the current
+ * key() and value() of the iterator. This invalidation happens even before
+ * the write batch update finishes. The state may recover after Next() is
+ * called.
+ *
+ * @param columnFamilyHandle The column family to iterate over
+ * @param baseIterator The base iterator,
+ * e.g. {@link org.rocksdb.RocksDB#newIterator()}
+ * @return An iterator which shows a view comprised of both the database
+ * point-in-time from baseIterator and modifications made in this write batch.
+ */
+ public RocksIterator newIteratorWithBase(
+ final ColumnFamilyHandle columnFamilyHandle,
+ final RocksIterator baseIterator) {
+ RocksIterator iterator = new RocksIterator(baseIterator.parent_,
+ iteratorWithBase(
+ nativeHandle_, columnFamilyHandle.nativeHandle_, baseIterator.nativeHandle_));
+ // when the iterator is deleted it will also delete the baseIterator
+ baseIterator.disOwnNativeHandle();
+ return iterator;
+ }
+
+ /**
+ * Provides Read-Your-Own-Writes like functionality by
+ * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
+ * as a delta and baseIterator as a base. Operates on the default column
+ * family.
+ *
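+ * <p>A sketch (assumes an already-open {@code RocksDB db} and a populated
+ * {@code wbwi}); note that the returned iterator takes ownership of the
+ * base iterator's native handle:</p>
+ * <pre>{@code
+ * final RocksIterator base = db.newIterator();
+ * try (final RocksIterator it = wbwi.newIteratorWithBase(base)) {
+ *   for (it.seekToFirst(); it.isValid(); it.next()) {
+ *     // it.key() / it.value() reflect both the DB and the batch
+ *   }
+ * }
+ * }</pre>
+ *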
+ * @param baseIterator The base iterator,
+ * e.g. {@link org.rocksdb.RocksDB#newIterator()}
+ * @return An iterator which shows a view comprised of both the database
+ * point-in-time from baseIterator and modifications made in this write batch.
+ */
+ public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) {
+ return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator);
+ }
+
+ /**
+ * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will only
+ * read the key from this batch.
+ *
+ * @param columnFamilyHandle The column family to retrieve the value from
+ * @param options The database options to use
+ * @param key The key to read the value for
+ *
+ * @return a byte array storing the value associated with the input key if
+ * any, or null if the specified key is not found.
+ *
+ * @throws RocksDBException if the batch does not have enough data to resolve
+ * Merge operations; a MergeInProgress status may be returned.
+ */
+ public byte[] getFromBatch(final ColumnFamilyHandle columnFamilyHandle,
+ final DBOptions options, final byte[] key) throws RocksDBException {
+ return getFromBatch(nativeHandle_, options.nativeHandle_,
+ key, key.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#get(byte[])} but will only
+ * read the key from this batch.
+ *
+ * @param options The database options to use
+ * @param key The key to read the value for
+ *
+ * @return a byte array storing the value associated with the input key if
+ * any, or null if the specified key is not found.
+ *
+ * @throws RocksDBException if the batch does not have enough data to resolve
+ * Merge operations; a MergeInProgress status may be returned.
+ */
+ public byte[] getFromBatch(final DBOptions options, final byte[] key)
+ throws RocksDBException {
+ return getFromBatch(nativeHandle_, options.nativeHandle_, key, key.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will also
+ * read writes from this batch.
+ *
+ * This function will query both this batch and the DB and then merge
+ * the results using the DB's merge operator (if the batch contains any
+ * merge requests).
+ *
+ * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is
+ * read from the DB but will NOT change which keys are read from the batch
+ * (the keys in this batch do not yet belong to any snapshot and will be
+ * fetched regardless).
+ *
+ * @param db The Rocks database
+ * @param columnFamilyHandle The column family to retrieve the value from
+ * @param options The read options to use
+ * @param key The key to read the value for
+ *
+ * @return a byte array storing the value associated with the input key if
+ * any, or null if the specified key is not found.
+ *
+ * @throws RocksDBException if the value for the key cannot be read
+ */
+ public byte[] getFromBatchAndDB(final RocksDB db, final ColumnFamilyHandle columnFamilyHandle,
+ final ReadOptions options, final byte[] key) throws RocksDBException {
+ return getFromBatchAndDB(nativeHandle_, db.nativeHandle_,
+ options.nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#get(byte[])} but will also
+ * read writes from this batch.
+ *
+ * This function will query both this batch and the DB and then merge
+ * the results using the DB's merge operator (if the batch contains any
+ * merge requests).
+ *
+ * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is
+ * read from the DB but will NOT change which keys are read from the batch
+ * (the keys in this batch do not yet belong to any snapshot and will be
+ * fetched regardless).
+ *
+ * @param db The Rocks database
+ * @param options The read options to use
+ * @param key The key to read the value for
+ *
+ * @return a byte array storing the value associated with the input key if
+ * any, or null if the specified key is not found.
+ *
+ * @throws RocksDBException if the value for the key cannot be read
+ */
+ public byte[] getFromBatchAndDB(final RocksDB db, final ReadOptions options,
+ final byte[] key) throws RocksDBException {
+ return getFromBatchAndDB(nativeHandle_, db.nativeHandle_,
+ options.nativeHandle_, key, key.length);
+ }
+
+ @Override protected final native void disposeInternal(final long handle);
+ @Override final native int count0(final long handle);
+ @Override final native void put(final long handle, final byte[] key,
+ final int keyLen, final byte[] value, final int valueLen);
+ @Override final native void put(final long handle, final byte[] key,
+ final int keyLen, final byte[] value, final int valueLen,
+ final long cfHandle);
+ @Override
+ final native void putDirect(final long handle, final ByteBuffer key, final int keyOffset,
+ final int keyLength, final ByteBuffer value, final int valueOffset, final int valueLength,
+ final long cfHandle);
+ @Override final native void merge(final long handle, final byte[] key,
+ final int keyLen, final byte[] value, final int valueLen);
+ @Override final native void merge(final long handle, final byte[] key,
+ final int keyLen, final byte[] value, final int valueLen,
+ final long cfHandle);
+ @Override final native void delete(final long handle, final byte[] key,
+ final int keyLen) throws RocksDBException;
+ @Override final native void delete(final long handle, final byte[] key,
+ final int keyLen, final long cfHandle) throws RocksDBException;
+ @Override final native void singleDelete(final long handle, final byte[] key,
+ final int keyLen) throws RocksDBException;
+ @Override final native void singleDelete(final long handle, final byte[] key,
+ final int keyLen, final long cfHandle) throws RocksDBException;
+ @Override
+ final native void removeDirect(final long handle, final ByteBuffer key, final int keyOffset,
+ final int keyLength, final long cfHandle) throws RocksDBException;
+ // DO NOT USE - `WriteBatchWithIndex::deleteRange` is not yet supported
+ @Override
+ final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
+ final byte[] endKey, final int endKeyLen);
+ // DO NOT USE - `WriteBatchWithIndex::deleteRange` is not yet supported
+ @Override
+ final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
+ final byte[] endKey, final int endKeyLen, final long cfHandle);
+ @Override final native void putLogData(final long handle, final byte[] blob,
+ final int blobLen) throws RocksDBException;
+ @Override final native void clear0(final long handle);
+ @Override final native void setSavePoint0(final long handle);
+ @Override final native void rollbackToSavePoint0(final long handle);
+ @Override final native void popSavePoint(final long handle) throws RocksDBException;
+ @Override final native void setMaxBytes(final long nativeHandle,
+ final long maxBytes);
+ @Override final native WriteBatch getWriteBatch(final long handle);
+
+ private native static long newWriteBatchWithIndex();
+ private native static long newWriteBatchWithIndex(final boolean overwriteKey);
+ private native static long newWriteBatchWithIndex(
+ final long fallbackIndexComparatorHandle,
+ final byte comparatorType, final int reservedBytes,
+ final boolean overwriteKey);
+ private native long iterator0(final long handle);
+ private native long iterator1(final long handle, final long cfHandle);
+ private native long iteratorWithBase(
+ final long handle, final long cfHandle, final long baseIteratorHandle);
+ private native byte[] getFromBatch(final long handle, final long optHandle,
+ final byte[] key, final int keyLen);
+ private native byte[] getFromBatch(final long handle, final long optHandle,
+ final byte[] key, final int keyLen, final long cfHandle);
+ private native byte[] getFromBatchAndDB(final long handle,
+ final long dbHandle, final long readOptHandle, final byte[] key,
+ final int keyLen);
+ private native byte[] getFromBatchAndDB(final long handle,
+ final long dbHandle, final long readOptHandle, final byte[] key,
+ final int keyLen, final long cfHandle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteBufferManager.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBufferManager.java
new file mode 100644
index 000000000..b244aa952
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBufferManager.java
@@ -0,0 +1,33 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Java wrapper over native write_buffer_manager class
+ */
+public class WriteBufferManager extends RocksObject {
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ /**
+ * Construct a new instance of WriteBufferManager.
+ *
+ * Check <a href="https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager">
+ * https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager</a>
+ * for more details on when to use it
+ *
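+ * <p>A construction sketch (charging the write buffer memory to an
+ * {@code LRUCache}; wiring it into the DB options is left as an
+ * illustrative comment):</p>
+ * <pre>{@code
+ * try (final Cache cache = new LRUCache(64 * 1024 * 1024);
+ *      final WriteBufferManager wbm =
+ *          new WriteBufferManager(32 * 1024 * 1024, cache)) {
+ *   // e.g. dbOptions.setWriteBufferManager(wbm);
+ * }
+ * }</pre>
+ *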
+ * @param bufferSizeBytes buffer size (in bytes) to use for native write_buffer_manager
+ * @param cache cache whose memory should be bounded by this write buffer manager
+ */
+ public WriteBufferManager(final long bufferSizeBytes, final Cache cache) {
+ super(newWriteBufferManager(bufferSizeBytes, cache.nativeHandle_));
+ }
+
+ private native static long newWriteBufferManager(final long bufferSizeBytes, final long cacheHandle);
+ @Override
+ protected native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java
new file mode 100644
index 000000000..71789ed1f
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java
@@ -0,0 +1,219 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Options that control write operations.
+ *
+ * Note that developers should call WriteOptions.dispose() to release the
+ * C++ side memory before a WriteOptions instance runs out of scope.
+ */
+public class WriteOptions extends RocksObject {
+ /**
+ * Construct WriteOptions instance.
+ */
+ public WriteOptions() {
+ super(newWriteOptions());
+ }
+
+ // TODO(AR) consider ownership
+ WriteOptions(final long nativeHandle) {
+ super(nativeHandle);
+ disOwnNativeHandle();
+ }
+
+ /**
+ * Copy constructor for WriteOptions.
+ *
+ * NOTE: This does a shallow copy, which means any pointer members will be
+ * shared between the original and the copy!
+ *
+ * @param other The WriteOptions to copy.
+ */
+ public WriteOptions(WriteOptions other) {
+ super(copyWriteOptions(other.nativeHandle_));
+ }
+
+
+ /**
+ * If true, the write will be flushed from the operating system
+ * buffer cache (by calling WritableFile::Sync()) before the write
+ * is considered complete. If this flag is true, writes will be
+ * slower.
+ *
+ * If this flag is false, and the machine crashes, some recent
+ * writes may be lost. Note that if it is just the process that
+ * crashes (i.e., the machine does not reboot), no writes will be
+ * lost even if sync==false.
+ *
+ * In other words, a DB write with sync==false has similar
+ * crash semantics as the "write()" system call. A DB write
+ * with sync==true has similar crash semantics to a "write()"
+ * system call followed by "fdatasync()".
+ *
+ * Default: false
+ *
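+ * <p>A sketch (assumes an already-open {@code RocksDB db}, a {@code batch},
+ * and a context that handles {@code RocksDBException}):</p>
+ * <pre>{@code
+ * try (final WriteOptions writeOpts = new WriteOptions().setSync(true)) {
+ *   db.write(writeOpts, batch); // durable once this returns
+ * }
+ * }</pre>
+ *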
+ * @param flag a boolean flag to indicate whether a write
+ * should be synchronized.
+ * @return the instance of the current WriteOptions.
+ */
+ public WriteOptions setSync(final boolean flag) {
+ setSync(nativeHandle_, flag);
+ return this;
+ }
+
+ /**
+ * If true, the write will be flushed from the operating system
+ * buffer cache (by calling WritableFile::Sync()) before the write
+ * is considered complete. If this flag is true, writes will be
+ * slower.
+ *
+ * If this flag is false, and the machine crashes, some recent
+ * writes may be lost. Note that if it is just the process that
+ * crashes (i.e., the machine does not reboot), no writes will be
+ * lost even if sync==false.
+ *
+ * In other words, a DB write with sync==false has similar
+ * crash semantics as the "write()" system call. A DB write
+ * with sync==true has similar crash semantics to a "write()"
+ * system call followed by "fdatasync()".
+ *
+ * @return boolean value indicating if sync is active.
+ */
+ public boolean sync() {
+ return sync(nativeHandle_);
+ }
+
+ /**
+ * If true, writes will not first go to the write ahead log,
+ * and the write may get lost after a crash. The backup engine
+ * relies on write-ahead logs to back up the memtable, so if
+ * you disable write-ahead logs, you must create backups with
+ * flush_before_backup=true to avoid losing unflushed memtable data.
+ *
+ * @param flag a boolean flag to specify whether to disable
+ * write-ahead-log on writes.
+ * @return the instance of the current WriteOptions.
+ */
+ public WriteOptions setDisableWAL(final boolean flag) {
+ setDisableWAL(nativeHandle_, flag);
+ return this;
+ }
+
+ /**
+ * If true, writes will not first go to the write ahead log,
+ * and the write may get lost after a crash. The backup engine
+ * relies on write-ahead logs to back up the memtable, so if
+ * you disable write-ahead logs, you must create backups with
+ * flush_before_backup=true to avoid losing unflushed memtable data.
+ *
+ * @return boolean value indicating if WAL is disabled.
+ */
+ public boolean disableWAL() {
+ return disableWAL(nativeHandle_);
+ }
+
+ /**
+ * If true and if user is trying to write to column families that don't exist
+ * (they were dropped), ignore the write (don't return an error). If there
+ * are multiple writes in a WriteBatch, other writes will succeed.
+ *
+ * Default: false
+ *
+ * @param ignoreMissingColumnFamilies true to ignore writes to column families
+ * which don't exist
+ * @return the instance of the current WriteOptions.
+ */
+ public WriteOptions setIgnoreMissingColumnFamilies(
+ final boolean ignoreMissingColumnFamilies) {
+ setIgnoreMissingColumnFamilies(nativeHandle_, ignoreMissingColumnFamilies);
+ return this;
+ }
+
+ /**
+ * If true and if user is trying to write to column families that don't exist
+ * (they were dropped), ignore the write (don't return an error). If there
+ * are multiple writes in a WriteBatch, other writes will succeed.
+ *
+ * Default: false
+ *
+ * @return true if writes to column families which don't exist are ignored
+ */
+ public boolean ignoreMissingColumnFamilies() {
+ return ignoreMissingColumnFamilies(nativeHandle_);
+ }
+
+ /**
+ * If true and we need to wait or sleep for the write request, fails
+ * immediately with {@link Status.Code#Incomplete}.
+ *
+ * @param noSlowdown true to fail write requests if we need to wait or sleep
+ * @return the instance of the current WriteOptions.
+ */
+ public WriteOptions setNoSlowdown(final boolean noSlowdown) {
+ setNoSlowdown(nativeHandle_, noSlowdown);
+ return this;
+ }
+
+ /**
+ * If true and we need to wait or sleep for the write request, fails
+ * immediately with {@link Status.Code#Incomplete}.
+ *
+ * @return true when write requests are failed if we need to wait or sleep
+ */
+ public boolean noSlowdown() {
+ return noSlowdown(nativeHandle_);
+ }
+
+ /**
+ * If true, this write request is of lower priority if compaction is
+ * behind. In this case, if {@link #noSlowdown()} == true, the request
+ * will be cancelled immediately and {@link Status.Code#Incomplete} returned.
+ * Otherwise, it will be slowed down. The slowdown value is determined by
+ * RocksDB to guarantee it introduces minimal impact on high priority writes.
+ *
+ * Default: false
+ *
+ * @param lowPri true if the write request should be of lower priority than
+ * compactions which are behind.
+ *
+ * @return the instance of the current WriteOptions.
+ */
+ public WriteOptions setLowPri(final boolean lowPri) {
+ setLowPri(nativeHandle_, lowPri);
+ return this;
+ }
+
+ /**
+ * Returns true if this write request is of lower priority if compaction is
+ * behind.
+ *
+ * See {@link #setLowPri(boolean)}.
+ *
+ * @return true if this write request is of lower priority, false otherwise.
+ */
+ public boolean lowPri() {
+ return lowPri(nativeHandle_);
+ }
+
+ private native static long newWriteOptions();
+ private native static long copyWriteOptions(long handle);
+ @Override protected final native void disposeInternal(final long handle);
+
+ private native void setSync(long handle, boolean flag);
+ private native boolean sync(long handle);
+ private native void setDisableWAL(long handle, boolean flag);
+ private native boolean disableWAL(long handle);
+ private native void setIgnoreMissingColumnFamilies(final long handle,
+ final boolean ignoreMissingColumnFamilies);
+ private native boolean ignoreMissingColumnFamilies(final long handle);
+ private native void setNoSlowdown(final long handle,
+ final boolean noSlowdown);
+ private native boolean noSlowdown(final long handle);
+ private native void setLowPri(final long handle, final boolean lowPri);
+ private native boolean lowPri(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/ByteUtil.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/ByteUtil.java
new file mode 100644
index 000000000..9014fcba0
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/ByteUtil.java
@@ -0,0 +1,46 @@
+package org.rocksdb.util;
+
+import java.nio.ByteBuffer;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+public class ByteUtil {
+
+ /**
+ * Convert a String to a UTF-8 byte array.
+ *
+ * @param str the string
+ *
+ * @return the byte array.
+ */
+ public static byte[] bytes(final String str) {
+ return str.getBytes(UTF_8);
+ }
+
+ /**
+ * Compares the first {@code count} bytes of two areas of memory. Returns
+ * zero if they are the same, a value less than zero if {@code x} is
+ * lexically less than {@code y}, or a value greater than zero if {@code x}
+ * is lexically greater than {@code y}. Note that lexical order is determined
+ * as if comparing unsigned char arrays.
+ *
+ * Similar to <a href="https://github.com/gcc-mirror/gcc/blob/master/libiberty/memcmp.c">memcmp.c</a>.
+ *
+ * @param x the first value to compare with
+ * @param y the second value to compare against
+ * @param count the number of bytes to compare
+ *
+ * @return the result of the comparison
+ */
+ public static int memcmp(final ByteBuffer x, final ByteBuffer y,
+ final int count) {
+ for (int idx = 0; idx < count; idx++) {
+ final int aa = x.get(idx) & 0xff;
+ final int bb = y.get(idx) & 0xff;
+ if (aa != bb) {
+ return aa - bb;
+ }
+ }
+ return 0;
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
new file mode 100644
index 000000000..9561b0a31
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
@@ -0,0 +1,121 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.rocksdb.*;
+
+import java.nio.ByteBuffer;
+
+import static org.rocksdb.util.ByteUtil.memcmp;
+
+/**
+ * This is a Java Native implementation of the C++
+ * equivalent BytewiseComparatorImpl using {@link Slice}
+ *
+ * The performance of Comparators implemented in Java is always
+ * less than their C++ counterparts due to the bridging overhead,
+ * as such you likely don't want to use this apart from benchmarking
+ * and you most likely instead wanted
+ * {@link org.rocksdb.BuiltinComparator#BYTEWISE_COMPARATOR}
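+ *
+ * <p>A sketch of plugging this comparator into {@link org.rocksdb.Options}
+ * (illustrative only, given the performance caveat above):</p>
+ * <pre>{@code
+ * try (final ComparatorOptions copt = new ComparatorOptions();
+ *      final BytewiseComparator comparator = new BytewiseComparator(copt);
+ *      final Options options = new Options()
+ *          .setCreateIfMissing(true)
+ *          .setComparator(comparator)) {
+ *   // open a RocksDB instance with options...
+ * }
+ * }</pre>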
+ */
+public final class BytewiseComparator extends AbstractComparator {
+
+ public BytewiseComparator(final ComparatorOptions copt) {
+ super(copt);
+ }
+
+ @Override
+ public String name() {
+ return "rocksdb.java.BytewiseComparator";
+ }
+
+ @Override
+ public int compare(final ByteBuffer a, final ByteBuffer b) {
+ return _compare(a, b);
+ }
+
+ static int _compare(final ByteBuffer a, final ByteBuffer b) {
+ assert(a != null && b != null);
+ final int minLen = a.remaining() < b.remaining() ?
+ a.remaining() : b.remaining();
+ int r = memcmp(a, b, minLen);
+ if (r == 0) {
+ if (a.remaining() < b.remaining()) {
+ r = -1;
+ } else if (a.remaining() > b.remaining()) {
+ r = +1;
+ }
+ }
+ return r;
+ }
+
+ @Override
+ public void findShortestSeparator(final ByteBuffer start,
+ final ByteBuffer limit) {
+ // Find length of common prefix
+ final int minLength = Math.min(start.remaining(), limit.remaining());
+ int diffIndex = 0;
+ while (diffIndex < minLength &&
+ start.get(diffIndex) == limit.get(diffIndex)) {
+ diffIndex++;
+ }
+
+ if (diffIndex >= minLength) {
+ // Do not shorten if one string is a prefix of the other
+ } else {
+ final int startByte = start.get(diffIndex) & 0xff;
+ final int limitByte = limit.get(diffIndex) & 0xff;
+ if (startByte >= limitByte) {
+ // Cannot shorten since limit is smaller than start or start is
+ // already the shortest possible.
+ return;
+ }
+ assert(startByte < limitByte);
+
+ if (diffIndex < limit.remaining() - 1 || startByte + 1 < limitByte) {
+ start.put(diffIndex, (byte)((start.get(diffIndex) & 0xff) + 1));
+ start.limit(diffIndex + 1);
+ } else {
+ // v
+ // A A 1 A A A
+ // A A 2
+ //
+ // Incrementing the current byte will make start bigger than limit, we
+ // will skip this byte, and find the first non 0xFF byte in start and
+ // increment it.
+ diffIndex++;
+
+ while (diffIndex < start.remaining()) {
+ // Keep moving until we find the first non 0xFF byte to
+ // increment it
+ if ((start.get(diffIndex) & 0xff) <
+ 0xff) {
+ start.put(diffIndex, (byte)((start.get(diffIndex) & 0xff) + 1));
+ start.limit(diffIndex + 1);
+ break;
+ }
+ diffIndex++;
+ }
+ }
+ assert(compare(start.duplicate(), limit.duplicate()) < 0);
+ }
+ }
+
+ @Override
+ public void findShortSuccessor(final ByteBuffer key) {
+ // Find first character that can be incremented
+ final int n = key.remaining();
+ for (int i = 0; i < n; i++) {
+ final int byt = key.get(i) & 0xff;
+ if (byt != 0xff) {
+ key.put(i, (byte)(byt + 1));
+ key.limit(i+1);
+ return;
+ }
+ }
+ // key is a run of 0xff bytes. Leave it alone.
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java
new file mode 100644
index 000000000..b5de34b75
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java
@@ -0,0 +1,152 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb.util;
+
+import java.io.File;
+import java.io.IOException;
+
+public class Environment {
+ private static final String OS = System.getProperty("os.name").toLowerCase();
+ private static final String ARCH = System.getProperty("os.arch").toLowerCase();
+ private static boolean MUSL_LIBC;
+
+ static {
+ try {
+ final Process p = new ProcessBuilder("/usr/bin/env", "sh", "-c", "ldd /usr/bin/env | grep -q musl").start();
+ MUSL_LIBC = p.waitFor() == 0;
+ } catch (final IOException | InterruptedException e) {
+ MUSL_LIBC = false;
+ }
+ }
+
+ public static boolean isAarch64() {
+ return ARCH.contains("aarch64");
+ }
+
+ public static boolean isPowerPC() {
+ return ARCH.contains("ppc");
+ }
+
+ public static boolean isS390x() {
+ return ARCH.contains("s390x");
+ }
+
+ public static boolean isWindows() {
+ return (OS.contains("win"));
+ }
+
+ public static boolean isFreeBSD() {
+ return (OS.contains("freebsd"));
+ }
+
+ public static boolean isMac() {
+ return (OS.contains("mac"));
+ }
+
+ public static boolean isAix() {
+ return OS.contains("aix");
+ }
+
+ public static boolean isUnix() {
+ return OS.contains("nix") ||
+ OS.contains("nux");
+ }
+
+ public static boolean isMuslLibc() {
+ return MUSL_LIBC;
+ }
+
+ public static boolean isSolaris() {
+ return OS.contains("sunos");
+ }
+
+ public static boolean isOpenBSD() {
+ return (OS.contains("openbsd"));
+ }
+
+ public static boolean is64Bit() {
+ if (ARCH.indexOf("sparcv9") >= 0) {
+ return true;
+ }
+ return (ARCH.indexOf("64") > 0);
+ }
+
+ public static String getSharedLibraryName(final String name) {
+ return name + "jni";
+ }
+
+ public static String getSharedLibraryFileName(final String name) {
+ return appendLibOsSuffix("lib" + getSharedLibraryName(name), true);
+ }
+
+ /**
+ * Get the name of the libc implementation
+ *
+ * @return the name of the implementation,
+ * or null if the default for that platform (e.g. glibc on Linux).
+ */
+ public static /* @Nullable */ String getLibcName() {
+ if (isMuslLibc()) {
+ return "musl";
+ } else {
+ return null;
+ }
+ }
+
+ private static String getLibcPostfix() {
+ final String libcName = getLibcName();
+ if (libcName == null) {
+ return "";
+ }
+ return "-" + libcName;
+ }
+
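+ /**
+ * Get the platform-qualified JNI library name derived from the OS and
+ * architecture, e.g. (per the format strings below) {@code rocksdbjni-linux64}
+ * on 64-bit glibc Linux or {@code rocksdbjni-osx} on macOS, for
+ * {@code name = "rocksdb"}.
+ *
+ * @param name the library base name, e.g. {@code rocksdb}.
+ *
+ * @return the JNI library name for the current platform.
+ */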
+ public static String getJniLibraryName(final String name) {
+ if (isUnix()) {
+ final String arch = is64Bit() ? "64" : "32";
+ if (isPowerPC() || isAarch64()) {
+ return String.format("%sjni-linux-%s%s", name, ARCH, getLibcPostfix());
+ } else if (isS390x()) {
+ return String.format("%sjni-linux%s", name, ARCH);
+ } else {
+ return String.format("%sjni-linux%s%s", name, arch, getLibcPostfix());
+ }
+ } else if (isMac()) {
+ return String.format("%sjni-osx", name);
+ } else if (isFreeBSD()) {
+ return String.format("%sjni-freebsd%s", name, is64Bit() ? "64" : "32");
+ } else if (isAix() && is64Bit()) {
+ return String.format("%sjni-aix64", name);
+ } else if (isSolaris()) {
+ final String arch = is64Bit() ? "64" : "32";
+ return String.format("%sjni-solaris%s", name, arch);
+ } else if (isWindows() && is64Bit()) {
+ return String.format("%sjni-win64", name);
+ } else if (isOpenBSD()) {
+ return String.format("%sjni-openbsd%s", name, is64Bit() ? "64" : "32");
+ }
+
+ throw new UnsupportedOperationException(String.format("Cannot determine JNI library name for ARCH='%s' OS='%s' name='%s'", ARCH, OS, name));
+ }
+
+ public static String getJniLibraryFileName(final String name) {
+ return appendLibOsSuffix("lib" + getJniLibraryName(name), false);
+ }
+
+ private static String appendLibOsSuffix(final String libraryFileName, final boolean shared) {
+ if (isUnix() || isAix() || isSolaris() || isFreeBSD() || isOpenBSD()) {
+ return libraryFileName + ".so";
+ } else if (isMac()) {
+ return libraryFileName + (shared ? ".dylib" : ".jnilib");
+ } else if (isWindows()) {
+ return libraryFileName + ".dll";
+ }
+ throw new UnsupportedOperationException();
+ }
+
+ public static String getJniLibraryExtension() {
+ if (isWindows()) {
+ return ".dll";
+ }
+ return (isMac()) ? ".jnilib" : ".so";
+ }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/IntComparator.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/IntComparator.java
new file mode 100644
index 000000000..cc096cd14
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/IntComparator.java
@@ -0,0 +1,67 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.rocksdb.AbstractComparator;
+import org.rocksdb.ComparatorOptions;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This is a Java implementation of a Comparator for Java int
+ * keys.
+ *
+ * This comparator assumes keys are (at least) four bytes, so
+ * the caller must guarantee that in accessing other APIs in
+ * combination with this comparator.
+ *
+ * The performance of Comparators implemented in Java is always
+ * less than their C++ counterparts due to the bridging overhead,
+ * as such you likely don't want to use this apart from benchmarking
+ * or testing.
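+ *
+ * <p>A key-encoding sketch (keys must be encoded as 4-byte ints; the
+ * big-endian default order of {@code ByteBuffer} is assumed):</p>
+ * <pre>{@code
+ * try (final ComparatorOptions copt = new ComparatorOptions();
+ *      final IntComparator comparator = new IntComparator(copt)) {
+ *   final byte[] key = ByteBuffer.allocate(4).putInt(42).array();
+ *   // pass comparator via Options#setComparator and use key against the DB
+ * }
+ * }</pre>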
+ */
+public final class IntComparator extends AbstractComparator {
+
+ public IntComparator(final ComparatorOptions copt) {
+ super(copt);
+ }
+
+ @Override
+ public String name() {
+ return "rocksdb.java.IntComparator";
+ }
+
+ @Override
+ public int compare(final ByteBuffer a, final ByteBuffer b) {
+ return compareIntKeys(a, b);
+ }
+
+  /**
+   * Compares integer keys so that they are in ascending order.
+   *
+   * @param a four bytes representing an integer key
+   * @param b four bytes representing an integer key
+   *
+   * @return negative if a &lt; b, 0 if a == b, positive otherwise
+   */
+  private int compareIntKeys(final ByteBuffer a, final ByteBuffer b) {
+    final int iA = a.getInt();
+    final int iB = b.getInt();
+
+    // Subtract as longs to protect against overflow of the int
+    // difference, then clamp the result back into the int range.
+    final long diff = (long) iA - iB;
+    final int result;
+    if (diff < Integer.MIN_VALUE) {
+      result = Integer.MIN_VALUE;
+    } else if (diff > Integer.MAX_VALUE) {
+      result = Integer.MAX_VALUE;
+    } else {
+      result = (int) diff;
+    }
+    return result;
+  }
+}
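
A short sketch of how four-byte keys for this comparator are typically produced, and why the long-based clamp above matters (the helper names are illustrative):

    import java.nio.ByteBuffer;

    public class IntKeyDemo {
      // Encode an int as the four-byte (big-endian) key the
      // comparator expects.
      static byte[] intKey(final int i) {
        return ByteBuffer.allocate(4).putInt(i).array();
      }

      public static void main(String[] args) {
        // Naive int subtraction overflows: MIN_VALUE - 1 wraps to
        // MAX_VALUE, wrongly reporting MIN_VALUE > 1.
        System.out.println(Integer.MIN_VALUE - 1); // 2147483647
        // The long subtraction used by compareIntKeys stays negative.
        System.out.println((long) Integer.MIN_VALUE - 1 < 0); // true
      }
    }
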
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
new file mode 100644
index 000000000..4c06f80aa
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
@@ -0,0 +1,88 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.rocksdb.AbstractComparator;
+import org.rocksdb.BuiltinComparator;
+import org.rocksdb.ComparatorOptions;
+import org.rocksdb.Slice;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This is a Java implementation of the C++
+ * ReverseBytewiseComparatorImpl, operating on {@link Slice}s.
+ *
+ * Comparators implemented in Java always perform worse than their
+ * C++ counterparts due to the JNI bridging overhead; as such, you
+ * likely only want to use this comparator for benchmarking. In most
+ * cases you instead want
+ * {@link BuiltinComparator#REVERSE_BYTEWISE_COMPARATOR}.
+ */
+public final class ReverseBytewiseComparator extends AbstractComparator {
+
+ public ReverseBytewiseComparator(final ComparatorOptions copt) {
+ super(copt);
+ }
+
+ @Override
+ public String name() {
+ return "rocksdb.java.ReverseBytewiseComparator";
+ }
+
+ @Override
+ public int compare(final ByteBuffer a, final ByteBuffer b) {
+ return -BytewiseComparator._compare(a, b);
+ }
+
+ @Override
+ public void findShortestSeparator(final ByteBuffer start,
+ final ByteBuffer limit) {
+ // Find length of common prefix
+ final int minLength = Math.min(start.remaining(), limit.remaining());
+ int diffIndex = 0;
+ while (diffIndex < minLength &&
+ start.get(diffIndex) == limit.get(diffIndex)) {
+ diffIndex++;
+ }
+
+ assert(diffIndex <= minLength);
+ if (diffIndex == minLength) {
+ // Do not shorten if one string is a prefix of the other
+ //
+ // We could handle cases like:
+ // V
+ // A A 2 X Y
+ // A A 2
+ // in a similar way as BytewiseComparator::FindShortestSeparator().
+ // We keep it simple by not implementing it. We can come back to it
+ // later when needed.
+ } else {
+ final int startByte = start.get(diffIndex) & 0xff;
+ final int limitByte = limit.get(diffIndex) & 0xff;
+ if (startByte > limitByte && diffIndex < start.remaining() - 1) {
+ // Case like
+ // V
+ // A A 3 A A
+ // A A 1 B B
+ //
+ // or
+ // v
+ // A A 2 A A
+ // A A 1 B B
+ // In this case "AA2" will be good.
+          // The C++ implementation additionally asserts, under
+          // !NDEBUG, that the shortened start still compares
+          // correctly against its old value.
+          start.limit(diffIndex + 1);
+ assert(BytewiseComparator._compare(start.duplicate(), limit.duplicate()) > 0);
+ }
+ }
+ }
+}
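
As the class comment notes, production code should prefer the builtin comparator; a hedged sketch of the two ways to get reverse-bytewise ordering (standard org.rocksdb Options API assumed):

    import org.rocksdb.BuiltinComparator;
    import org.rocksdb.ComparatorOptions;
    import org.rocksdb.Options;
    import org.rocksdb.util.ReverseBytewiseComparator;

    public class ReverseOrderDemo {
      public static void main(String[] args) {
        // Preferred: the native builtin, no JNI bridge per comparison.
        try (final Options fast = new Options()
                 .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR)) {
          // open a DB with 'fast' ...
        }
        // Java version: mainly useful for measuring bridging overhead.
        try (final ComparatorOptions copt = new ComparatorOptions();
             final ReverseBytewiseComparator cmp =
                 new ReverseBytewiseComparator(copt);
             final Options slow = new Options().setComparator(cmp)) {
          // open a DB with 'slow' ...
        }
      }
    }
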
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java
new file mode 100644
index 000000000..0f717e8d4
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java
@@ -0,0 +1,16 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+public class SizeUnit {
+ public static final long KB = 1024L;
+ public static final long MB = KB * KB;
+ public static final long GB = KB * MB;
+ public static final long TB = KB * GB;
+ public static final long PB = KB * TB;
+
+ private SizeUnit() {}
+}
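
A minimal example of the intended use of these constants (the chosen option is just a common example, not something this class prescribes):

    import org.rocksdb.Options;
    import org.rocksdb.util.SizeUnit;

    public class SizeUnitDemo {
      public static void main(String[] args) {
        try (final Options options = new Options()
                 .setWriteBufferSize(64 * SizeUnit.MB)) {
          System.out.println(options.writeBufferSize()); // 67108864
        }
      }
    }
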
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/AbstractTransactionTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/AbstractTransactionTest.java
new file mode 100644
index 000000000..7cac3015b
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/AbstractTransactionTest.java
@@ -0,0 +1,902 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+
+/**
+ * Base class of {@link TransactionTest} and {@link OptimisticTransactionTest}
+ */
+public abstract class AbstractTransactionTest {
+
+  protected static final byte[] TXN_TEST_COLUMN_FAMILY =
+      "txn_test_cf".getBytes(UTF_8);
+
+ protected static final Random rand = PlatformRandomHelper.
+ getPlatformSpecificRandomFactory();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ public abstract DBContainer startDb()
+ throws RocksDBException;
+
+ @Test
+ public void setSnapshot() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.setSnapshot();
+ }
+ }
+
+ @Test
+ public void setSnapshotOnNextOperation() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.setSnapshotOnNextOperation();
+ txn.put("key1".getBytes(), "value1".getBytes());
+ }
+ }
+
+ @Test
+ public void setSnapshotOnNextOperation_transactionNotifier() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+
+ try(final TestTransactionNotifier notifier = new TestTransactionNotifier()) {
+ txn.setSnapshotOnNextOperation(notifier);
+ txn.put("key1".getBytes(), "value1".getBytes());
+
+ txn.setSnapshotOnNextOperation(notifier);
+ txn.put("key2".getBytes(), "value2".getBytes());
+
+ assertThat(notifier.getCreatedSnapshots().size()).isEqualTo(2);
+ }
+ }
+ }
+
+ @Test
+ public void getSnapshot() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.setSnapshot();
+ final Snapshot snapshot = txn.getSnapshot();
+ assertThat(snapshot.isOwningHandle()).isFalse();
+ }
+ }
+
+ @Test
+ public void getSnapshot_null() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final Snapshot snapshot = txn.getSnapshot();
+ assertThat(snapshot).isNull();
+ }
+ }
+
+ @Test
+ public void clearSnapshot() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.setSnapshot();
+ txn.clearSnapshot();
+ }
+ }
+
+ @Test
+ public void clearSnapshot_none() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.clearSnapshot();
+ }
+ }
+
+ @Test
+ public void commit() throws RocksDBException {
+ final byte k1[] = "rollback-key1".getBytes(UTF_8);
+ final byte v1[] = "rollback-value1".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb()) {
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(k1, v1);
+ txn.commit();
+ }
+
+ try(final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn2 = dbContainer.beginTransaction()) {
+ assertThat(txn2.get(readOptions, k1)).isEqualTo(v1);
+ }
+ }
+ }
+
+ @Test
+ public void rollback() throws RocksDBException {
+ final byte k1[] = "rollback-key1".getBytes(UTF_8);
+ final byte v1[] = "rollback-value1".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb()) {
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(k1, v1);
+ txn.rollback();
+ }
+
+ try(final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn2 = dbContainer.beginTransaction()) {
+ assertThat(txn2.get(readOptions, k1)).isNull();
+ }
+ }
+ }
+
+ @Test
+ public void savePoint() throws RocksDBException {
+ final byte k1[] = "savePoint-key1".getBytes(UTF_8);
+ final byte v1[] = "savePoint-value1".getBytes(UTF_8);
+ final byte k2[] = "savePoint-key2".getBytes(UTF_8);
+ final byte v2[] = "savePoint-value2".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(k1, v1);
+
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+
+ txn.setSavePoint();
+
+ txn.put(k2, v2);
+
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+ assertThat(txn.get(readOptions, k2)).isEqualTo(v2);
+
+ txn.rollbackToSavePoint();
+
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+ assertThat(txn.get(readOptions, k2)).isNull();
+
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ assertThat(txn2.get(readOptions, k1)).isEqualTo(v1);
+ assertThat(txn2.get(readOptions, k2)).isNull();
+ }
+ }
+ }
+
+ @Test
+ public void getPut_cf() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ assertThat(txn.get(testCf, readOptions, k1)).isNull();
+ txn.put(testCf, k1, v1);
+ assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1);
+ }
+ }
+
+ @Test
+ public void getPut() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.get(readOptions, k1)).isNull();
+ txn.put(k1, v1);
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+ }
+ }
+
+ @Test
+ public void multiGetPut_cf() throws RocksDBException {
+ final byte keys[][] = new byte[][] {
+ "key1".getBytes(UTF_8),
+ "key2".getBytes(UTF_8)};
+ final byte values[][] = new byte[][] {
+ "value1".getBytes(UTF_8),
+ "value2".getBytes(UTF_8)};
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ final List<ColumnFamilyHandle> cfList = Arrays.asList(testCf, testCf);
+
+ assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(new byte[][] { null, null });
+
+ txn.put(testCf, keys[0], values[0]);
+ txn.put(testCf, keys[1], values[1]);
+ assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values);
+ }
+ }
+
+ @Test
+ public void multiGetPut() throws RocksDBException {
+ final byte keys[][] = new byte[][] {
+ "key1".getBytes(UTF_8),
+ "key2".getBytes(UTF_8)};
+ final byte values[][] = new byte[][] {
+ "value1".getBytes(UTF_8),
+ "value2".getBytes(UTF_8)};
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+
+ assertThat(txn.multiGet(readOptions, keys)).isEqualTo(new byte[][] { null, null });
+
+ txn.put(keys[0], values[0]);
+ txn.put(keys[1], values[1]);
+ assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values);
+ }
+ }
+
+ @Test
+ public void getForUpdate_cf() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isNull();
+ txn.put(testCf, k1, v1);
+ assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1);
+ }
+ }
+
+ @Test
+ public void getForUpdate() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.getForUpdate(readOptions, k1, true)).isNull();
+ txn.put(k1, v1);
+ assertThat(txn.getForUpdate(readOptions, k1, true)).isEqualTo(v1);
+ }
+ }
+
+ @Test
+ public void multiGetForUpdate_cf() throws RocksDBException {
+ final byte keys[][] = new byte[][] {
+ "key1".getBytes(UTF_8),
+ "key2".getBytes(UTF_8)};
+ final byte values[][] = new byte[][] {
+ "value1".getBytes(UTF_8),
+ "value2".getBytes(UTF_8)};
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ final List<ColumnFamilyHandle> cfList = Arrays.asList(testCf, testCf);
+
+ assertThat(txn.multiGetForUpdate(readOptions, cfList, keys))
+ .isEqualTo(new byte[][] { null, null });
+
+ txn.put(testCf, keys[0], values[0]);
+ txn.put(testCf, keys[1], values[1]);
+ assertThat(txn.multiGetForUpdate(readOptions, cfList, keys))
+ .isEqualTo(values);
+ }
+ }
+
+ @Test
+ public void multiGetForUpdate() throws RocksDBException {
+ final byte keys[][] = new byte[][]{
+ "key1".getBytes(UTF_8),
+ "key2".getBytes(UTF_8)};
+ final byte values[][] = new byte[][]{
+ "value1".getBytes(UTF_8),
+ "value2".getBytes(UTF_8)};
+
+ try (final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.multiGetForUpdate(readOptions, keys)).isEqualTo(new byte[][]{null, null});
+
+ txn.put(keys[0], values[0]);
+ txn.put(keys[1], values[1]);
+ assertThat(txn.multiGetForUpdate(readOptions, keys)).isEqualTo(values);
+ }
+ }
+
+ @Test
+ public void getIterator() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
+
+ txn.put(k1, v1);
+
+ try(final RocksIterator iterator = txn.getIterator(readOptions)) {
+ iterator.seek(k1);
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo(k1);
+ assertThat(iterator.value()).isEqualTo(v1);
+ }
+ }
+ }
+
+ @Test
+ public void getIterator_cf() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
+
+ txn.put(testCf, k1, v1);
+
+ try(final RocksIterator iterator = txn.getIterator(readOptions, testCf)) {
+ iterator.seek(k1);
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo(k1);
+ assertThat(iterator.value()).isEqualTo(v1);
+ }
+ }
+ }
+
+ @Test
+ public void merge_cf() throws RocksDBException {
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ txn.merge(testCf, k1, v1);
+ }
+ }
+
+ @Test
+ public void merge() throws RocksDBException {
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.merge(k1, v1);
+ }
+ }
+
+ @Test
+ public void delete_cf() throws RocksDBException {
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ txn.put(testCf, k1, v1);
+ assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1);
+
+ txn.delete(testCf, k1);
+ assertThat(txn.get(testCf, readOptions, k1)).isNull();
+ }
+ }
+
+ @Test
+ public void delete() throws RocksDBException {
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(k1, v1);
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+
+ txn.delete(k1);
+ assertThat(txn.get(readOptions, k1)).isNull();
+ }
+ }
+
+ @Test
+ public void delete_parts_cf() throws RocksDBException {
+ final byte keyParts[][] = new byte[][] {
+ "ke".getBytes(UTF_8),
+ "y1".getBytes(UTF_8)};
+ final byte valueParts[][] = new byte[][] {
+ "val".getBytes(UTF_8),
+ "ue1".getBytes(UTF_8)};
+ final byte[] key = concat(keyParts);
+ final byte[] value = concat(valueParts);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ txn.put(testCf, keyParts, valueParts);
+ assertThat(txn.get(testCf, readOptions, key)).isEqualTo(value);
+
+ txn.delete(testCf, keyParts);
+
+ assertThat(txn.get(testCf, readOptions, key))
+ .isNull();
+ }
+ }
+
+ @Test
+ public void delete_parts() throws RocksDBException {
+ final byte keyParts[][] = new byte[][] {
+ "ke".getBytes(UTF_8),
+ "y1".getBytes(UTF_8)};
+ final byte valueParts[][] = new byte[][] {
+ "val".getBytes(UTF_8),
+ "ue1".getBytes(UTF_8)};
+ final byte[] key = concat(keyParts);
+ final byte[] value = concat(valueParts);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+
+ txn.put(keyParts, valueParts);
+
+ assertThat(txn.get(readOptions, key)).isEqualTo(value);
+
+ txn.delete(keyParts);
+
+ assertThat(txn.get(readOptions, key)).isNull();
+ }
+ }
+
+ @Test
+ public void getPutUntracked_cf() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ assertThat(txn.get(testCf, readOptions, k1)).isNull();
+ txn.putUntracked(testCf, k1, v1);
+ assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1);
+ }
+ }
+
+ @Test
+ public void getPutUntracked() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.get(readOptions, k1)).isNull();
+ txn.putUntracked(k1, v1);
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+ }
+ }
+
+ @Test
+ public void multiGetPutUntracked_cf() throws RocksDBException {
+ final byte keys[][] = new byte[][] {
+ "key1".getBytes(UTF_8),
+ "key2".getBytes(UTF_8)};
+ final byte values[][] = new byte[][] {
+ "value1".getBytes(UTF_8),
+ "value2".getBytes(UTF_8)};
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+
+ final List<ColumnFamilyHandle> cfList = Arrays.asList(testCf, testCf);
+
+ assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(new byte[][] { null, null });
+ txn.putUntracked(testCf, keys[0], values[0]);
+ txn.putUntracked(testCf, keys[1], values[1]);
+ assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values);
+ }
+ }
+
+ @Test
+ public void multiGetPutUntracked() throws RocksDBException {
+ final byte keys[][] = new byte[][] {
+ "key1".getBytes(UTF_8),
+ "key2".getBytes(UTF_8)};
+ final byte values[][] = new byte[][] {
+ "value1".getBytes(UTF_8),
+ "value2".getBytes(UTF_8)};
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+
+ assertThat(txn.multiGet(readOptions, keys)).isEqualTo(new byte[][] { null, null });
+ txn.putUntracked(keys[0], values[0]);
+ txn.putUntracked(keys[1], values[1]);
+ assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values);
+ }
+ }
+
+ @Test
+ public void mergeUntracked_cf() throws RocksDBException {
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ txn.mergeUntracked(testCf, k1, v1);
+ }
+ }
+
+ @Test
+ public void mergeUntracked() throws RocksDBException {
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.mergeUntracked(k1, v1);
+ }
+ }
+
+ @Test
+ public void deleteUntracked_cf() throws RocksDBException {
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ txn.put(testCf, k1, v1);
+ assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1);
+
+ txn.deleteUntracked(testCf, k1);
+ assertThat(txn.get(testCf, readOptions, k1)).isNull();
+ }
+ }
+
+ @Test
+ public void deleteUntracked() throws RocksDBException {
+ final byte[] k1 = "key1".getBytes(UTF_8);
+ final byte[] v1 = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(k1, v1);
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+
+ txn.deleteUntracked(k1);
+ assertThat(txn.get(readOptions, k1)).isNull();
+ }
+ }
+
+ @Test
+ public void deleteUntracked_parts_cf() throws RocksDBException {
+ final byte keyParts[][] = new byte[][] {
+ "ke".getBytes(UTF_8),
+ "y1".getBytes(UTF_8)};
+ final byte valueParts[][] = new byte[][] {
+ "val".getBytes(UTF_8),
+ "ue1".getBytes(UTF_8)};
+ final byte[] key = concat(keyParts);
+ final byte[] value = concat(valueParts);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ txn.put(testCf, keyParts, valueParts);
+ assertThat(txn.get(testCf, readOptions, key)).isEqualTo(value);
+
+ txn.deleteUntracked(testCf, keyParts);
+ assertThat(txn.get(testCf, readOptions, key)).isNull();
+ }
+ }
+
+ @Test
+ public void deleteUntracked_parts() throws RocksDBException {
+ final byte keyParts[][] = new byte[][] {
+ "ke".getBytes(UTF_8),
+ "y1".getBytes(UTF_8)};
+ final byte valueParts[][] = new byte[][] {
+ "val".getBytes(UTF_8),
+ "ue1".getBytes(UTF_8)};
+ final byte[] key = concat(keyParts);
+ final byte[] value = concat(valueParts);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(keyParts, valueParts);
+ assertThat(txn.get(readOptions, key)).isEqualTo(value);
+
+ txn.deleteUntracked(keyParts);
+ assertThat(txn.get(readOptions, key)).isNull();
+ }
+ }
+
+ @Test
+ public void putLogData() throws RocksDBException {
+ final byte[] blob = "blobby".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.putLogData(blob);
+ }
+ }
+
+ @Test
+  public void enableDisableIndexing() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.disableIndexing();
+ txn.enableIndexing();
+ txn.disableIndexing();
+ txn.enableIndexing();
+ }
+ }
+
+ @Test
+ public void numKeys() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ final byte k2[] = "key2".getBytes(UTF_8);
+ final byte v2[] = "value2".getBytes(UTF_8);
+ final byte k3[] = "key3".getBytes(UTF_8);
+ final byte v3[] = "value3".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ txn.put(k1, v1);
+ txn.put(testCf, k2, v2);
+ txn.merge(k3, v3);
+ txn.delete(testCf, k2);
+
+ assertThat(txn.getNumKeys()).isEqualTo(3);
+ assertThat(txn.getNumPuts()).isEqualTo(2);
+ assertThat(txn.getNumMerges()).isEqualTo(1);
+ assertThat(txn.getNumDeletes()).isEqualTo(1);
+ }
+ }
+
+ @Test
+ public void elapsedTime() throws RocksDBException, InterruptedException {
+ final long preStartTxnTime = System.currentTimeMillis();
+ try (final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ Thread.sleep(2);
+
+ final long txnElapsedTime = txn.getElapsedTime();
+ assertThat(txnElapsedTime).isLessThan(System.currentTimeMillis() - preStartTxnTime);
+ assertThat(txnElapsedTime).isGreaterThan(0);
+ }
+ }
+
+ @Test
+ public void getWriteBatch() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+
+ txn.put(k1, v1);
+
+ final WriteBatchWithIndex writeBatch = txn.getWriteBatch();
+ assertThat(writeBatch).isNotNull();
+ assertThat(writeBatch.isOwningHandle()).isFalse();
+ assertThat(writeBatch.count()).isEqualTo(1);
+ }
+ }
+
+ @Test
+ public void setLockTimeout() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ txn.setLockTimeout(1000);
+ }
+ }
+
+ @Test
+ public void writeOptions() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final WriteOptions writeOptions = new WriteOptions()
+ .setDisableWAL(true)
+ .setSync(true);
+ final Transaction txn = dbContainer.beginTransaction(writeOptions)) {
+
+ txn.put(k1, v1);
+
+ WriteOptions txnWriteOptions = txn.getWriteOptions();
+ assertThat(txnWriteOptions).isNotNull();
+ assertThat(txnWriteOptions.isOwningHandle()).isFalse();
+ assertThat(txnWriteOptions).isNotSameAs(writeOptions);
+ assertThat(txnWriteOptions.disableWAL()).isTrue();
+ assertThat(txnWriteOptions.sync()).isTrue();
+
+ txn.setWriteOptions(txnWriteOptions.setSync(false));
+ txnWriteOptions = txn.getWriteOptions();
+ assertThat(txnWriteOptions).isNotNull();
+ assertThat(txnWriteOptions.isOwningHandle()).isFalse();
+ assertThat(txnWriteOptions).isNotSameAs(writeOptions);
+ assertThat(txnWriteOptions.disableWAL()).isTrue();
+ assertThat(txnWriteOptions.sync()).isFalse();
+ }
+ }
+
+ @Test
+ public void undoGetForUpdate_cf() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isNull();
+ txn.put(testCf, k1, v1);
+ assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1);
+ txn.undoGetForUpdate(testCf, k1);
+ }
+ }
+
+ @Test
+ public void undoGetForUpdate() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.getForUpdate(readOptions, k1, true)).isNull();
+ txn.put(k1, v1);
+ assertThat(txn.getForUpdate(readOptions, k1, true)).isEqualTo(v1);
+ txn.undoGetForUpdate(k1);
+ }
+ }
+
+ @Test
+ public void rebuildFromWriteBatch() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ final byte k2[] = "key2".getBytes(UTF_8);
+ final byte v2[] = "value2".getBytes(UTF_8);
+ final byte k3[] = "key3".getBytes(UTF_8);
+ final byte v3[] = "value3".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions();
+ final Transaction txn = dbContainer.beginTransaction()) {
+
+ txn.put(k1, v1);
+
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+ assertThat(txn.getNumKeys()).isEqualTo(1);
+
+ try(final WriteBatch writeBatch = new WriteBatch()) {
+ writeBatch.put(k2, v2);
+ writeBatch.put(k3, v3);
+ txn.rebuildFromWriteBatch(writeBatch);
+
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+ assertThat(txn.get(readOptions, k2)).isEqualTo(v2);
+ assertThat(txn.get(readOptions, k3)).isEqualTo(v3);
+ assertThat(txn.getNumKeys()).isEqualTo(3);
+ }
+ }
+ }
+
+ @Test
+ public void getCommitTimeWriteBatch() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+
+ txn.put(k1, v1);
+ final WriteBatch writeBatch = txn.getCommitTimeWriteBatch();
+
+ assertThat(writeBatch).isNotNull();
+ assertThat(writeBatch.isOwningHandle()).isFalse();
+ assertThat(writeBatch.count()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void logNumber() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.getLogNumber()).isEqualTo(0);
+ final long logNumber = rand.nextLong();
+ txn.setLogNumber(logNumber);
+ assertThat(txn.getLogNumber()).isEqualTo(logNumber);
+ }
+ }
+
+ private static byte[] concat(final byte[][] bufs) {
+ int resultLength = 0;
+ for(final byte[] buf : bufs) {
+ resultLength += buf.length;
+ }
+
+ final byte[] result = new byte[resultLength];
+ int resultOffset = 0;
+ for(final byte[] buf : bufs) {
+ final int srcLength = buf.length;
+ System.arraycopy(buf, 0, result, resultOffset, srcLength);
+ resultOffset += srcLength;
+ }
+
+ return result;
+ }
+
+ private static class TestTransactionNotifier
+ extends AbstractTransactionNotifier {
+ private final List<Snapshot> createdSnapshots = new ArrayList<>();
+
+ @Override
+ public void snapshotCreated(final Snapshot newSnapshot) {
+ createdSnapshots.add(newSnapshot);
+ }
+
+ public List<Snapshot> getCreatedSnapshots() {
+ return createdSnapshots;
+ }
+ }
+
+ protected static abstract class DBContainer
+ implements AutoCloseable {
+ protected final WriteOptions writeOptions;
+ protected final List<ColumnFamilyHandle> columnFamilyHandles;
+ protected final ColumnFamilyOptions columnFamilyOptions;
+ protected final DBOptions options;
+
+ public DBContainer(final WriteOptions writeOptions,
+ final List<ColumnFamilyHandle> columnFamilyHandles,
+ final ColumnFamilyOptions columnFamilyOptions,
+ final DBOptions options) {
+ this.writeOptions = writeOptions;
+ this.columnFamilyHandles = columnFamilyHandles;
+ this.columnFamilyOptions = columnFamilyOptions;
+ this.options = options;
+ }
+
+ public abstract Transaction beginTransaction();
+
+ public abstract Transaction beginTransaction(
+ final WriteOptions writeOptions);
+
+ public ColumnFamilyHandle getTestColumnFamily() {
+ return columnFamilyHandles.get(1);
+ }
+
+ @Override
+ public abstract void close();
+ }
+}
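
A hedged sketch of what a concrete startDb() could look like for the pessimistic-transaction case; the real implementations live in TransactionTest and OptimisticTransactionTest, and the anonymous-subclass wiring here is illustrative only:

    public DBContainer startDb() throws RocksDBException {
      final DBOptions options = new DBOptions()
          .setCreateIfMissing(true)
          .setCreateMissingColumnFamilies(true);
      final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
      final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
          new ColumnFamilyDescriptor(TXN_TEST_COLUMN_FAMILY, cfOptions));
      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
      final WriteOptions writeOptions = new WriteOptions();
      final TransactionDB db = TransactionDB.open(options, txnDbOptions,
          dbFolder.getRoot().getAbsolutePath(), cfDescriptors, cfHandles);
      return new DBContainer(writeOptions, cfHandles, cfOptions, options) {
        @Override
        public Transaction beginTransaction() {
          return db.beginTransaction(writeOptions);
        }

        @Override
        public Transaction beginTransaction(final WriteOptions wo) {
          return db.beginTransaction(wo);
        }

        @Override
        public void close() {
          for (final ColumnFamilyHandle handle : columnFamilyHandles) {
            handle.close();
          }
          db.close();
          options.close();
        }
      };
    }
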
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java
new file mode 100644
index 000000000..de8017a91
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java
@@ -0,0 +1,261 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class BackupEngineTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Rule
+ public TemporaryFolder backupFolder = new TemporaryFolder();
+
+ @Test
+ public void backupDb() throws RocksDBException {
+ // Open empty database.
+ try(final Options opt = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ // Fill database with some test values
+ prepareDatabase(db);
+
+ // Create two backups
+ try(final BackupableDBOptions bopt = new BackupableDBOptions(
+ backupFolder.getRoot().getAbsolutePath());
+ final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+ be.createNewBackup(db, false);
+ be.createNewBackup(db, true);
+ verifyNumberOfValidBackups(be, 2);
+ }
+ }
+ }
+
+ @Test
+ public void deleteBackup() throws RocksDBException {
+ // Open empty database.
+ try(final Options opt = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // Fill database with some test values
+ prepareDatabase(db);
+ // Create two backups
+ try(final BackupableDBOptions bopt = new BackupableDBOptions(
+ backupFolder.getRoot().getAbsolutePath());
+ final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+ be.createNewBackup(db, false);
+ be.createNewBackup(db, true);
+ final List<BackupInfo> backupInfo =
+ verifyNumberOfValidBackups(be, 2);
+ // Delete the first backup
+ be.deleteBackup(backupInfo.get(0).backupId());
+ final List<BackupInfo> newBackupInfo =
+ verifyNumberOfValidBackups(be, 1);
+
+ // The second backup must remain.
+ assertThat(newBackupInfo.get(0).backupId()).
+ isEqualTo(backupInfo.get(1).backupId());
+ }
+ }
+ }
+
+ @Test
+ public void purgeOldBackups() throws RocksDBException {
+ // Open empty database.
+ try(final Options opt = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // Fill database with some test values
+ prepareDatabase(db);
+ // Create four backups
+ try(final BackupableDBOptions bopt = new BackupableDBOptions(
+ backupFolder.getRoot().getAbsolutePath());
+ final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+ be.createNewBackup(db, false);
+ be.createNewBackup(db, true);
+ be.createNewBackup(db, true);
+ be.createNewBackup(db, true);
+ final List<BackupInfo> backupInfo =
+ verifyNumberOfValidBackups(be, 4);
+ // Delete everything except the latest backup
+ be.purgeOldBackups(1);
+ final List<BackupInfo> newBackupInfo =
+ verifyNumberOfValidBackups(be, 1);
+ // The latest backup must remain.
+ assertThat(newBackupInfo.get(0).backupId()).
+ isEqualTo(backupInfo.get(3).backupId());
+ }
+ }
+ }
+
+ @Test
+ public void restoreLatestBackup() throws RocksDBException {
+ try(final Options opt = new Options().setCreateIfMissing(true)) {
+ // Open empty database.
+ RocksDB db = null;
+ try {
+ db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath());
+ // Fill database with some test values
+ prepareDatabase(db);
+
+ try (final BackupableDBOptions bopt = new BackupableDBOptions(
+ backupFolder.getRoot().getAbsolutePath());
+ final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+ be.createNewBackup(db, true);
+ verifyNumberOfValidBackups(be, 1);
+ db.put("key1".getBytes(), "valueV2".getBytes());
+ db.put("key2".getBytes(), "valueV2".getBytes());
+ be.createNewBackup(db, true);
+ verifyNumberOfValidBackups(be, 2);
+ db.put("key1".getBytes(), "valueV3".getBytes());
+ db.put("key2".getBytes(), "valueV3".getBytes());
+ assertThat(new String(db.get("key1".getBytes()))).endsWith("V3");
+ assertThat(new String(db.get("key2".getBytes()))).endsWith("V3");
+
+ db.close();
+ db = null;
+
+ verifyNumberOfValidBackups(be, 2);
+ // restore db from latest backup
+ try(final RestoreOptions ropts = new RestoreOptions(false)) {
+ be.restoreDbFromLatestBackup(dbFolder.getRoot().getAbsolutePath(),
+ dbFolder.getRoot().getAbsolutePath(), ropts);
+ }
+
+ // Open database again.
+ db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath());
+
+ // Values must have suffix V2 because of restoring latest backup.
+ assertThat(new String(db.get("key1".getBytes()))).endsWith("V2");
+ assertThat(new String(db.get("key2".getBytes()))).endsWith("V2");
+ }
+ } finally {
+ if(db != null) {
+ db.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void restoreFromBackup()
+ throws RocksDBException {
+ try(final Options opt = new Options().setCreateIfMissing(true)) {
+ RocksDB db = null;
+ try {
+ // Open empty database.
+ db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath());
+ // Fill database with some test values
+ prepareDatabase(db);
+ try (final BackupableDBOptions bopt = new BackupableDBOptions(
+ backupFolder.getRoot().getAbsolutePath());
+ final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+ be.createNewBackup(db, true);
+ verifyNumberOfValidBackups(be, 1);
+ db.put("key1".getBytes(), "valueV2".getBytes());
+ db.put("key2".getBytes(), "valueV2".getBytes());
+ be.createNewBackup(db, true);
+ verifyNumberOfValidBackups(be, 2);
+ db.put("key1".getBytes(), "valueV3".getBytes());
+ db.put("key2".getBytes(), "valueV3".getBytes());
+ assertThat(new String(db.get("key1".getBytes()))).endsWith("V3");
+ assertThat(new String(db.get("key2".getBytes()))).endsWith("V3");
+
+          // close the database
+ db.close();
+ db = null;
+
+          // restore the backup
+ final List<BackupInfo> backupInfo = verifyNumberOfValidBackups(be, 2);
+ // restore db from first backup
+ be.restoreDbFromBackup(backupInfo.get(0).backupId(),
+ dbFolder.getRoot().getAbsolutePath(),
+ dbFolder.getRoot().getAbsolutePath(),
+ new RestoreOptions(false));
+ // Open database again.
+ db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath());
+          // Values must have suffix V1 because we restored the first backup.
+ assertThat(new String(db.get("key1".getBytes()))).endsWith("V1");
+ assertThat(new String(db.get("key2".getBytes()))).endsWith("V1");
+ }
+ } finally {
+ if(db != null) {
+ db.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void backupDbWithMetadata() throws RocksDBException {
+ // Open empty database.
+ try (final Options opt = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+ // Fill database with some test values
+ prepareDatabase(db);
+
+ // Create two backups
+ try (final BackupableDBOptions bopt =
+ new BackupableDBOptions(backupFolder.getRoot().getAbsolutePath());
+ final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+ final String metadata = String.valueOf(ThreadLocalRandom.current().nextInt());
+ be.createNewBackupWithMetadata(db, metadata, true);
+ final List<BackupInfo> backupInfoList = verifyNumberOfValidBackups(be, 1);
+ assertThat(backupInfoList.get(0).appMetadata()).isEqualTo(metadata);
+ }
+ }
+ }
+
+ /**
+ * Verify backups.
+ *
+ * @param be {@link BackupEngine} instance.
+   * @param expectedNumberOfBackups the expected number of valid backups
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ private List<BackupInfo> verifyNumberOfValidBackups(final BackupEngine be,
+ final int expectedNumberOfBackups) throws RocksDBException {
+ // Verify that backups exist
+ assertThat(be.getCorruptedBackups().length).
+ isEqualTo(0);
+ be.garbageCollect();
+ final List<BackupInfo> backupInfo = be.getBackupInfo();
+ assertThat(backupInfo.size()).
+ isEqualTo(expectedNumberOfBackups);
+ return backupInfo;
+ }
+
+ /**
+ * Fill database with some test values.
+ *
+ * @param db {@link RocksDB} instance.
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ private void prepareDatabase(final RocksDB db)
+ throws RocksDBException {
+ db.put("key1".getBytes(), "valueV1".getBytes());
+ db.put("key2".getBytes(), "valueV1".getBytes());
+ }
+}
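
Condensed from the tests above, the whole backup lifecycle in one place (paths are placeholders; the restore call is commented out because it must run against a closed database):

    import org.rocksdb.*;

    public class BackupDemo {
      public static void main(String[] args) throws RocksDBException {
        final String dbPath = "/tmp/demo-db";          // placeholder
        final String backupPath = "/tmp/demo-backups"; // placeholder
        try (final Options opt = new Options().setCreateIfMissing(true);
             final RocksDB db = RocksDB.open(opt, dbPath);
             final BackupableDBOptions bopt =
                 new BackupableDBOptions(backupPath);
             final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
          db.put("k".getBytes(), "v".getBytes());
          // 'true' flushes the memtable first, so the backup is
          // self-contained even without copying WAL files.
          be.createNewBackup(db, true);
          // Later, with the DB closed:
          // be.restoreDbFromLatestBackup(dbPath, dbPath,
          //     new RestoreOptions(false));
        }
      }
    }
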
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java
new file mode 100644
index 000000000..a216ba574
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java
@@ -0,0 +1,351 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.Random;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class BackupableDBOptionsTest {
+
+ private final static String ARBITRARY_PATH =
+ System.getProperty("java.io.tmpdir");
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public ExpectedException exception = ExpectedException.none();
+
+ public static final Random rand = PlatformRandomHelper.
+ getPlatformSpecificRandomFactory();
+
+ @Test
+ public void backupDir() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ assertThat(backupableDBOptions.backupDir()).
+ isEqualTo(ARBITRARY_PATH);
+ }
+ }
+
+ @Test
+ public void env() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ assertThat(backupableDBOptions.backupEnv()).
+ isNull();
+
+ try(final Env env = new RocksMemEnv(Env.getDefault())) {
+ backupableDBOptions.setBackupEnv(env);
+ assertThat(backupableDBOptions.backupEnv())
+ .isEqualTo(env);
+ }
+ }
+ }
+
+ @Test
+ public void shareTableFiles() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ final boolean value = rand.nextBoolean();
+ backupableDBOptions.setShareTableFiles(value);
+ assertThat(backupableDBOptions.shareTableFiles()).
+ isEqualTo(value);
+ }
+ }
+
+ @Test
+ public void infoLog() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ assertThat(backupableDBOptions.infoLog()).
+ isNull();
+
+ try(final Options options = new Options();
+ final Logger logger = new Logger(options){
+ @Override
+ protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+            // no-op: this test only needs a Logger instance
+ }
+ }) {
+ backupableDBOptions.setInfoLog(logger);
+ assertThat(backupableDBOptions.infoLog())
+ .isEqualTo(logger);
+ }
+ }
+ }
+
+ @Test
+ public void sync() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ final boolean value = rand.nextBoolean();
+ backupableDBOptions.setSync(value);
+ assertThat(backupableDBOptions.sync()).isEqualTo(value);
+ }
+ }
+
+ @Test
+ public void destroyOldData() {
+ try (final BackupableDBOptions backupableDBOptions =
+        new BackupableDBOptions(ARBITRARY_PATH)) {
+ final boolean value = rand.nextBoolean();
+ backupableDBOptions.setDestroyOldData(value);
+ assertThat(backupableDBOptions.destroyOldData()).
+ isEqualTo(value);
+ }
+ }
+
+ @Test
+ public void backupLogFiles() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ final boolean value = rand.nextBoolean();
+ backupableDBOptions.setBackupLogFiles(value);
+ assertThat(backupableDBOptions.backupLogFiles()).
+ isEqualTo(value);
+ }
+ }
+
+ @Test
+ public void backupRateLimit() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ final long value = Math.abs(rand.nextLong());
+ backupableDBOptions.setBackupRateLimit(value);
+ assertThat(backupableDBOptions.backupRateLimit()).
+ isEqualTo(value);
+ // negative will be mapped to 0
+ backupableDBOptions.setBackupRateLimit(-1);
+ assertThat(backupableDBOptions.backupRateLimit()).
+ isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void backupRateLimiter() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ assertThat(backupableDBOptions.backupEnv()).
+ isNull();
+
+ try(final RateLimiter backupRateLimiter =
+ new RateLimiter(999)) {
+ backupableDBOptions.setBackupRateLimiter(backupRateLimiter);
+ assertThat(backupableDBOptions.backupRateLimiter())
+ .isEqualTo(backupRateLimiter);
+ }
+ }
+ }
+
+ @Test
+ public void restoreRateLimit() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ final long value = Math.abs(rand.nextLong());
+ backupableDBOptions.setRestoreRateLimit(value);
+ assertThat(backupableDBOptions.restoreRateLimit()).
+ isEqualTo(value);
+ // negative will be mapped to 0
+ backupableDBOptions.setRestoreRateLimit(-1);
+ assertThat(backupableDBOptions.restoreRateLimit()).
+ isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void restoreRateLimiter() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ assertThat(backupableDBOptions.backupEnv()).
+ isNull();
+
+ try(final RateLimiter restoreRateLimiter =
+ new RateLimiter(911)) {
+ backupableDBOptions.setRestoreRateLimiter(restoreRateLimiter);
+ assertThat(backupableDBOptions.restoreRateLimiter())
+ .isEqualTo(restoreRateLimiter);
+ }
+ }
+ }
+
+ @Test
+ public void shareFilesWithChecksum() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ boolean value = rand.nextBoolean();
+ backupableDBOptions.setShareFilesWithChecksum(value);
+ assertThat(backupableDBOptions.shareFilesWithChecksum()).
+ isEqualTo(value);
+ }
+ }
+
+ @Test
+ public void maxBackgroundOperations() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ final int value = rand.nextInt();
+ backupableDBOptions.setMaxBackgroundOperations(value);
+ assertThat(backupableDBOptions.maxBackgroundOperations()).
+ isEqualTo(value);
+ }
+ }
+
+ @Test
+ public void callbackTriggerIntervalSize() {
+ try (final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH)) {
+ final long value = rand.nextLong();
+ backupableDBOptions.setCallbackTriggerIntervalSize(value);
+ assertThat(backupableDBOptions.callbackTriggerIntervalSize()).
+ isEqualTo(value);
+ }
+ }
+
+ @Test
+ public void failBackupDirIsNull() {
+ exception.expect(IllegalArgumentException.class);
+ try (final BackupableDBOptions opts = new BackupableDBOptions(null)) {
+      // no-op
+ }
+ }
+
+ @Test
+ public void failBackupDirIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.backupDir();
+ }
+ }
+
+ @Test
+ public void failSetShareTableFilesIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.setShareTableFiles(true);
+ }
+ }
+
+ @Test
+ public void failShareTableFilesIfDisposed() {
+    try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.shareTableFiles();
+ }
+ }
+
+ @Test
+ public void failSetSyncIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.setSync(true);
+ }
+ }
+
+ @Test
+ public void failSyncIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.sync();
+ }
+ }
+
+ @Test
+ public void failSetDestroyOldDataIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.setDestroyOldData(true);
+ }
+ }
+
+ @Test
+ public void failDestroyOldDataIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.destroyOldData();
+ }
+ }
+
+ @Test
+ public void failSetBackupLogFilesIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.setBackupLogFiles(true);
+ }
+ }
+
+ @Test
+ public void failBackupLogFilesIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.backupLogFiles();
+ }
+ }
+
+ @Test
+ public void failSetBackupRateLimitIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.setBackupRateLimit(1);
+ }
+ }
+
+ @Test
+ public void failBackupRateLimitIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.backupRateLimit();
+ }
+ }
+
+ @Test
+ public void failSetRestoreRateLimitIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.setRestoreRateLimit(1);
+ }
+ }
+
+ @Test
+ public void failRestoreRateLimitIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.restoreRateLimit();
+ }
+ }
+
+ @Test
+ public void failSetShareFilesWithChecksumIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.setShareFilesWithChecksum(true);
+ }
+ }
+
+ @Test
+ public void failShareFilesWithChecksumIfDisposed() {
+ try (final BackupableDBOptions options =
+ setupUninitializedBackupableDBOptions(exception)) {
+ options.shareFilesWithChecksum();
+ }
+ }
+
+ private BackupableDBOptions setupUninitializedBackupableDBOptions(
+ ExpectedException exception) {
+ final BackupableDBOptions backupableDBOptions =
+ new BackupableDBOptions(ARBITRARY_PATH);
+ backupableDBOptions.close();
+ exception.expect(AssertionError.class);
+ return backupableDBOptions;
+ }
+}
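
All of the failXxxIfDisposed tests rely on the same pattern: once close() frees the native handle, the next access trips an assertion. A minimal standalone reproduction (assuming JVM assertions are enabled with -ea, which the AssertionError expectation implies):

    import org.rocksdb.BackupableDBOptions;

    public class DisposedHandleDemo {
      public static void main(String[] args) {
        final BackupableDBOptions opts =
            new BackupableDBOptions("/tmp"); // placeholder directory
        opts.close();      // frees the underlying native handle
        opts.backupDir();  // throws AssertionError when run with -ea
      }
    }
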
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
new file mode 100644
index 000000000..6fdd314cb
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
@@ -0,0 +1,393 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.nio.charset.StandardCharsets;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class BlockBasedTableConfigTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void cacheIndexAndFilterBlocks() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setCacheIndexAndFilterBlocks(true);
+ assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocks()).
+        isTrue();
+ }
+
+ @Test
+ public void cacheIndexAndFilterBlocksWithHighPriority() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true);
+ assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocksWithHighPriority()).
+ isTrue();
+ }
+
+ @Test
+ public void pinL0FilterAndIndexBlocksInCache() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setPinL0FilterAndIndexBlocksInCache(true);
+ assertThat(blockBasedTableConfig.pinL0FilterAndIndexBlocksInCache()).
+ isTrue();
+ }
+
+ @Test
+ public void pinTopLevelIndexAndFilter() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setPinTopLevelIndexAndFilter(false);
+ assertThat(blockBasedTableConfig.pinTopLevelIndexAndFilter()).
+ isFalse();
+ }
+
+ @Test
+ public void indexType() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ assertThat(IndexType.values().length).isEqualTo(3);
+ blockBasedTableConfig.setIndexType(IndexType.kHashSearch);
+    assertThat(blockBasedTableConfig.indexType())
+        .isEqualTo(IndexType.kHashSearch);
+    assertThat(IndexType.valueOf("kBinarySearch")).isNotNull();
+    blockBasedTableConfig.setIndexType(IndexType.valueOf("kBinarySearch"));
+    assertThat(blockBasedTableConfig.indexType())
+        .isEqualTo(IndexType.kBinarySearch);
+ }
+
+ @Test
+ public void dataBlockIndexType() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinaryAndHash);
+    assertThat(blockBasedTableConfig.dataBlockIndexType())
+        .isEqualTo(DataBlockIndexType.kDataBlockBinaryAndHash);
+    blockBasedTableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinarySearch);
+    assertThat(blockBasedTableConfig.dataBlockIndexType())
+        .isEqualTo(DataBlockIndexType.kDataBlockBinarySearch);
+ }
+
+ @Test
+ public void checksumType() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ assertThat(ChecksumType.values().length).isEqualTo(3);
+ assertThat(ChecksumType.valueOf("kxxHash")).
+ isEqualTo(ChecksumType.kxxHash);
+ blockBasedTableConfig.setChecksumType(ChecksumType.kNoChecksum);
+ blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash);
+    assertThat(blockBasedTableConfig.checksumType())
+        .isEqualTo(ChecksumType.kxxHash);
+ }
+
+ @Test
+ public void noBlockCache() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setNoBlockCache(true);
+ assertThat(blockBasedTableConfig.noBlockCache()).isTrue();
+ }
+
+ @Test
+ public void blockCache() {
+ try (
+ final Cache cache = new LRUCache(17 * 1024 * 1024);
+ final Options options = new Options().setTableFormatConfig(
+ new BlockBasedTableConfig().setBlockCache(cache))) {
+ assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable");
+ }
+ }
+
+ @Test
+ public void blockCacheIntegration() throws RocksDBException {
+ try (final Cache cache = new LRUCache(8 * 1024 * 1024);
+ final Statistics statistics = new Statistics()) {
+ for (int shard = 0; shard < 8; shard++) {
+ try (final Options options =
+ new Options()
+ .setCreateIfMissing(true)
+ .setStatistics(statistics)
+ .setTableFormatConfig(new BlockBasedTableConfig().setBlockCache(cache));
+ final RocksDB db =
+ RocksDB.open(options, dbFolder.getRoot().getAbsolutePath() + "/" + shard)) {
+ final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
+ final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
+
+ db.put(key, value);
+ db.flush(new FlushOptions());
+ db.get(key);
+
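+ // the cache and statistics are shared by all eight databases, so each iteration adds exactly one new block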
+ assertThat(statistics.getTickerCount(TickerType.BLOCK_CACHE_ADD)).isEqualTo(shard + 1);
+ }
+ }
+ }
+ }
+
+ @Test
+ public void persistentCache() throws RocksDBException {
+ try (final DBOptions dbOptions = new DBOptions().
+ setInfoLogLevel(InfoLogLevel.INFO_LEVEL).
+ setCreateIfMissing(true);
+ final Logger logger = new Logger(dbOptions) {
+ @Override
+ protected void log(final InfoLogLevel infoLogLevel, final String logMsg) {
+ System.out.println(infoLogLevel.name() + ": " + logMsg);
+ }
+ }) {
+ try (final PersistentCache persistentCache =
+ new PersistentCache(Env.getDefault(), dbFolder.getRoot().getPath(), 1024 * 1024 * 100, logger, false);
+ final Options options = new Options().setTableFormatConfig(
+ new BlockBasedTableConfig().setPersistentCache(persistentCache))) {
+ assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable");
+ }
+ }
+ }
+
+ @Test
+ public void blockCacheCompressed() {
+ try (final Cache cache = new LRUCache(17 * 1024 * 1024);
+ final Options options = new Options().setTableFormatConfig(
+ new BlockBasedTableConfig().setBlockCacheCompressed(cache))) {
+ assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable");
+ }
+ }
+
+ @Ignore("See issue: https://github.com/facebook/rocksdb/issues/4822")
+ @Test
+ public void blockCacheCompressedIntegration() throws RocksDBException {
+ final byte[] key1 = "some-key1".getBytes(StandardCharsets.UTF_8);
+ final byte[] key2 = "some-key1".getBytes(StandardCharsets.UTF_8);
+ final byte[] key3 = "some-key1".getBytes(StandardCharsets.UTF_8);
+ final byte[] key4 = "some-key1".getBytes(StandardCharsets.UTF_8);
+ final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
+
+ try (final Cache compressedCache = new LRUCache(8 * 1024 * 1024);
+ final Statistics statistics = new Statistics()) {
+
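+ // disable the uncompressed block cache entirely so that reads can only be served by the compressed cache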
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig()
+ .setNoBlockCache(true)
+ .setBlockCache(null)
+ .setBlockCacheCompressed(compressedCache)
+ .setFormatVersion(4);
+
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setStatistics(statistics)
+ .setTableFormatConfig(blockBasedTableConfig)) {
+
+ for (int shard = 0; shard < 8; shard++) {
+ try (final FlushOptions flushOptions = new FlushOptions();
+ final WriteOptions writeOptions = new WriteOptions();
+ final ReadOptions readOptions = new ReadOptions();
+ final RocksDB db =
+ RocksDB.open(options, dbFolder.getRoot().getAbsolutePath() + "/" + shard)) {
+
+ db.put(writeOptions, key1, value);
+ db.put(writeOptions, key2, value);
+ db.put(writeOptions, key3, value);
+ db.put(writeOptions, key4, value);
+ db.flush(flushOptions);
+
+ db.get(readOptions, key1);
+ db.get(readOptions, key2);
+ db.get(readOptions, key3);
+ db.get(readOptions, key4);
+
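+ // the compressed cache is shared across all shards, so one block is added per iteration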
+ assertThat(statistics.getTickerCount(TickerType.BLOCK_CACHE_COMPRESSED_ADD)).isEqualTo(shard + 1);
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void blockSize() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockSize(10);
+ assertThat(blockBasedTableConfig.blockSize()).isEqualTo(10);
+ }
+
+ @Test
+ public void blockSizeDeviation() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockSizeDeviation(12);
+ assertThat(blockBasedTableConfig.blockSizeDeviation()).
+ isEqualTo(12);
+ }
+
+ @Test
+ public void blockRestartInterval() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockRestartInterval(15);
+ assertThat(blockBasedTableConfig.blockRestartInterval()).
+ isEqualTo(15);
+ }
+
+ @Test
+ public void indexBlockRestartInterval() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setIndexBlockRestartInterval(15);
+ assertThat(blockBasedTableConfig.indexBlockRestartInterval()).
+ isEqualTo(15);
+ }
+
+ @Test
+ public void metadataBlockSize() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setMetadataBlockSize(1024);
+ assertThat(blockBasedTableConfig.metadataBlockSize()).
+ isEqualTo(1024);
+ }
+
+ @Test
+ public void partitionFilters() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setPartitionFilters(true);
+ assertThat(blockBasedTableConfig.partitionFilters()).
+ isTrue();
+ }
+
+ @Test
+ public void useDeltaEncoding() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setUseDeltaEncoding(false);
+ assertThat(blockBasedTableConfig.useDeltaEncoding()).
+ isFalse();
+ }
+
+ @Test
+ public void blockBasedTableWithFilterPolicy() {
+ try(final Options options = new Options()
+ .setTableFormatConfig(new BlockBasedTableConfig()
+ .setFilterPolicy(new BloomFilter(10)))) {
+ assertThat(options.tableFactoryName()).
+ isEqualTo("BlockBasedTable");
+ }
+ }
+
+ @Test
+ public void blockBasedTableWithoutFilterPolicy() {
+ try(final Options options = new Options().setTableFormatConfig(
+ new BlockBasedTableConfig().setFilterPolicy(null))) {
+ assertThat(options.tableFactoryName()).
+ isEqualTo("BlockBasedTable");
+ }
+ }
+
+ @Test
+ public void wholeKeyFiltering() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setWholeKeyFiltering(false);
+ assertThat(blockBasedTableConfig.wholeKeyFiltering()).
+ isFalse();
+ }
+
+ @Test
+ public void verifyCompression() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setVerifyCompression(true);
+ assertThat(blockBasedTableConfig.verifyCompression()).
+ isTrue();
+ }
+
+ @Test
+ public void readAmpBytesPerBit() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setReadAmpBytesPerBit(2);
+ assertThat(blockBasedTableConfig.readAmpBytesPerBit()).
+ isEqualTo(2);
+ }
+
+ @Test
+ public void formatVersion() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ for (int version = 0; version < 5; version++) {
+ blockBasedTableConfig.setFormatVersion(version);
+ assertThat(blockBasedTableConfig.formatVersion()).isEqualTo(version);
+ }
+ }
+
+ @Test(expected = AssertionError.class)
+ public void formatVersionFailNegative() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setFormatVersion(-1);
+ }
+
+ @Test(expected = AssertionError.class)
+ public void formatVersionFailIllegalVersion() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setFormatVersion(99);
+ }
+
+ @Test
+ public void enableIndexCompression() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setEnableIndexCompression(false);
+ assertThat(blockBasedTableConfig.enableIndexCompression()).
+ isFalse();
+ }
+
+ @Test
+ public void blockAlign() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockAlign(true);
+ assertThat(blockBasedTableConfig.blockAlign()).
+ isTrue();
+ }
+
+ @Deprecated
+ @Test
+ public void hashIndexAllowCollision() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setHashIndexAllowCollision(false);
+ assertThat(blockBasedTableConfig.hashIndexAllowCollision()).
+ isTrue(); // NOTE: setHashIndexAllowCollision should do nothing!
+ }
+
+ @Deprecated
+ @Test
+ public void blockCacheSize() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockCacheSize(8 * 1024);
+ assertThat(blockBasedTableConfig.blockCacheSize()).
+ isEqualTo(8 * 1024);
+ }
+
+ @Deprecated
+ @Test
+ public void blockCacheNumShardBits() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setCacheNumShardBits(5);
+ assertThat(blockBasedTableConfig.cacheNumShardBits()).
+ isEqualTo(5);
+ }
+
+ @Deprecated
+ @Test
+ public void blockCacheCompressedSize() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockCacheCompressedSize(40);
+ assertThat(blockBasedTableConfig.blockCacheCompressedSize()).
+ isEqualTo(40);
+ }
+
+ @Deprecated
+ @Test
+ public void blockCacheCompressedNumShardBits() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockCacheCompressedNumShardBits(4);
+ assertThat(blockBasedTableConfig.blockCacheCompressedNumShardBits()).
+ isEqualTo(4);
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java
new file mode 100644
index 000000000..e238ae07b
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java
@@ -0,0 +1,145 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class BuiltinComparatorTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void builtinForwardComparator()
+ throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
+ final RocksDB rocksDb = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())
+ ) {
+ rocksDb.put("abc1".getBytes(), "abc1".getBytes());
+ rocksDb.put("abc2".getBytes(), "abc2".getBytes());
+ rocksDb.put("abc3".getBytes(), "abc3".getBytes());
+
+ try(final RocksIterator rocksIterator = rocksDb.newIterator()) {
+ // Iterate over keys using an iterator
+ rocksIterator.seekToFirst();
+ assertThat(rocksIterator.isValid()).isTrue();
+ assertThat(rocksIterator.key()).isEqualTo(
+ "abc1".getBytes());
+ assertThat(rocksIterator.value()).isEqualTo(
+ "abc1".getBytes());
+ rocksIterator.next();
+ assertThat(rocksIterator.isValid()).isTrue();
+ assertThat(rocksIterator.key()).isEqualTo(
+ "abc2".getBytes());
+ assertThat(rocksIterator.value()).isEqualTo(
+ "abc2".getBytes());
+ rocksIterator.next();
+ assertThat(rocksIterator.isValid()).isTrue();
+ assertThat(rocksIterator.key()).isEqualTo(
+ "abc3".getBytes());
+ assertThat(rocksIterator.value()).isEqualTo(
+ "abc3".getBytes());
+ rocksIterator.next();
+ assertThat(rocksIterator.isValid()).isFalse();
+ // Get last one
+ rocksIterator.seekToLast();
+ assertThat(rocksIterator.isValid()).isTrue();
+ assertThat(rocksIterator.key()).isEqualTo(
+ "abc3".getBytes());
+ assertThat(rocksIterator.value()).isEqualTo(
+ "abc3".getBytes());
+ // Seek for abc
+ rocksIterator.seek("abc".getBytes());
+ assertThat(rocksIterator.isValid()).isTrue();
+ assertThat(rocksIterator.key()).isEqualTo(
+ "abc1".getBytes());
+ assertThat(rocksIterator.value()).isEqualTo(
+ "abc1".getBytes());
+ }
+ }
+ }
+
+ @Test
+ public void builtinReverseComparator()
+ throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR);
+ final RocksDB rocksDb = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())
+ ) {
+
+ rocksDb.put("abc1".getBytes(), "abc1".getBytes());
+ rocksDb.put("abc2".getBytes(), "abc2".getBytes());
+ rocksDb.put("abc3".getBytes(), "abc3".getBytes());
+
+ try (final RocksIterator rocksIterator = rocksDb.newIterator()) {
+ // Iterate over keys using an iterator
+ rocksIterator.seekToFirst();
+ assertThat(rocksIterator.isValid()).isTrue();
+ assertThat(rocksIterator.key()).isEqualTo(
+ "abc3".getBytes());
+ assertThat(rocksIterator.value()).isEqualTo(
+ "abc3".getBytes());
+ rocksIterator.next();
+ assertThat(rocksIterator.isValid()).isTrue();
+ assertThat(rocksIterator.key()).isEqualTo(
+ "abc2".getBytes());
+ assertThat(rocksIterator.value()).isEqualTo(
+ "abc2".getBytes());
+ rocksIterator.next();
+ assertThat(rocksIterator.isValid()).isTrue();
+ assertThat(rocksIterator.key()).isEqualTo(
+ "abc1".getBytes());
+ assertThat(rocksIterator.value()).isEqualTo(
+ "abc1".getBytes());
+ rocksIterator.next();
+ assertThat(rocksIterator.isValid()).isFalse();
+ // Get last one
+ rocksIterator.seekToLast();
+ assertThat(rocksIterator.isValid()).isTrue();
+ assertThat(rocksIterator.key()).isEqualTo(
+ "abc1".getBytes());
+ assertThat(rocksIterator.value()).isEqualTo(
+ "abc1".getBytes());
+ // Will be invalid because abc is after abc1
+ rocksIterator.seek("abc".getBytes());
+ assertThat(rocksIterator.isValid()).isFalse();
+ // Will be abc3 because, in reverse bytewise order,
+ // the next key after abc999 is abc3
+ rocksIterator.seek("abc999".getBytes());
+ assertThat(rocksIterator.key()).isEqualTo(
+ "abc3".getBytes());
+ assertThat(rocksIterator.value()).isEqualTo(
+ "abc3".getBytes());
+ }
+ }
+ }
+
+ @Test
+ public void builtinComparatorEnum() {
+ assertThat(BuiltinComparator.BYTEWISE_COMPARATOR.ordinal())
+ .isEqualTo(0);
+ assertThat(
+ BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR.ordinal())
+ .isEqualTo(1);
+ assertThat(BuiltinComparator.values().length).isEqualTo(2);
+ assertThat(BuiltinComparator.valueOf("BYTEWISE_COMPARATOR")).
+ isEqualTo(BuiltinComparator.BYTEWISE_COMPARATOR);
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java
new file mode 100644
index 000000000..c2cc6fc62
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java
@@ -0,0 +1,83 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CheckPointTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Rule
+ public TemporaryFolder checkpointFolder = new TemporaryFolder();
+
+ @Test
+ public void checkPoint() throws RocksDBException {
+ try (final Options options = new Options().
+ setCreateIfMissing(true)) {
+
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ db.put("key".getBytes(), "value".getBytes());
+ try (final Checkpoint checkpoint = Checkpoint.create(db)) {
+ checkpoint.createCheckpoint(checkpointFolder.
+ getRoot().getAbsolutePath() + "/snapshot1");
+ db.put("key2".getBytes(), "value2".getBytes());
+ checkpoint.createCheckpoint(checkpointFolder.
+ getRoot().getAbsolutePath() + "/snapshot2");
+ }
+ }
+
+ try (final RocksDB db = RocksDB.open(options,
+ checkpointFolder.getRoot().getAbsolutePath() +
+ "/snapshot1")) {
+ assertThat(new String(db.get("key".getBytes()))).
+ isEqualTo("value");
+ assertThat(db.get("key2".getBytes())).isNull();
+ }
+
+ try (final RocksDB db = RocksDB.open(options,
+ checkpointFolder.getRoot().getAbsolutePath() +
+ "/snapshot2")) {
+ assertThat(new String(db.get("key".getBytes()))).
+ isEqualTo("value");
+ assertThat(new String(db.get("key2".getBytes()))).
+ isEqualTo("value2");
+ }
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void failIfDbIsNull() {
+ try (final Checkpoint checkpoint = Checkpoint.create(null)) {
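+ // body intentionally empty; Checkpoint.create(null) above is expected to throw IllegalArgumentException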
+ }
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void failIfDbNotInitialized() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(
+ dbFolder.getRoot().getAbsolutePath())) {
+ db.close();
+ Checkpoint.create(db);
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failWithIllegalPath() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final Checkpoint checkpoint = Checkpoint.create(db)) {
+ checkpoint.createCheckpoint("/Z:///:\\C:\\TZ/-");
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java
new file mode 100644
index 000000000..d1241ac75
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java
@@ -0,0 +1,26 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+public class ClockCacheTest {
+
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ @Test
+ public void newClockCache() {
+ final long capacity = 1000;
+ final int numShardBits = 16;
+ final boolean strictCapacityLimit = true;
+ try(final Cache clockCache = new ClockCache(capacity,
+ numShardBits, strictCapacityLimit)) {
+ //no op
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
new file mode 100644
index 000000000..af67f4663
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
@@ -0,0 +1,625 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.rocksdb.test.RemoveEmptyValueCompactionFilterFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.Random;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ColumnFamilyOptionsTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ public static final Random rand = PlatformRandomHelper.
+ getPlatformSpecificRandomFactory();
+
+ @Test
+ public void copyConstructor() {
+ ColumnFamilyOptions origOpts = new ColumnFamilyOptions();
+ origOpts.setNumLevels(rand.nextInt(8));
+ origOpts.setTargetFileSizeMultiplier(rand.nextInt(100));
+ origOpts.setLevel0StopWritesTrigger(rand.nextInt(50));
+ ColumnFamilyOptions copyOpts = new ColumnFamilyOptions(origOpts);
+ assertThat(origOpts.numLevels()).isEqualTo(copyOpts.numLevels());
+ assertThat(origOpts.targetFileSizeMultiplier()).isEqualTo(copyOpts.targetFileSizeMultiplier());
+ assertThat(origOpts.level0StopWritesTrigger()).isEqualTo(copyOpts.level0StopWritesTrigger());
+ }
+
+ @Test
+ public void getColumnFamilyOptionsFromProps() {
+ Properties properties = new Properties();
+ properties.put("write_buffer_size", "112");
+ properties.put("max_write_buffer_number", "13");
+
+ try (final ColumnFamilyOptions opt = ColumnFamilyOptions.
+ getColumnFamilyOptionsFromProps(properties)) {
+ // setup sample properties
+ assertThat(opt).isNotNull();
+ assertThat(String.valueOf(opt.writeBufferSize())).
+ isEqualTo(properties.get("write_buffer_size"));
+ assertThat(String.valueOf(opt.maxWriteBufferNumber())).
+ isEqualTo(properties.get("max_write_buffer_number"));
+ }
+ }
+
+ @Test
+ public void failColumnFamilyOptionsFromPropsWithIllegalValue() {
+ // setup sample properties
+ final Properties properties = new Properties();
+ properties.put("tomato", "1024");
+ properties.put("burger", "2");
+
+ try (final ColumnFamilyOptions opt =
+ ColumnFamilyOptions.getColumnFamilyOptionsFromProps(properties)) {
+ assertThat(opt).isNull();
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void failColumnFamilyOptionsFromPropsWithNullValue() {
+ try (final ColumnFamilyOptions opt =
+ ColumnFamilyOptions.getColumnFamilyOptionsFromProps(null)) {
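+ // expected to throw IllegalArgumentException before reaching here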
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void failColumnFamilyOptionsFromPropsWithEmptyProps() {
+ try (final ColumnFamilyOptions opt =
+ ColumnFamilyOptions.getColumnFamilyOptionsFromProps(
+ new Properties())) {
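+ // expected to throw IllegalArgumentException for empty Properties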
+ }
+ }
+
+ @Test
+ public void writeBufferSize() throws RocksDBException {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setWriteBufferSize(longValue);
+ assertThat(opt.writeBufferSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void maxWriteBufferNumber() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxWriteBufferNumber(intValue);
+ assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void minWriteBufferNumberToMerge() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setMinWriteBufferNumberToMerge(intValue);
+ assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void numLevels() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setNumLevels(intValue);
+ assertThat(opt.numLevels()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void levelZeroFileNumCompactionTrigger() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setLevelZeroFileNumCompactionTrigger(intValue);
+ assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void levelZeroSlowdownWritesTrigger() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setLevelZeroSlowdownWritesTrigger(intValue);
+ assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void levelZeroStopWritesTrigger() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setLevelZeroStopWritesTrigger(intValue);
+ assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void targetFileSizeBase() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setTargetFileSizeBase(longValue);
+ assertThat(opt.targetFileSizeBase()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void targetFileSizeMultiplier() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setTargetFileSizeMultiplier(intValue);
+ assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxBytesForLevelBase() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxBytesForLevelBase(longValue);
+ assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void levelCompactionDynamicLevelBytes() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setLevelCompactionDynamicLevelBytes(boolValue);
+ assertThat(opt.levelCompactionDynamicLevelBytes())
+ .isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void maxBytesForLevelMultiplier() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final double doubleValue = rand.nextDouble();
+ opt.setMaxBytesForLevelMultiplier(doubleValue);
+ assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(doubleValue);
+ }
+ }
+
+ @Test
+ public void maxBytesForLevelMultiplierAdditional() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue1 = rand.nextInt();
+ final int intValue2 = rand.nextInt();
+ final int[] ints = new int[]{intValue1, intValue2};
+ opt.setMaxBytesForLevelMultiplierAdditional(ints);
+ assertThat(opt.maxBytesForLevelMultiplierAdditional()).isEqualTo(ints);
+ }
+ }
+
+ @Test
+ public void maxCompactionBytes() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxCompactionBytes(longValue);
+ assertThat(opt.maxCompactionBytes()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void softPendingCompactionBytesLimit() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setSoftPendingCompactionBytesLimit(longValue);
+ assertThat(opt.softPendingCompactionBytesLimit()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void hardPendingCompactionBytesLimit() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setHardPendingCompactionBytesLimit(longValue);
+ assertThat(opt.hardPendingCompactionBytesLimit()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void level0FileNumCompactionTrigger() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setLevel0FileNumCompactionTrigger(intValue);
+ assertThat(opt.level0FileNumCompactionTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void level0SlowdownWritesTrigger() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setLevel0SlowdownWritesTrigger(intValue);
+ assertThat(opt.level0SlowdownWritesTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void level0StopWritesTrigger() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setLevel0StopWritesTrigger(intValue);
+ assertThat(opt.level0StopWritesTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void arenaBlockSize() throws RocksDBException {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setArenaBlockSize(longValue);
+ assertThat(opt.arenaBlockSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void disableAutoCompactions() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setDisableAutoCompactions(boolValue);
+ assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void maxSequentialSkipInIterations() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxSequentialSkipInIterations(longValue);
+ assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void inplaceUpdateSupport() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setInplaceUpdateSupport(boolValue);
+ assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void inplaceUpdateNumLocks() throws RocksDBException {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setInplaceUpdateNumLocks(longValue);
+ assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void memtablePrefixBloomSizeRatio() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final double doubleValue = rand.nextDouble();
+ opt.setMemtablePrefixBloomSizeRatio(doubleValue);
+ assertThat(opt.memtablePrefixBloomSizeRatio()).isEqualTo(doubleValue);
+ }
+ }
+
+ @Test
+ public void memtableHugePageSize() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setMemtableHugePageSize(longValue);
+ assertThat(opt.memtableHugePageSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void bloomLocality() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setBloomLocality(intValue);
+ assertThat(opt.bloomLocality()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxSuccessiveMerges() throws RocksDBException {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxSuccessiveMerges(longValue);
+ assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void optimizeFiltersForHits() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final boolean aBoolean = rand.nextBoolean();
+ opt.setOptimizeFiltersForHits(aBoolean);
+ assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean);
+ }
+ }
+
+ @Test
+ public void memTable() throws RocksDBException {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ opt.setMemTableConfig(new HashLinkedListMemTableConfig());
+ assertThat(opt.memTableFactoryName()).
+ isEqualTo("HashLinkedListRepFactory");
+ }
+ }
+
+ @Test
+ public void comparator() throws RocksDBException {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ opt.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
+ }
+ }
+
+ @Test
+ public void linkageOfPrepMethods() {
+ try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
+ options.optimizeUniversalStyleCompaction();
+ options.optimizeUniversalStyleCompaction(4000);
+ options.optimizeLevelStyleCompaction();
+ options.optimizeLevelStyleCompaction(3000);
+ options.optimizeForPointLookup(10);
+ options.optimizeForSmallDb();
+ }
+ }
+
+ @Test
+ public void shouldSetTestPrefixExtractor() {
+ try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
+ options.useFixedLengthPrefixExtractor(100);
+ options.useFixedLengthPrefixExtractor(10);
+ }
+ }
+
+ @Test
+ public void shouldSetTestCappedPrefixExtractor() {
+ try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
+ options.useCappedPrefixExtractor(100);
+ options.useCappedPrefixExtractor(10);
+ }
+ }
+
+ @Test
+ public void compressionTypes() {
+ try (final ColumnFamilyOptions columnFamilyOptions
+ = new ColumnFamilyOptions()) {
+ for (final CompressionType compressionType :
+ CompressionType.values()) {
+ columnFamilyOptions.setCompressionType(compressionType);
+ assertThat(columnFamilyOptions.compressionType()).
+ isEqualTo(compressionType);
+ assertThat(CompressionType.valueOf("NO_COMPRESSION")).
+ isEqualTo(CompressionType.NO_COMPRESSION);
+ }
+ }
+ }
+
+ @Test
+ public void compressionPerLevel() {
+ try (final ColumnFamilyOptions columnFamilyOptions
+ = new ColumnFamilyOptions()) {
+ assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
+ List<CompressionType> compressionTypeList = new ArrayList<>();
+ for (int i = 0; i < columnFamilyOptions.numLevels(); i++) {
+ compressionTypeList.add(CompressionType.NO_COMPRESSION);
+ }
+ columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
+ compressionTypeList = columnFamilyOptions.compressionPerLevel();
+ for (CompressionType compressionType : compressionTypeList) {
+ assertThat(compressionType).isEqualTo(
+ CompressionType.NO_COMPRESSION);
+ }
+ }
+ }
+
+ @Test
+ public void differentCompressionsPerLevel() {
+ try (final ColumnFamilyOptions columnFamilyOptions
+ = new ColumnFamilyOptions()) {
+ columnFamilyOptions.setNumLevels(3);
+
+ assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
+ List<CompressionType> compressionTypeList = new ArrayList<>();
+
+ compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION);
+ compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION);
+ compressionTypeList.add(CompressionType.LZ4_COMPRESSION);
+
+ columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
+ compressionTypeList = columnFamilyOptions.compressionPerLevel();
+
+ assertThat(compressionTypeList.size()).isEqualTo(3);
+ assertThat(compressionTypeList).
+ containsExactly(
+ CompressionType.BZLIB2_COMPRESSION,
+ CompressionType.SNAPPY_COMPRESSION,
+ CompressionType.LZ4_COMPRESSION);
+ }
+ }
+
+ @Test
+ public void bottommostCompressionType() {
+ try (final ColumnFamilyOptions columnFamilyOptions
+ = new ColumnFamilyOptions()) {
+ assertThat(columnFamilyOptions.bottommostCompressionType())
+ .isEqualTo(CompressionType.DISABLE_COMPRESSION_OPTION);
+
+ for (final CompressionType compressionType : CompressionType.values()) {
+ columnFamilyOptions.setBottommostCompressionType(compressionType);
+ assertThat(columnFamilyOptions.bottommostCompressionType())
+ .isEqualTo(compressionType);
+ }
+ }
+ }
+
+ @Test
+ public void bottommostCompressionOptions() {
+ try (final ColumnFamilyOptions columnFamilyOptions =
+ new ColumnFamilyOptions();
+ final CompressionOptions bottommostCompressionOptions =
+ new CompressionOptions()
+ .setMaxDictBytes(123)) {
+
+ columnFamilyOptions.setBottommostCompressionOptions(
+ bottommostCompressionOptions);
+ assertThat(columnFamilyOptions.bottommostCompressionOptions())
+ .isEqualTo(bottommostCompressionOptions);
+ assertThat(columnFamilyOptions.bottommostCompressionOptions()
+ .maxDictBytes()).isEqualTo(123);
+ }
+ }
+
+ @Test
+ public void compressionOptions() {
+ try (final ColumnFamilyOptions columnFamilyOptions
+ = new ColumnFamilyOptions();
+ final CompressionOptions compressionOptions = new CompressionOptions()
+ .setMaxDictBytes(123)) {
+
+ columnFamilyOptions.setCompressionOptions(compressionOptions);
+ assertThat(columnFamilyOptions.compressionOptions())
+ .isEqualTo(compressionOptions);
+ assertThat(columnFamilyOptions.compressionOptions().maxDictBytes())
+ .isEqualTo(123);
+ }
+ }
+
+ @Test
+ public void compactionStyles() {
+ try (final ColumnFamilyOptions columnFamilyOptions
+ = new ColumnFamilyOptions()) {
+ for (final CompactionStyle compactionStyle :
+ CompactionStyle.values()) {
+ columnFamilyOptions.setCompactionStyle(compactionStyle);
+ assertThat(columnFamilyOptions.compactionStyle()).
+ isEqualTo(compactionStyle);
+ assertThat(CompactionStyle.valueOf("FIFO")).
+ isEqualTo(CompactionStyle.FIFO);
+ }
+ }
+ }
+
+ @Test
+ public void maxTableFilesSizeFIFO() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ long longValue = rand.nextLong();
+ // Size has to be positive
+ longValue = (longValue < 0) ? -longValue : longValue;
+ longValue = (longValue == 0) ? longValue + 1 : longValue;
+ opt.setMaxTableFilesSizeFIFO(longValue);
+ assertThat(opt.maxTableFilesSizeFIFO()).
+ isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void maxWriteBufferNumberToMaintain() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ int intValue = rand.nextInt();
+ // Size has to be positive
+ intValue = (intValue < 0) ? -intValue : intValue;
+ intValue = (intValue == 0) ? intValue + 1 : intValue;
+ opt.setMaxWriteBufferNumberToMaintain(intValue);
+ assertThat(opt.maxWriteBufferNumberToMaintain()).
+ isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void compactionPriorities() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ for (final CompactionPriority compactionPriority :
+ CompactionPriority.values()) {
+ opt.setCompactionPriority(compactionPriority);
+ assertThat(opt.compactionPriority()).
+ isEqualTo(compactionPriority);
+ }
+ }
+ }
+
+ @Test
+ public void reportBgIoStats() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final boolean booleanValue = true;
+ opt.setReportBgIoStats(booleanValue);
+ assertThat(opt.reportBgIoStats()).
+ isEqualTo(booleanValue);
+ }
+ }
+
+ @Test
+ public void ttl() {
+ try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
+ options.setTtl(1000 * 60);
+ assertThat(options.ttl()).
+ isEqualTo(1000 * 60);
+ }
+ }
+
+ @Test
+ public void compactionOptionsUniversal() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions();
+ final CompactionOptionsUniversal optUni = new CompactionOptionsUniversal()
+ .setCompressionSizePercent(7)) {
+ opt.setCompactionOptionsUniversal(optUni);
+ assertThat(opt.compactionOptionsUniversal()).
+ isEqualTo(optUni);
+ assertThat(opt.compactionOptionsUniversal().compressionSizePercent())
+ .isEqualTo(7);
+ }
+ }
+
+ @Test
+ public void compactionOptionsFIFO() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions();
+ final CompactionOptionsFIFO optFifo = new CompactionOptionsFIFO()
+ .setMaxTableFilesSize(2000)) {
+ opt.setCompactionOptionsFIFO(optFifo);
+ assertThat(opt.compactionOptionsFIFO()).
+ isEqualTo(optFifo);
+ assertThat(opt.compactionOptionsFIFO().maxTableFilesSize())
+ .isEqualTo(2000);
+ }
+ }
+
+ @Test
+ public void forceConsistencyChecks() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ final boolean booleanValue = true;
+ opt.setForceConsistencyChecks(booleanValue);
+ assertThat(opt.forceConsistencyChecks()).
+ isEqualTo(booleanValue);
+ }
+ }
+
+ @Test
+ public void compactionFilter() {
+ try(final ColumnFamilyOptions options = new ColumnFamilyOptions();
+ final RemoveEmptyValueCompactionFilter cf = new RemoveEmptyValueCompactionFilter()) {
+ options.setCompactionFilter(cf);
+ assertThat(options.compactionFilter()).isEqualTo(cf);
+ }
+ }
+
+ @Test
+ public void compactionFilterFactory() {
+ try(final ColumnFamilyOptions options = new ColumnFamilyOptions();
+ final RemoveEmptyValueCompactionFilterFactory cff = new RemoveEmptyValueCompactionFilterFactory()) {
+ options.setCompactionFilterFactory(cff);
+ assertThat(options.compactionFilterFactory()).isEqualTo(cff);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
new file mode 100644
index 000000000..cc8199ec4
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
@@ -0,0 +1,734 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.*;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ColumnFamilyTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void columnFamilyDescriptorName() throws RocksDBException {
+ final byte[] cfName = "some_name".getBytes(UTF_8);
+
+ try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
+ final ColumnFamilyDescriptor cfDescriptor =
+ new ColumnFamilyDescriptor(cfName, cfOptions);
+ assertThat(cfDescriptor.getName()).isEqualTo(cfName);
+ }
+ }
+
+ @Test
+ public void columnFamilyDescriptorOptions() throws RocksDBException {
+ final byte[] cfName = "some_name".getBytes(UTF_8);
+
+ try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
+ .setCompressionType(CompressionType.BZLIB2_COMPRESSION)) {
+ final ColumnFamilyDescriptor cfDescriptor =
+ new ColumnFamilyDescriptor(cfName, cfOptions);
+
+ assertThat(cfDescriptor.getOptions().compressionType())
+ .isEqualTo(CompressionType.BZLIB2_COMPRESSION);
+ }
+ }
+
+ @Test
+ public void listColumnFamilies() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // Test listColumnFamilies
+ final List<byte[]> columnFamilyNames = RocksDB.listColumnFamilies(options,
+ dbFolder.getRoot().getAbsolutePath());
+ assertThat(columnFamilyNames).isNotNull();
+ assertThat(columnFamilyNames.size()).isGreaterThan(0);
+ assertThat(columnFamilyNames.size()).isEqualTo(1);
+ assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default");
+ }
+ }
+
+ @Test
+ public void defaultColumnFamily() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ final ColumnFamilyHandle cfh = db.getDefaultColumnFamily();
+ try {
+ assertThat(cfh).isNotNull();
+
+ assertThat(cfh.getName()).isEqualTo("default".getBytes(UTF_8));
+ assertThat(cfh.getID()).isEqualTo(0);
+
+ final byte[] key = "key".getBytes();
+ final byte[] value = "value".getBytes();
+
+ db.put(cfh, key, value);
+
+ final byte[] actualValue = db.get(cfh, key);
+
+ assertThat(cfh).isNotNull();
+ assertThat(actualValue).isEqualTo(value);
+ } finally {
+ cfh.close();
+ }
+ }
+ }
+
+ @Test
+ public void createColumnFamily() throws RocksDBException {
+ final byte[] cfName = "new_cf".getBytes(UTF_8);
+ final ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor(cfName,
+ new ColumnFamilyOptions());
+
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(cfDescriptor);
+
+ try {
+ assertThat(columnFamilyHandle.getName()).isEqualTo(cfName);
+ assertThat(columnFamilyHandle.getID()).isEqualTo(1);
+
+ final ColumnFamilyDescriptor latestDescriptor = columnFamilyHandle.getDescriptor();
+ assertThat(latestDescriptor.getName()).isEqualTo(cfName);
+
+ final List<byte[]> columnFamilyNames = RocksDB.listColumnFamilies(
+ options, dbFolder.getRoot().getAbsolutePath());
+ assertThat(columnFamilyNames).isNotNull();
+ assertThat(columnFamilyNames.size()).isGreaterThan(0);
+ assertThat(columnFamilyNames.size()).isEqualTo(2);
+ assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default");
+ assertThat(new String(columnFamilyNames.get(1))).isEqualTo("new_cf");
+ } finally {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+
+ @Test
+ public void openWithColumnFamilies() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfNames = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes())
+ );
+
+ final List<ColumnFamilyHandle> columnFamilyHandleList =
+ new ArrayList<>();
+
+ // Test open database with column family names
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), cfNames,
+ columnFamilyHandleList)) {
+
+ try {
+ assertThat(columnFamilyHandleList.size()).isEqualTo(2);
+ db.put("dfkey1".getBytes(), "dfvalue".getBytes());
+ db.put(columnFamilyHandleList.get(0), "dfkey2".getBytes(),
+ "dfvalue".getBytes());
+ db.put(columnFamilyHandleList.get(1), "newcfkey1".getBytes(),
+ "newcfvalue".getBytes());
+
+ String retVal = new String(db.get(columnFamilyHandleList.get(1),
+ "newcfkey1".getBytes()));
+ assertThat(retVal).isEqualTo("newcfvalue");
+ assertThat((db.get(columnFamilyHandleList.get(1),
+ "dfkey1".getBytes()))).isNull();
+ db.delete(columnFamilyHandleList.get(1), "newcfkey1".getBytes());
+ assertThat((db.get(columnFamilyHandleList.get(1),
+ "newcfkey1".getBytes()))).isNull();
+ db.delete(columnFamilyHandleList.get(0), new WriteOptions(),
+ "dfkey2".getBytes());
+ assertThat(db.get(columnFamilyHandleList.get(0), new ReadOptions(),
+ "dfkey2".getBytes())).isNull();
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void getWithOutValueAndCf() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+
+ // Test open database with column family names
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ columnFamilyHandleList)) {
+ try {
+ db.put(columnFamilyHandleList.get(0), new WriteOptions(),
+ "key1".getBytes(), "value".getBytes());
+ db.put("key2".getBytes(), "12345678".getBytes());
+ final byte[] outValue = new byte[5];
+ // not found value
+ int getResult = db.get("keyNotFound".getBytes(), outValue);
+ assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND);
+ // found value which fits in outValue
+ getResult = db.get(columnFamilyHandleList.get(0), "key1".getBytes(),
+ outValue);
+ assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
+ assertThat(outValue).isEqualTo("value".getBytes());
+ // found value which fits partially
+ getResult = db.get(columnFamilyHandleList.get(0), new ReadOptions(),
+ "key2".getBytes(), outValue);
+ assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
+ assertThat(outValue).isEqualTo("12345".getBytes());
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void createWriteDropColumnFamily() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ columnFamilyHandleList)) {
+ ColumnFamilyHandle tmpColumnFamilyHandle = null;
+ try {
+ tmpColumnFamilyHandle = db.createColumnFamily(
+ new ColumnFamilyDescriptor("tmpCF".getBytes(),
+ new ColumnFamilyOptions()));
+ db.put(tmpColumnFamilyHandle, "key".getBytes(), "value".getBytes());
+ db.dropColumnFamily(tmpColumnFamilyHandle);
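+ // dropping the column family does not dispose the Java handle; it still owns its native reference and must be closed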
+ assertThat(tmpColumnFamilyHandle.isOwningHandle()).isTrue();
+ } finally {
+ if (tmpColumnFamilyHandle != null) {
+ tmpColumnFamilyHandle.close();
+ }
+ for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void createWriteDropColumnFamilies() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ columnFamilyHandleList)) {
+ ColumnFamilyHandle tmpColumnFamilyHandle = null;
+ ColumnFamilyHandle tmpColumnFamilyHandle2 = null;
+ try {
+ tmpColumnFamilyHandle = db.createColumnFamily(
+ new ColumnFamilyDescriptor("tmpCF".getBytes(),
+ new ColumnFamilyOptions()));
+ tmpColumnFamilyHandle2 = db.createColumnFamily(
+ new ColumnFamilyDescriptor("tmpCF2".getBytes(),
+ new ColumnFamilyOptions()));
+ db.put(tmpColumnFamilyHandle, "key".getBytes(), "value".getBytes());
+ db.put(tmpColumnFamilyHandle2, "key".getBytes(), "value".getBytes());
+ db.dropColumnFamilies(Arrays.asList(tmpColumnFamilyHandle, tmpColumnFamilyHandle2));
+ assertThat(tmpColumnFamilyHandle.isOwningHandle()).isTrue();
+ assertThat(tmpColumnFamilyHandle2.isOwningHandle()).isTrue();
+ } finally {
+ if (tmpColumnFamilyHandle != null) {
+ tmpColumnFamilyHandle.close();
+ }
+ if (tmpColumnFamilyHandle2 != null) {
+ tmpColumnFamilyHandle2.close();
+ }
+ for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void writeBatch() throws RocksDBException {
+ try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+ final ColumnFamilyOptions defaultCfOptions = new ColumnFamilyOptions()
+ .setMergeOperator(stringAppendOperator)) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
+ defaultCfOptions),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, columnFamilyHandleList);
+ final WriteBatch writeBatch = new WriteBatch();
+ final WriteOptions writeOpt = new WriteOptions()) {
+ try {
+ writeBatch.put("key".getBytes(), "value".getBytes());
+ writeBatch.put(db.getDefaultColumnFamily(),
+ "mergeKey".getBytes(), "merge".getBytes());
+ writeBatch.merge(db.getDefaultColumnFamily(), "mergeKey".getBytes(),
+ "merge".getBytes());
+ writeBatch.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(),
+ "value".getBytes());
+ writeBatch.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(),
+ "value2".getBytes());
+ writeBatch.delete("xyz".getBytes());
+ writeBatch.delete(columnFamilyHandleList.get(1), "xyz".getBytes());
+ db.write(writeOpt, writeBatch);
+
+ assertThat(db.get(columnFamilyHandleList.get(1),
+ "xyz".getBytes())).isNull();
+ assertThat(new String(db.get(columnFamilyHandleList.get(1),
+ "newcfkey".getBytes()))).isEqualTo("value");
+ assertThat(new String(db.get(columnFamilyHandleList.get(1),
+ "newcfkey2".getBytes()))).isEqualTo("value2");
+ assertThat(new String(db.get("key".getBytes()))).isEqualTo("value");
+ // check if key is merged
+ assertThat(new String(db.get(db.getDefaultColumnFamily(),
+ "mergeKey".getBytes()))).isEqualTo("merge,merge");
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void iteratorOnColumnFamily() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, columnFamilyHandleList)) {
+ try {
+
+ db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(),
+ "value".getBytes());
+ db.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(),
+ "value2".getBytes());
+ try (final RocksIterator rocksIterator =
+ db.newIterator(columnFamilyHandleList.get(1))) {
+ rocksIterator.seekToFirst();
+ Map<String, String> refMap = new HashMap<>();
+ refMap.put("newcfkey", "value");
+ refMap.put("newcfkey2", "value2");
+ int i = 0;
+ while (rocksIterator.isValid()) {
+ i++;
+ assertThat(refMap.get(new String(rocksIterator.key()))).
+ isEqualTo(new String(rocksIterator.value()));
+ rocksIterator.next();
+ }
+ assertThat(i).isEqualTo(2);
+ }
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void multiGet() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, columnFamilyHandleList)) {
+ try {
+ db.put(columnFamilyHandleList.get(0), "key".getBytes(),
+ "value".getBytes());
+ db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(),
+ "value".getBytes());
+
+ final List<byte[]> keys = Arrays.asList(new byte[][]{
+ "key".getBytes(), "newcfkey".getBytes()
+ });
+
+ List<byte[]> retValues = db.multiGetAsList(columnFamilyHandleList, keys);
+ assertThat(retValues.size()).isEqualTo(2);
+ assertThat(new String(retValues.get(0)))
+ .isEqualTo("value");
+ assertThat(new String(retValues.get(1)))
+ .isEqualTo("value");
+ retValues = db.multiGetAsList(new ReadOptions(), columnFamilyHandleList,
+ keys);
+ assertThat(retValues.size()).isEqualTo(2);
+ assertThat(new String(retValues.get(0)))
+ .isEqualTo("value");
+ assertThat(new String(retValues.get(1)))
+ .isEqualTo("value");
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void multiGetAsList() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, columnFamilyHandleList)) {
+ try {
+ db.put(columnFamilyHandleList.get(0), "key".getBytes(),
+ "value".getBytes());
+ db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(),
+ "value".getBytes());
+
+ final List<byte[]> keys = Arrays.asList(new byte[][]{
+ "key".getBytes(), "newcfkey".getBytes()
+ });
+ List<byte[]> retValues = db.multiGetAsList(columnFamilyHandleList,
+ keys);
+ assertThat(retValues.size()).isEqualTo(2);
+ assertThat(new String(retValues.get(0)))
+ .isEqualTo("value");
+ assertThat(new String(retValues.get(1)))
+ .isEqualTo("value");
+ retValues = db.multiGetAsList(new ReadOptions(), columnFamilyHandleList,
+ keys);
+ assertThat(retValues.size()).isEqualTo(2);
+ assertThat(new String(retValues.get(0)))
+ .isEqualTo("value");
+ assertThat(new String(retValues.get(1)))
+ .isEqualTo("value");
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void properties() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, columnFamilyHandleList)) {
+ try {
+ assertThat(db.getProperty("rocksdb.estimate-num-keys")).
+ isNotNull();
+ assertThat(db.getLongProperty(columnFamilyHandleList.get(0),
+ "rocksdb.estimate-num-keys")).isGreaterThanOrEqualTo(0);
+ assertThat(db.getProperty("rocksdb.stats")).isNotNull();
+ assertThat(db.getProperty(columnFamilyHandleList.get(0),
+ "rocksdb.sstables")).isNotNull();
+ assertThat(db.getProperty(columnFamilyHandleList.get(1),
+ "rocksdb.estimate-num-keys")).isNotNull();
+ assertThat(db.getProperty(columnFamilyHandleList.get(1),
+ "rocksdb.stats")).isNotNull();
+ assertThat(db.getProperty(columnFamilyHandleList.get(1),
+ "rocksdb.sstables")).isNotNull();
+ assertThat(db.getAggregatedLongProperty("rocksdb.estimate-num-keys")).
+ isNotNull();
+ assertThat(db.getAggregatedLongProperty("rocksdb.estimate-num-keys")).
+ isGreaterThanOrEqualTo(0);
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void iterators() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ columnFamilyHandleList)) {
+ List<RocksIterator> iterators = null;
+ try {
+ iterators = db.newIterators(columnFamilyHandleList);
+ assertThat(iterators.size()).isEqualTo(2);
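+        // newIterators() returns one iterator per handle, in the same order as
+        // columnFamilyHandleList; the caller must close each iterator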
+ RocksIterator iter = iterators.get(0);
+ iter.seekToFirst();
+ final Map<String, String> defRefMap = new HashMap<>();
+ defRefMap.put("dfkey1", "dfvalue");
+ defRefMap.put("key", "value");
+ while (iter.isValid()) {
+ assertThat(defRefMap.get(new String(iter.key()))).
+ isEqualTo(new String(iter.value()));
+ iter.next();
+ }
+ // iterate over new_cf key/value pairs
+ final Map<String, String> cfRefMap = new HashMap<>();
+ cfRefMap.put("newcfkey", "value");
+ cfRefMap.put("newcfkey2", "value2");
+ iter = iterators.get(1);
+ iter.seekToFirst();
+ while (iter.isValid()) {
+ assertThat(cfRefMap.get(new String(iter.key()))).
+ isEqualTo(new String(iter.value()));
+ iter.next();
+ }
+ } finally {
+ if (iterators != null) {
+ for (final RocksIterator rocksIterator : iterators) {
+ rocksIterator.close();
+ }
+ }
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failPutDisposedCF() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, columnFamilyHandleList)) {
+ try {
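+        // any write against a dropped column family handle is expected to throw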
+ db.dropColumnFamily(columnFamilyHandleList.get(1));
+ db.put(columnFamilyHandleList.get(1), "key".getBytes(),
+ "value".getBytes());
+ } finally {
+ for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failRemoveDisposedCF() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, columnFamilyHandleList)) {
+ try {
+ db.dropColumnFamily(columnFamilyHandleList.get(1));
+ db.delete(columnFamilyHandleList.get(1), "key".getBytes());
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failGetDisposedCF() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ columnFamilyHandleList)) {
+ try {
+ db.dropColumnFamily(columnFamilyHandleList.get(1));
+ db.get(columnFamilyHandleList.get(1), "key".getBytes());
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failMultiGetWithoutCorrectNumberOfCF() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes()));
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ columnFamilyHandleList)) {
+ try {
+ final List<byte[]> keys = new ArrayList<>();
+ keys.add("key".getBytes());
+ keys.add("newcfkey".getBytes());
+ final List<ColumnFamilyHandle> cfCustomList = new ArrayList<>();
+ db.multiGetAsList(cfCustomList, keys);
+
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+  public void testByteCreateColumnFamily() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())
+ ) {
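+      // column family names are raw byte arrays, so they are not limited to
+      // printable strings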
+ final byte[] b0 = new byte[]{(byte) 0x00};
+ final byte[] b1 = new byte[]{(byte) 0x01};
+ final byte[] b2 = new byte[]{(byte) 0x02};
+ ColumnFamilyHandle cf1 = null, cf2 = null, cf3 = null;
+ try {
+ cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0));
+ cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1));
+ final List<byte[]> families = RocksDB.listColumnFamilies(options,
+ dbFolder.getRoot().getAbsolutePath());
+ assertThat(families).contains("default".getBytes(), b0, b1);
+ cf3 = db.createColumnFamily(new ColumnFamilyDescriptor(b2));
+ } finally {
+ if (cf1 != null) {
+ cf1.close();
+ }
+ if (cf2 != null) {
+ cf2.close();
+ }
+ if (cf3 != null) {
+ cf3.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testCFNamesWithZeroBytes() throws RocksDBException {
+ ColumnFamilyHandle cf1 = null, cf2 = null;
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath());
+ ) {
+ try {
+ final byte[] b0 = new byte[]{0, 0};
+ final byte[] b1 = new byte[]{0, 1};
+ cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0));
+ cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1));
+ final List<byte[]> families = RocksDB.listColumnFamilies(options,
+ dbFolder.getRoot().getAbsolutePath());
+ assertThat(families).contains("default".getBytes(), b0, b1);
+ } finally {
+ if (cf1 != null) {
+ cf1.close();
+ }
+ if (cf2 != null) {
+ cf2.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testCFNameSimplifiedChinese() throws RocksDBException {
+ ColumnFamilyHandle columnFamilyHandle = null;
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath());
+ ) {
+ try {
+ final String simplifiedChinese = "\u7b80\u4f53\u5b57";
+ columnFamilyHandle = db.createColumnFamily(
+ new ColumnFamilyDescriptor(simplifiedChinese.getBytes()));
+
+ final List<byte[]> families = RocksDB.listColumnFamilies(options,
+ dbFolder.getRoot().getAbsolutePath());
+ assertThat(families).contains("default".getBytes(),
+ simplifiedChinese.getBytes());
+ } finally {
+ if (columnFamilyHandle != null) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
new file mode 100644
index 000000000..18c187ddb
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
@@ -0,0 +1,98 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+import org.rocksdb.CompactRangeOptions.BottommostLevelCompaction;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactRangeOptionsTest {
+
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ @Test
+ public void exclusiveManualCompaction() {
+ CompactRangeOptions opt = new CompactRangeOptions();
+ boolean value = false;
+ opt.setExclusiveManualCompaction(value);
+ assertThat(opt.exclusiveManualCompaction()).isEqualTo(value);
+ value = true;
+ opt.setExclusiveManualCompaction(value);
+ assertThat(opt.exclusiveManualCompaction()).isEqualTo(value);
+ }
+
+ @Test
+ public void bottommostLevelCompaction() {
+ CompactRangeOptions opt = new CompactRangeOptions();
+ BottommostLevelCompaction value = BottommostLevelCompaction.kSkip;
+ opt.setBottommostLevelCompaction(value);
+ assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
+ value = BottommostLevelCompaction.kForce;
+ opt.setBottommostLevelCompaction(value);
+ assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
+ value = BottommostLevelCompaction.kIfHaveCompactionFilter;
+ opt.setBottommostLevelCompaction(value);
+ assertThat(opt.bottommostLevelCompaction()).isEqualTo(value);
+ }
+
+ @Test
+ public void changeLevel() {
+ CompactRangeOptions opt = new CompactRangeOptions();
+ boolean value = false;
+ opt.setChangeLevel(value);
+ assertThat(opt.changeLevel()).isEqualTo(value);
+ value = true;
+ opt.setChangeLevel(value);
+ assertThat(opt.changeLevel()).isEqualTo(value);
+ }
+
+ @Test
+ public void targetLevel() {
+ CompactRangeOptions opt = new CompactRangeOptions();
+ int value = 2;
+ opt.setTargetLevel(value);
+ assertThat(opt.targetLevel()).isEqualTo(value);
+ value = 3;
+ opt.setTargetLevel(value);
+ assertThat(opt.targetLevel()).isEqualTo(value);
+ }
+
+ @Test
+ public void targetPathId() {
+ CompactRangeOptions opt = new CompactRangeOptions();
+ int value = 2;
+ opt.setTargetPathId(value);
+ assertThat(opt.targetPathId()).isEqualTo(value);
+ value = 3;
+ opt.setTargetPathId(value);
+ assertThat(opt.targetPathId()).isEqualTo(value);
+ }
+
+ @Test
+ public void allowWriteStall() {
+ CompactRangeOptions opt = new CompactRangeOptions();
+ boolean value = false;
+ opt.setAllowWriteStall(value);
+ assertThat(opt.allowWriteStall()).isEqualTo(value);
+ value = true;
+ opt.setAllowWriteStall(value);
+ assertThat(opt.allowWriteStall()).isEqualTo(value);
+ }
+
+ @Test
+ public void maxSubcompactions() {
+ CompactRangeOptions opt = new CompactRangeOptions();
+ int value = 2;
+ opt.setMaxSubcompactions(value);
+ assertThat(opt.maxSubcompactions()).isEqualTo(value);
+ value = 3;
+ opt.setMaxSubcompactions(value);
+ assertThat(opt.maxSubcompactions()).isEqualTo(value);
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java
new file mode 100644
index 000000000..e05f1eef3
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java
@@ -0,0 +1,68 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.rocksdb.test.RemoveEmptyValueCompactionFilterFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionFilterFactoryTest {
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void columnFamilyOptions_setCompactionFilterFactory()
+ throws RocksDBException {
+ try(final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RemoveEmptyValueCompactionFilterFactory compactionFilterFactory
+ = new RemoveEmptyValueCompactionFilterFactory();
+ final ColumnFamilyOptions new_cf_opts
+ = new ColumnFamilyOptions()
+ .setCompactionFilterFactory(compactionFilterFactory)) {
+
+ final List<ColumnFamilyDescriptor> cfNames = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts));
+
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+ try (final RocksDB rocksDb = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), cfNames, cfHandles);
+ ) {
+ try {
+ final byte[] key1 = "key1".getBytes();
+ final byte[] key2 = "key2".getBytes();
+
+ final byte[] value1 = "value1".getBytes();
+ final byte[] value2 = new byte[0];
+
+ rocksDb.put(cfHandles.get(1), key1, value1);
+ rocksDb.put(cfHandles.get(1), key2, value2);
+
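+        // compaction runs the RemoveEmptyValueCompactionFilter, which drops
+        // entries with empty values, so key2 should be gone afterwards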
+ rocksDb.compactRange(cfHandles.get(1));
+
+ assertThat(rocksDb.get(cfHandles.get(1), key1)).isEqualTo(value1);
+ final boolean exists = rocksDb.keyMayExist(cfHandles.get(1), key2, null);
+ assertThat(exists).isFalse();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java
new file mode 100644
index 000000000..c71b0da16
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java
@@ -0,0 +1,114 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionJobInfoTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
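+  // a freshly-constructed CompactionJobInfo only carries default values;
+  // RocksDB fills one in when it invokes an event listener callback
+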
+ @Test
+ public void columnFamilyName() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.columnFamilyName())
+ .isEmpty();
+ }
+ }
+
+ @Test
+ public void status() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.status().getCode())
+ .isEqualTo(Status.Code.Ok);
+ }
+ }
+
+ @Test
+ public void threadId() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.threadId())
+ .isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void jobId() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.jobId())
+ .isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void baseInputLevel() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.baseInputLevel())
+ .isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void outputLevel() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.outputLevel())
+ .isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void inputFiles() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.inputFiles())
+ .isEmpty();
+ }
+ }
+
+ @Test
+ public void outputFiles() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.outputFiles())
+ .isEmpty();
+ }
+ }
+
+ @Test
+ public void tableProperties() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.tableProperties())
+ .isEmpty();
+ }
+ }
+
+ @Test
+ public void compactionReason() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.compactionReason())
+ .isEqualTo(CompactionReason.kUnknown);
+ }
+ }
+
+ @Test
+ public void compression() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.compression())
+ .isEqualTo(CompressionType.NO_COMPRESSION);
+ }
+ }
+
+ @Test
+ public void stats() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.stats())
+ .isNotNull();
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java
new file mode 100644
index 000000000..5c1eb2aab
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java
@@ -0,0 +1,196 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionJobStatsTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
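+  // a newly-constructed CompactionJobStats is all zeroes; real figures only
+  // appear when RocksDB reports a completed compaction
+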
+ @Test
+ public void reset() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ compactionJobStats.reset();
+ assertThat(compactionJobStats.elapsedMicros()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void add() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats();
+ final CompactionJobStats otherCompactionJobStats = new CompactionJobStats()) {
+ compactionJobStats.add(otherCompactionJobStats);
+ }
+ }
+
+ @Test
+ public void elapsedMicros() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.elapsedMicros()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numInputRecords() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numInputRecords()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numInputFiles() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numInputFiles()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numInputFilesAtOutputLevel() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numInputFilesAtOutputLevel()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numOutputRecords() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numOutputRecords()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numOutputFiles() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numOutputFiles()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void isManualCompaction() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.isManualCompaction()).isFalse();
+ }
+ }
+
+ @Test
+ public void totalInputBytes() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.totalInputBytes()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void totalOutputBytes() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.totalOutputBytes()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numRecordsReplaced() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numRecordsReplaced()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void totalInputRawKeyBytes() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.totalInputRawKeyBytes()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void totalInputRawValueBytes() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.totalInputRawValueBytes()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numInputDeletionRecords() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numInputDeletionRecords()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numExpiredDeletionRecords() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numExpiredDeletionRecords()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numCorruptKeys() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numCorruptKeys()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void fileWriteNanos() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.fileWriteNanos()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void fileRangeSyncNanos() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.fileRangeSyncNanos()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void fileFsyncNanos() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.fileFsyncNanos()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void filePrepareWriteNanos() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.filePrepareWriteNanos()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void smallestOutputKeyPrefix() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.smallestOutputKeyPrefix()).isEmpty();
+ }
+ }
+
+ @Test
+ public void largestOutputKeyPrefix() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.largestOutputKeyPrefix()).isEmpty();
+ }
+ }
+
+ @Test
+ public void numSingleDelFallthru() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numSingleDelFallthru()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numSingleDelMismatch() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numSingleDelMismatch()).isEqualTo(0);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java
new file mode 100644
index 000000000..841615e67
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java
@@ -0,0 +1,35 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionOptionsFIFOTest {
+
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ @Test
+ public void maxTableFilesSize() {
+    final long size = 500 * 1024 * 1024;
+ try (final CompactionOptionsFIFO opt = new CompactionOptionsFIFO()) {
+ opt.setMaxTableFilesSize(size);
+ assertThat(opt.maxTableFilesSize()).isEqualTo(size);
+ }
+ }
+
+ @Test
+ public void allowCompaction() {
+ final boolean allowCompaction = true;
+ try (final CompactionOptionsFIFO opt = new CompactionOptionsFIFO()) {
+ opt.setAllowCompaction(allowCompaction);
+ assertThat(opt.allowCompaction()).isEqualTo(allowCompaction);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsTest.java
new file mode 100644
index 000000000..9b7d79694
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsTest.java
@@ -0,0 +1,52 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionOptionsTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+ public void compression() {
+ try (final CompactionOptions compactionOptions = new CompactionOptions()) {
+ assertThat(compactionOptions.compression())
+ .isEqualTo(CompressionType.SNAPPY_COMPRESSION);
+ compactionOptions.setCompression(CompressionType.NO_COMPRESSION);
+ assertThat(compactionOptions.compression())
+ .isEqualTo(CompressionType.NO_COMPRESSION);
+ }
+ }
+
+ @Test
+ public void outputFileSizeLimit() {
+ final long mb250 = 1024 * 1024 * 250;
+ try (final CompactionOptions compactionOptions = new CompactionOptions()) {
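+      // the C++ default is the maximum uint64, which shows up as -1 through
+      // Java's signed long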
+ assertThat(compactionOptions.outputFileSizeLimit())
+ .isEqualTo(-1);
+ compactionOptions.setOutputFileSizeLimit(mb250);
+ assertThat(compactionOptions.outputFileSizeLimit())
+ .isEqualTo(mb250);
+ }
+ }
+
+ @Test
+ public void maxSubcompactions() {
+ try (final CompactionOptions compactionOptions = new CompactionOptions()) {
+ assertThat(compactionOptions.maxSubcompactions())
+ .isEqualTo(0);
+ compactionOptions.setMaxSubcompactions(9);
+ assertThat(compactionOptions.maxSubcompactions())
+ .isEqualTo(9);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java
new file mode 100644
index 000000000..5e2d195b6
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java
@@ -0,0 +1,80 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionOptionsUniversalTest {
+
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ @Test
+ public void sizeRatio() {
+ final int sizeRatio = 4;
+ try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
+ opt.setSizeRatio(sizeRatio);
+ assertThat(opt.sizeRatio()).isEqualTo(sizeRatio);
+ }
+ }
+
+ @Test
+ public void minMergeWidth() {
+ final int minMergeWidth = 3;
+ try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
+ opt.setMinMergeWidth(minMergeWidth);
+ assertThat(opt.minMergeWidth()).isEqualTo(minMergeWidth);
+ }
+ }
+
+ @Test
+ public void maxMergeWidth() {
+ final int maxMergeWidth = Integer.MAX_VALUE - 1234;
+ try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
+ opt.setMaxMergeWidth(maxMergeWidth);
+ assertThat(opt.maxMergeWidth()).isEqualTo(maxMergeWidth);
+ }
+ }
+
+ @Test
+ public void maxSizeAmplificationPercent() {
+ final int maxSizeAmplificationPercent = 150;
+ try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
+ opt.setMaxSizeAmplificationPercent(maxSizeAmplificationPercent);
+ assertThat(opt.maxSizeAmplificationPercent()).isEqualTo(maxSizeAmplificationPercent);
+ }
+ }
+
+ @Test
+ public void compressionSizePercent() {
+ final int compressionSizePercent = 500;
+ try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
+ opt.setCompressionSizePercent(compressionSizePercent);
+ assertThat(opt.compressionSizePercent()).isEqualTo(compressionSizePercent);
+ }
+ }
+
+ @Test
+ public void stopStyle() {
+ final CompactionStopStyle stopStyle = CompactionStopStyle.CompactionStopStyleSimilarSize;
+ try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
+ opt.setStopStyle(stopStyle);
+ assertThat(opt.stopStyle()).isEqualTo(stopStyle);
+ }
+ }
+
+ @Test
+ public void allowTrivialMove() {
+ final boolean allowTrivialMove = true;
+ try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
+ opt.setAllowTrivialMove(allowTrivialMove);
+ assertThat(opt.allowTrivialMove()).isEqualTo(allowTrivialMove);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java
new file mode 100644
index 000000000..b078e132f
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java
@@ -0,0 +1,31 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionPriorityTest {
+
+ @Test(expected = IllegalArgumentException.class)
+ public void failIfIllegalByteValueProvided() {
+ CompactionPriority.getCompactionPriority((byte) -1);
+ }
+
+ @Test
+ public void getCompactionPriority() {
+ assertThat(CompactionPriority.getCompactionPriority(
+ CompactionPriority.OldestLargestSeqFirst.getValue()))
+ .isEqualTo(CompactionPriority.OldestLargestSeqFirst);
+ }
+
+ @Test
+ public void valueOf() {
+ assertThat(CompactionPriority.valueOf("OldestSmallestSeqFirst")).
+ isEqualTo(CompactionPriority.OldestSmallestSeqFirst);
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java
new file mode 100644
index 000000000..4c8a20950
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java
@@ -0,0 +1,31 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionStopStyleTest {
+
+ @Test(expected = IllegalArgumentException.class)
+ public void failIfIllegalByteValueProvided() {
+ CompactionStopStyle.getCompactionStopStyle((byte) -1);
+ }
+
+ @Test
+ public void getCompactionStopStyle() {
+ assertThat(CompactionStopStyle.getCompactionStopStyle(
+ CompactionStopStyle.CompactionStopStyleTotalSize.getValue()))
+ .isEqualTo(CompactionStopStyle.CompactionStopStyleTotalSize);
+ }
+
+ @Test
+ public void valueOf() {
+ assertThat(CompactionStopStyle.valueOf("CompactionStopStyleSimilarSize")).
+ isEqualTo(CompactionStopStyle.CompactionStopStyleSimilarSize);
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java
new file mode 100644
index 000000000..3e90b9f10
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java
@@ -0,0 +1,58 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ComparatorOptionsTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+ public void reusedSynchronisationType() {
+ try(final ComparatorOptions copt = new ComparatorOptions()) {
+
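+      // the synchronisation type controls how the comparator's reused buffers
+      // are guarded when several threads compare keys concurrently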
+ copt.setReusedSynchronisationType(ReusedSynchronisationType.MUTEX);
+ assertThat(copt.reusedSynchronisationType())
+ .isEqualTo(ReusedSynchronisationType.MUTEX);
+
+ copt.setReusedSynchronisationType(ReusedSynchronisationType.ADAPTIVE_MUTEX);
+ assertThat(copt.reusedSynchronisationType())
+ .isEqualTo(ReusedSynchronisationType.ADAPTIVE_MUTEX);
+
+ copt.setReusedSynchronisationType(ReusedSynchronisationType.THREAD_LOCAL);
+ assertThat(copt.reusedSynchronisationType())
+ .isEqualTo(ReusedSynchronisationType.THREAD_LOCAL);
+ }
+ }
+
+ @Test
+ public void useDirectBuffer() {
+ try(final ComparatorOptions copt = new ComparatorOptions()) {
+ copt.setUseDirectBuffer(true);
+ assertThat(copt.useDirectBuffer()).isTrue();
+
+ copt.setUseDirectBuffer(false);
+ assertThat(copt.useDirectBuffer()).isFalse();
+ }
+ }
+
+ @Test
+ public void maxReusedBufferSize() {
+ try(final ComparatorOptions copt = new ComparatorOptions()) {
+ copt.setMaxReusedBufferSize(12345);
+ assertThat(copt.maxReusedBufferSize()).isEqualTo(12345);
+
+ copt.setMaxReusedBufferSize(-1);
+ assertThat(copt.maxReusedBufferSize()).isEqualTo(-1);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java
new file mode 100644
index 000000000..116552c32
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java
@@ -0,0 +1,71 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompressionOptionsTest {
+
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ @Test
+ public void windowBits() {
+ final int windowBits = 7;
+ try(final CompressionOptions opt = new CompressionOptions()) {
+ opt.setWindowBits(windowBits);
+ assertThat(opt.windowBits()).isEqualTo(windowBits);
+ }
+ }
+
+ @Test
+ public void level() {
+ final int level = 6;
+ try(final CompressionOptions opt = new CompressionOptions()) {
+ opt.setLevel(level);
+ assertThat(opt.level()).isEqualTo(level);
+ }
+ }
+
+ @Test
+ public void strategy() {
+ final int strategy = 2;
+ try(final CompressionOptions opt = new CompressionOptions()) {
+ opt.setStrategy(strategy);
+ assertThat(opt.strategy()).isEqualTo(strategy);
+ }
+ }
+
+ @Test
+ public void maxDictBytes() {
+ final int maxDictBytes = 999;
+ try(final CompressionOptions opt = new CompressionOptions()) {
+ opt.setMaxDictBytes(maxDictBytes);
+ assertThat(opt.maxDictBytes()).isEqualTo(maxDictBytes);
+ }
+ }
+
+ @Test
+ public void zstdMaxTrainBytes() {
+ final int zstdMaxTrainBytes = 999;
+ try(final CompressionOptions opt = new CompressionOptions()) {
+ opt.setZStdMaxTrainBytes(zstdMaxTrainBytes);
+ assertThat(opt.zstdMaxTrainBytes()).isEqualTo(zstdMaxTrainBytes);
+ }
+ }
+
+ @Test
+ public void enabled() {
+ try(final CompressionOptions opt = new CompressionOptions()) {
+ assertThat(opt.enabled()).isFalse();
+ opt.setEnabled(true);
+ assertThat(opt.enabled()).isTrue();
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java
new file mode 100644
index 000000000..e26cc0aca
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java
@@ -0,0 +1,20 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompressionTypesTest {
+ @Test
+ public void getCompressionType() {
+ for (final CompressionType compressionType : CompressionType.values()) {
+      final String libraryName = compressionType.getLibraryName();
+      // DISABLE_COMPRESSION_OPTION shares a null library name with
+      // NO_COMPRESSION, so it cannot round-trip through the library name
+      if (compressionType != CompressionType.DISABLE_COMPRESSION_OPTION) {
+        assertThat(CompressionType.getCompressionType(libraryName))
+            .isEqualTo(compressionType);
+      }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java
new file mode 100644
index 000000000..71cada204
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java
@@ -0,0 +1,813 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import java.nio.file.Paths;
+import java.util.*;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class DBOptionsTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ public static final Random rand = PlatformRandomHelper.
+ getPlatformSpecificRandomFactory();
+
+ @Test
+ public void copyConstructor() {
+ DBOptions origOpts = new DBOptions();
+ origOpts.setCreateIfMissing(rand.nextBoolean());
+ origOpts.setAllow2pc(rand.nextBoolean());
+ origOpts.setMaxBackgroundJobs(rand.nextInt(10));
+ DBOptions copyOpts = new DBOptions(origOpts);
+ assertThat(origOpts.createIfMissing()).isEqualTo(copyOpts.createIfMissing());
+ assertThat(origOpts.allow2pc()).isEqualTo(copyOpts.allow2pc());
+    assertThat(origOpts.maxBackgroundJobs()).isEqualTo(
+        copyOpts.maxBackgroundJobs());
+ }
+
+ @Test
+ public void getDBOptionsFromProps() {
+ // setup sample properties
+ final Properties properties = new Properties();
+ properties.put("allow_mmap_reads", "true");
+ properties.put("bytes_per_sync", "13");
+ try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) {
+ assertThat(opt).isNotNull();
+ assertThat(String.valueOf(opt.allowMmapReads())).
+ isEqualTo(properties.get("allow_mmap_reads"));
+ assertThat(String.valueOf(opt.bytesPerSync())).
+ isEqualTo(properties.get("bytes_per_sync"));
+ }
+ }
+
+ @Test
+ public void failDBOptionsFromPropsWithIllegalValue() {
+ // setup sample properties
+ final Properties properties = new Properties();
+ properties.put("tomato", "1024");
+ properties.put("burger", "2");
+ try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) {
+ assertThat(opt).isNull();
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void failDBOptionsFromPropsWithNullValue() {
+ try(final DBOptions opt = DBOptions.getDBOptionsFromProps(null)) {
+ //no-op
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void failDBOptionsFromPropsWithEmptyProps() {
+ try(final DBOptions opt = DBOptions.getDBOptionsFromProps(
+ new Properties())) {
+ //no-op
+ }
+ }
+
+ @Test
+ public void linkageOfPrepMethods() {
+ try (final DBOptions opt = new DBOptions()) {
+ opt.optimizeForSmallDb();
+ }
+ }
+
+ @Test
+ public void env() {
+ try (final DBOptions opt = new DBOptions();
+ final Env env = Env.getDefault()) {
+ opt.setEnv(env);
+ assertThat(opt.getEnv()).isSameAs(env);
+ }
+ }
+
+ @Test
+ public void setIncreaseParallelism() {
+ try(final DBOptions opt = new DBOptions()) {
+ final int threads = Runtime.getRuntime().availableProcessors() * 2;
+ opt.setIncreaseParallelism(threads);
+ }
+ }
+
+ @Test
+ public void createIfMissing() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setCreateIfMissing(boolValue);
+ assertThat(opt.createIfMissing()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void createMissingColumnFamilies() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setCreateMissingColumnFamilies(boolValue);
+ assertThat(opt.createMissingColumnFamilies()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void errorIfExists() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setErrorIfExists(boolValue);
+ assertThat(opt.errorIfExists()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void paranoidChecks() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setParanoidChecks(boolValue);
+ assertThat(opt.paranoidChecks()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void maxTotalWalSize() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxTotalWalSize(longValue);
+ assertThat(opt.maxTotalWalSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void maxOpenFiles() {
+ try(final DBOptions opt = new DBOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxOpenFiles(intValue);
+ assertThat(opt.maxOpenFiles()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxFileOpeningThreads() {
+ try(final DBOptions opt = new DBOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxFileOpeningThreads(intValue);
+ assertThat(opt.maxFileOpeningThreads()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void useFsync() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setUseFsync(boolValue);
+ assertThat(opt.useFsync()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void dbPaths() {
+ final List<DbPath> dbPaths = new ArrayList<>();
+ dbPaths.add(new DbPath(Paths.get("/a"), 10));
+ dbPaths.add(new DbPath(Paths.get("/b"), 100));
+ dbPaths.add(new DbPath(Paths.get("/c"), 1000));
+
+ try(final DBOptions opt = new DBOptions()) {
+ assertThat(opt.dbPaths()).isEqualTo(Collections.emptyList());
+
+ opt.setDbPaths(dbPaths);
+
+ assertThat(opt.dbPaths()).isEqualTo(dbPaths);
+ }
+ }
+
+ @Test
+ public void dbLogDir() {
+ try(final DBOptions opt = new DBOptions()) {
+ final String str = "path/to/DbLogDir";
+ opt.setDbLogDir(str);
+ assertThat(opt.dbLogDir()).isEqualTo(str);
+ }
+ }
+
+ @Test
+ public void walDir() {
+ try(final DBOptions opt = new DBOptions()) {
+ final String str = "path/to/WalDir";
+ opt.setWalDir(str);
+ assertThat(opt.walDir()).isEqualTo(str);
+ }
+ }
+
+ @Test
+ public void deleteObsoleteFilesPeriodMicros() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setDeleteObsoleteFilesPeriodMicros(longValue);
+ assertThat(opt.deleteObsoleteFilesPeriodMicros()).isEqualTo(longValue);
+ }
+ }
+
+ @SuppressWarnings("deprecated")
+ @Test
+ public void baseBackgroundCompactions() {
+ try (final DBOptions opt = new DBOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setBaseBackgroundCompactions(intValue);
+ assertThat(opt.baseBackgroundCompactions()).
+ isEqualTo(intValue);
+ }
+ }
+
+ @SuppressWarnings("deprecated")
+ @Test
+ public void maxBackgroundCompactions() {
+ try(final DBOptions opt = new DBOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxBackgroundCompactions(intValue);
+ assertThat(opt.maxBackgroundCompactions()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxSubcompactions() {
+ try (final DBOptions opt = new DBOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxSubcompactions(intValue);
+ assertThat(opt.maxSubcompactions()).
+ isEqualTo(intValue);
+ }
+ }
+
+ @SuppressWarnings("deprecated")
+ @Test
+ public void maxBackgroundFlushes() {
+ try(final DBOptions opt = new DBOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxBackgroundFlushes(intValue);
+ assertThat(opt.maxBackgroundFlushes()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxBackgroundJobs() {
+ try (final DBOptions opt = new DBOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxBackgroundJobs(intValue);
+ assertThat(opt.maxBackgroundJobs()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxLogFileSize() throws RocksDBException {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxLogFileSize(longValue);
+ assertThat(opt.maxLogFileSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void logFileTimeToRoll() throws RocksDBException {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setLogFileTimeToRoll(longValue);
+ assertThat(opt.logFileTimeToRoll()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void keepLogFileNum() throws RocksDBException {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setKeepLogFileNum(longValue);
+ assertThat(opt.keepLogFileNum()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void recycleLogFileNum() throws RocksDBException {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setRecycleLogFileNum(longValue);
+ assertThat(opt.recycleLogFileNum()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void maxManifestFileSize() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxManifestFileSize(longValue);
+ assertThat(opt.maxManifestFileSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void tableCacheNumshardbits() {
+ try(final DBOptions opt = new DBOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setTableCacheNumshardbits(intValue);
+ assertThat(opt.tableCacheNumshardbits()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void walSizeLimitMB() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setWalSizeLimitMB(longValue);
+ assertThat(opt.walSizeLimitMB()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void walTtlSeconds() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setWalTtlSeconds(longValue);
+ assertThat(opt.walTtlSeconds()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void manifestPreallocationSize() throws RocksDBException {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setManifestPreallocationSize(longValue);
+ assertThat(opt.manifestPreallocationSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void useDirectReads() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setUseDirectReads(boolValue);
+ assertThat(opt.useDirectReads()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void useDirectIoForFlushAndCompaction() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setUseDirectIoForFlushAndCompaction(boolValue);
+ assertThat(opt.useDirectIoForFlushAndCompaction()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void allowFAllocate() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAllowFAllocate(boolValue);
+ assertThat(opt.allowFAllocate()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void allowMmapReads() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAllowMmapReads(boolValue);
+ assertThat(opt.allowMmapReads()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void allowMmapWrites() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAllowMmapWrites(boolValue);
+ assertThat(opt.allowMmapWrites()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void isFdCloseOnExec() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setIsFdCloseOnExec(boolValue);
+ assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void statsDumpPeriodSec() {
+ try(final DBOptions opt = new DBOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setStatsDumpPeriodSec(intValue);
+ assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void statsPersistPeriodSec() {
+ try (final DBOptions opt = new DBOptions()) {
+ final int intValue = rand.nextInt();
+ opt.setStatsPersistPeriodSec(intValue);
+ assertThat(opt.statsPersistPeriodSec()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void statsHistoryBufferSize() {
+ try (final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setStatsHistoryBufferSize(longValue);
+ assertThat(opt.statsHistoryBufferSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void adviseRandomOnOpen() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAdviseRandomOnOpen(boolValue);
+ assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void dbWriteBufferSize() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setDbWriteBufferSize(longValue);
+ assertThat(opt.dbWriteBufferSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void setWriteBufferManager() throws RocksDBException {
+ try (final DBOptions opt = new DBOptions();
+ final Cache cache = new LRUCache(1 * 1024 * 1024);
+         final WriteBufferManager writeBufferManager = new WriteBufferManager(2000L, cache)) {
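+      // the write buffer manager caps aggregate memtable memory; passing a
+      // cache charges that memory against the cache's capacity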
+ opt.setWriteBufferManager(writeBufferManager);
+ assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager);
+ }
+ }
+
+ @Test
+ public void setWriteBufferManagerWithZeroBufferSize() throws RocksDBException {
+ try (final DBOptions opt = new DBOptions();
+ final Cache cache = new LRUCache(1 * 1024 * 1024);
+         final WriteBufferManager writeBufferManager = new WriteBufferManager(0L, cache)) {
+ opt.setWriteBufferManager(writeBufferManager);
+ assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager);
+ }
+ }
+
+ @Test
+ public void accessHintOnCompactionStart() {
+ try(final DBOptions opt = new DBOptions()) {
+ final AccessHint accessHint = AccessHint.SEQUENTIAL;
+ opt.setAccessHintOnCompactionStart(accessHint);
+ assertThat(opt.accessHintOnCompactionStart()).isEqualTo(accessHint);
+ }
+ }
+
+ @Test
+ public void newTableReaderForCompactionInputs() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setNewTableReaderForCompactionInputs(boolValue);
+ assertThat(opt.newTableReaderForCompactionInputs()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void compactionReadaheadSize() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setCompactionReadaheadSize(longValue);
+ assertThat(opt.compactionReadaheadSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void randomAccessMaxBufferSize() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setRandomAccessMaxBufferSize(longValue);
+ assertThat(opt.randomAccessMaxBufferSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void writableFileMaxBufferSize() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setWritableFileMaxBufferSize(longValue);
+ assertThat(opt.writableFileMaxBufferSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void useAdaptiveMutex() {
+ try(final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setUseAdaptiveMutex(boolValue);
+ assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void bytesPerSync() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setBytesPerSync(longValue);
+ assertThat(opt.bytesPerSync()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void walBytesPerSync() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setWalBytesPerSync(longValue);
+ assertThat(opt.walBytesPerSync()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void strictBytesPerSync() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.strictBytesPerSync()).isFalse();
+ opt.setStrictBytesPerSync(true);
+ assertThat(opt.strictBytesPerSync()).isTrue();
+ }
+ }
+
+ @Test
+ public void enableThreadTracking() {
+ try (final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setEnableThreadTracking(boolValue);
+ assertThat(opt.enableThreadTracking()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void delayedWriteRate() {
+ try(final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setDelayedWriteRate(longValue);
+ assertThat(opt.delayedWriteRate()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void enablePipelinedWrite() {
+ try(final DBOptions opt = new DBOptions()) {
+ assertThat(opt.enablePipelinedWrite()).isFalse();
+ opt.setEnablePipelinedWrite(true);
+ assertThat(opt.enablePipelinedWrite()).isTrue();
+ }
+ }
+
+ @Test
+  public void unorderedWrite() {
+ try(final DBOptions opt = new DBOptions()) {
+ assertThat(opt.unorderedWrite()).isFalse();
+ opt.setUnorderedWrite(true);
+ assertThat(opt.unorderedWrite()).isTrue();
+ }
+ }
+
+ @Test
+ public void allowConcurrentMemtableWrite() {
+ try (final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAllowConcurrentMemtableWrite(boolValue);
+ assertThat(opt.allowConcurrentMemtableWrite()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void enableWriteThreadAdaptiveYield() {
+ try (final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setEnableWriteThreadAdaptiveYield(boolValue);
+ assertThat(opt.enableWriteThreadAdaptiveYield()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void writeThreadMaxYieldUsec() {
+ try (final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setWriteThreadMaxYieldUsec(longValue);
+ assertThat(opt.writeThreadMaxYieldUsec()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void writeThreadSlowYieldUsec() {
+ try (final DBOptions opt = new DBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setWriteThreadSlowYieldUsec(longValue);
+ assertThat(opt.writeThreadSlowYieldUsec()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void skipStatsUpdateOnDbOpen() {
+ try (final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setSkipStatsUpdateOnDbOpen(boolValue);
+ assertThat(opt.skipStatsUpdateOnDbOpen()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void walRecoveryMode() {
+ try (final DBOptions opt = new DBOptions()) {
+ for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
+ opt.setWalRecoveryMode(walRecoveryMode);
+ assertThat(opt.walRecoveryMode()).isEqualTo(walRecoveryMode);
+ }
+ }
+ }
+
+ @Test
+ public void allow2pc() {
+ try (final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAllow2pc(boolValue);
+ assertThat(opt.allow2pc()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void rowCache() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.rowCache()).isNull();
+
+ try(final Cache lruCache = new LRUCache(1000)) {
+ opt.setRowCache(lruCache);
+ assertThat(opt.rowCache()).isEqualTo(lruCache);
+ }
+
+ try(final Cache clockCache = new ClockCache(1000)) {
+ opt.setRowCache(clockCache);
+ assertThat(opt.rowCache()).isEqualTo(clockCache);
+ }
+ }
+ }
+
+ @Test
+ public void walFilter() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.walFilter()).isNull();
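+      // a WAL filter sees every log record during recovery; this no-op
+      // implementation simply lets each record through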
+
+ try (final AbstractWalFilter walFilter = new AbstractWalFilter() {
+ @Override
+ public void columnFamilyLogNumberMap(
+ final Map<Integer, Long> cfLognumber,
+ final Map<String, Integer> cfNameId) {
+ // no-op
+ }
+
+ @Override
+ public LogRecordFoundResult logRecordFound(final long logNumber,
+ final String logFileName, final WriteBatch batch,
+ final WriteBatch newBatch) {
+ return new LogRecordFoundResult(
+ WalProcessingOption.CONTINUE_PROCESSING, false);
+ }
+
+ @Override
+ public String name() {
+ return "test-wal-filter";
+ }
+ }) {
+ opt.setWalFilter(walFilter);
+ assertThat(opt.walFilter()).isEqualTo(walFilter);
+ }
+ }
+ }
+
+ @Test
+ public void failIfOptionsFileError() {
+ try (final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setFailIfOptionsFileError(boolValue);
+ assertThat(opt.failIfOptionsFileError()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void dumpMallocStats() {
+ try (final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setDumpMallocStats(boolValue);
+ assertThat(opt.dumpMallocStats()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void avoidFlushDuringRecovery() {
+ try (final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAvoidFlushDuringRecovery(boolValue);
+ assertThat(opt.avoidFlushDuringRecovery()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void avoidFlushDuringShutdown() {
+ try (final DBOptions opt = new DBOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAvoidFlushDuringShutdown(boolValue);
+ assertThat(opt.avoidFlushDuringShutdown()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void allowIngestBehind() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.allowIngestBehind()).isFalse();
+ opt.setAllowIngestBehind(true);
+ assertThat(opt.allowIngestBehind()).isTrue();
+ }
+ }
+
+ @Test
+ public void preserveDeletes() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.preserveDeletes()).isFalse();
+ opt.setPreserveDeletes(true);
+ assertThat(opt.preserveDeletes()).isTrue();
+ }
+ }
+
+ @Test
+ public void twoWriteQueues() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.twoWriteQueues()).isFalse();
+ opt.setTwoWriteQueues(true);
+ assertThat(opt.twoWriteQueues()).isTrue();
+ }
+ }
+
+ @Test
+ public void manualWalFlush() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.manualWalFlush()).isFalse();
+ opt.setManualWalFlush(true);
+ assertThat(opt.manualWalFlush()).isTrue();
+ }
+ }
+
+ @Test
+ public void atomicFlush() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.atomicFlush()).isFalse();
+ opt.setAtomicFlush(true);
+ assertThat(opt.atomicFlush()).isTrue();
+ }
+ }
+
+ @Test
+ public void rateLimiter() {
+ try(final DBOptions options = new DBOptions();
+ final DBOptions anotherOptions = new DBOptions();
+ final RateLimiter rateLimiter = new RateLimiter(1000, 100 * 1000, 1)) {
+ options.setRateLimiter(rateLimiter);
+      // Test setting a RateLimiter constructed inline
+ anotherOptions.setRateLimiter(
+ new RateLimiter(1000));
+ }
+ }
+
+ @Test
+ public void sstFileManager() throws RocksDBException {
+ try (final DBOptions options = new DBOptions();
+ final SstFileManager sstFileManager =
+ new SstFileManager(Env.getDefault())) {
+ options.setSstFileManager(sstFileManager);
+ }
+ }
+
+ @Test
+ public void statistics() {
+ try(final DBOptions options = new DBOptions()) {
+ final Statistics statistics = options.statistics();
+ assertThat(statistics).isNull();
+ }
+
+ try(final Statistics statistics = new Statistics();
+ final DBOptions options = new DBOptions().setStatistics(statistics);
+ final Statistics stats = options.statistics()) {
+ assertThat(stats).isNotNull();
+ }
+ }
+}
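The statistics test above only checks handle wiring. As a minimal, hypothetical sketch of how a Statistics object attached through the options is typically consumed (the path and the ticker choice are illustrative, not taken from the test suite):

import org.rocksdb.*;

public class StatisticsSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Statistics statistics = new Statistics();
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setStatistics(statistics);
         final RocksDB db = RocksDB.open(options, "/tmp/stats-sketch-db")) {
      db.put("k".getBytes(), "v".getBytes());
      db.get("k".getBytes());
      // sample a cumulative counter after some activity
      final long keysWritten = statistics.getTickerCount(TickerType.NUMBER_KEYS_WRITTEN);
      System.out.println("keys written: " + keysWritten);
    }
  }
}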
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/DefaultEnvTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/DefaultEnvTest.java
new file mode 100644
index 000000000..3fb563ecb
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/DefaultEnvTest.java
@@ -0,0 +1,113 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.Collection;
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class DefaultEnvTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void backgroundThreads() {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ defaultEnv.setBackgroundThreads(5, Priority.BOTTOM);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.BOTTOM)).isEqualTo(5);
+
+ defaultEnv.setBackgroundThreads(5);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isEqualTo(5);
+
+ defaultEnv.setBackgroundThreads(5, Priority.LOW);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isEqualTo(5);
+
+ defaultEnv.setBackgroundThreads(5, Priority.HIGH);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.HIGH)).isEqualTo(5);
+ }
+ }
+
+ @Test
+ public void threadPoolQueueLen() {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ assertThat(defaultEnv.getThreadPoolQueueLen(Priority.BOTTOM)).isEqualTo(0);
+ assertThat(defaultEnv.getThreadPoolQueueLen(Priority.LOW)).isEqualTo(0);
+ assertThat(defaultEnv.getThreadPoolQueueLen(Priority.HIGH)).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void incBackgroundThreadsIfNeeded() {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.BOTTOM);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.BOTTOM)).isGreaterThanOrEqualTo(20);
+
+ defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.LOW);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isGreaterThanOrEqualTo(20);
+
+ defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.HIGH);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.HIGH)).isGreaterThanOrEqualTo(20);
+ }
+ }
+
+ @Test
+ public void lowerThreadPoolIOPriority() {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ defaultEnv.lowerThreadPoolIOPriority(Priority.BOTTOM);
+
+ defaultEnv.lowerThreadPoolIOPriority(Priority.LOW);
+
+ defaultEnv.lowerThreadPoolIOPriority(Priority.HIGH);
+ }
+ }
+
+ @Test
+ public void lowerThreadPoolCPUPriority() {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ defaultEnv.lowerThreadPoolCPUPriority(Priority.BOTTOM);
+
+ defaultEnv.lowerThreadPoolCPUPriority(Priority.LOW);
+
+ defaultEnv.lowerThreadPoolCPUPriority(Priority.HIGH);
+ }
+ }
+
+ @Test
+ public void threadList() throws RocksDBException {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ final Collection<ThreadStatus> threadList = defaultEnv.getThreadList();
+ assertThat(threadList.size()).isGreaterThan(0);
+ }
+ }
+
+ @Test
+ public void threadList_integration() throws RocksDBException {
+ try (final Env env = RocksEnv.getDefault();
+ final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true)
+ .setEnv(env)) {
+ // open database
+ try (final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ final List<ThreadStatus> threadList = env.getThreadList();
+ assertThat(threadList.size()).isGreaterThan(0);
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java
new file mode 100644
index 000000000..67385345c
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java
@@ -0,0 +1,93 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import java.nio.ByteBuffer;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class DirectSliceTest {
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+ public void directSlice() {
+ try(final DirectSlice directSlice = new DirectSlice("abc");
+ final DirectSlice otherSlice = new DirectSlice("abc")) {
+ assertThat(directSlice.toString()).isEqualTo("abc");
+ // clear first slice
+ directSlice.clear();
+ assertThat(directSlice.toString()).isEmpty();
+      // get the first char of otherSlice
+ assertThat(otherSlice.get(0)).isEqualTo("a".getBytes()[0]);
+ // remove prefix
+ otherSlice.removePrefix(1);
+ assertThat(otherSlice.toString()).isEqualTo("bc");
+ }
+ }
+
+ @Test
+ public void directSliceWithByteBuffer() {
+ final byte[] data = "Some text".getBytes();
+ final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length + 1);
+ buffer.put(data);
+ buffer.put(data.length, (byte)0);
+
+ try(final DirectSlice directSlice = new DirectSlice(buffer)) {
+ assertThat(directSlice.toString()).isEqualTo("Some text");
+ }
+ }
+
+ @Test
+ public void directSliceWithByteBufferAndLength() {
+ final byte[] data = "Some text".getBytes();
+ final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length);
+ buffer.put(data);
+ try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) {
+ assertThat(directSlice.toString()).isEqualTo("Some");
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void directSliceInitWithoutDirectAllocation() {
+ final byte[] data = "Some text".getBytes();
+ final ByteBuffer buffer = ByteBuffer.wrap(data);
+ try(final DirectSlice directSlice = new DirectSlice(buffer)) {
+ //no-op
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void directSlicePrefixInitWithoutDirectAllocation() {
+ final byte[] data = "Some text".getBytes();
+ final ByteBuffer buffer = ByteBuffer.wrap(data);
+ try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) {
+ //no-op
+ }
+ }
+
+ @Test
+ public void directSliceClear() {
+ try(final DirectSlice directSlice = new DirectSlice("abc")) {
+ assertThat(directSlice.toString()).isEqualTo("abc");
+ directSlice.clear();
+ assertThat(directSlice.toString()).isEmpty();
+ directSlice.clear(); // make sure we don't double-free
+ }
+ }
+
+ @Test
+ public void directSliceRemovePrefix() {
+ try(final DirectSlice directSlice = new DirectSlice("abc")) {
+ assertThat(directSlice.toString()).isEqualTo("abc");
+ directSlice.removePrefix(1);
+ assertThat(directSlice.toString()).isEqualTo("bc");
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java
new file mode 100644
index 000000000..75768e2ae
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java
@@ -0,0 +1,145 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import java.util.Random;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class EnvOptionsTest {
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = new RocksNativeLibraryResource();
+
+ public static final Random rand = PlatformRandomHelper.getPlatformSpecificRandomFactory();
+
+ @Test
+ public void dbOptionsConstructor() {
+ final long compactionReadaheadSize = 4 * 1024 * 1024;
+ try (final DBOptions dbOptions = new DBOptions()
+ .setCompactionReadaheadSize(compactionReadaheadSize)) {
+ try (final EnvOptions envOptions = new EnvOptions(dbOptions)) {
+ assertThat(envOptions.compactionReadaheadSize())
+ .isEqualTo(compactionReadaheadSize);
+ }
+ }
+ }
+
+ @Test
+ public void useMmapReads() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ envOptions.setUseMmapReads(boolValue);
+ assertThat(envOptions.useMmapReads()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void useMmapWrites() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ envOptions.setUseMmapWrites(boolValue);
+ assertThat(envOptions.useMmapWrites()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void useDirectReads() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ envOptions.setUseDirectReads(boolValue);
+ assertThat(envOptions.useDirectReads()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void useDirectWrites() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ envOptions.setUseDirectWrites(boolValue);
+ assertThat(envOptions.useDirectWrites()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void allowFallocate() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ envOptions.setAllowFallocate(boolValue);
+ assertThat(envOptions.allowFallocate()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void setFdCloexecs() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ envOptions.setSetFdCloexec(boolValue);
+ assertThat(envOptions.setFdCloexec()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void bytesPerSync() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final long longValue = rand.nextLong();
+ envOptions.setBytesPerSync(longValue);
+ assertThat(envOptions.bytesPerSync()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void fallocateWithKeepSize() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ envOptions.setFallocateWithKeepSize(boolValue);
+ assertThat(envOptions.fallocateWithKeepSize()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void compactionReadaheadSize() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final int intValue = rand.nextInt();
+ envOptions.setCompactionReadaheadSize(intValue);
+ assertThat(envOptions.compactionReadaheadSize()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void randomAccessMaxBufferSize() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final int intValue = rand.nextInt();
+ envOptions.setRandomAccessMaxBufferSize(intValue);
+ assertThat(envOptions.randomAccessMaxBufferSize()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void writableFileMaxBufferSize() {
+ try (final EnvOptions envOptions = new EnvOptions()) {
+ final int intValue = rand.nextInt();
+ envOptions.setWritableFileMaxBufferSize(intValue);
+ assertThat(envOptions.writableFileMaxBufferSize()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void rateLimiter() {
+ try (final EnvOptions envOptions = new EnvOptions();
+ final RateLimiter rateLimiter1 = new RateLimiter(1000, 100 * 1000, 1)) {
+ envOptions.setRateLimiter(rateLimiter1);
+ assertThat(envOptions.rateLimiter()).isEqualTo(rateLimiter1);
+
+ try(final RateLimiter rateLimiter2 = new RateLimiter(1000)) {
+ envOptions.setRateLimiter(rateLimiter2);
+ assertThat(envOptions.rateLimiter()).isEqualTo(rateLimiter2);
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java
new file mode 100644
index 000000000..dc5c19fbc
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java
@@ -0,0 +1,39 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+public class FilterTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+ public void filter() {
+ // new Bloom filter
+ final BlockBasedTableConfig blockConfig = new BlockBasedTableConfig();
+ try(final Options options = new Options()) {
+
+ try(final Filter bloomFilter = new BloomFilter()) {
+ blockConfig.setFilterPolicy(bloomFilter);
+ options.setTableFormatConfig(blockConfig);
+ }
+
+ try(final Filter bloomFilter = new BloomFilter(10)) {
+ blockConfig.setFilterPolicy(bloomFilter);
+ options.setTableFormatConfig(blockConfig);
+ }
+
+ try(final Filter bloomFilter = new BloomFilter(10, false)) {
+ blockConfig.setFilterPolicy(bloomFilter);
+ options.setTableFormatConfig(blockConfig);
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/FlushOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/FlushOptionsTest.java
new file mode 100644
index 000000000..f90ae911d
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/FlushOptionsTest.java
@@ -0,0 +1,31 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class FlushOptionsTest {
+
+ @Test
+ public void waitForFlush() {
+ try (final FlushOptions flushOptions = new FlushOptions()) {
+ assertThat(flushOptions.waitForFlush()).isTrue();
+ flushOptions.setWaitForFlush(false);
+ assertThat(flushOptions.waitForFlush()).isFalse();
+ }
+ }
+
+ @Test
+ public void allowWriteStall() {
+ try (final FlushOptions flushOptions = new FlushOptions()) {
+ assertThat(flushOptions.allowWriteStall()).isFalse();
+ flushOptions.setAllowWriteStall(true);
+ assertThat(flushOptions.allowWriteStall()).isTrue();
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java
new file mode 100644
index 000000000..1a354f4ce
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java
@@ -0,0 +1,49 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class FlushTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void flush() throws RocksDBException {
+ try(final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setMaxWriteBufferNumber(10)
+ .setMinWriteBufferNumberToMerge(10);
+ final WriteOptions wOpt = new WriteOptions()
+ .setDisableWAL(true);
+ final FlushOptions flushOptions = new FlushOptions()
+ .setWaitForFlush(true)) {
+ assertThat(flushOptions.waitForFlush()).isTrue();
+
+ try(final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ db.put(wOpt, "key1".getBytes(), "value1".getBytes());
+ db.put(wOpt, "key2".getBytes(), "value2".getBytes());
+ db.put(wOpt, "key3".getBytes(), "value3".getBytes());
+ db.put(wOpt, "key4".getBytes(), "value4".getBytes());
+ assertThat(db.getProperty("rocksdb.num-entries-active-mem-table"))
+ .isEqualTo("4");
+ db.flush(flushOptions);
+ assertThat(db.getProperty("rocksdb.num-entries-active-mem-table"))
+ .isEqualTo("0");
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/HdfsEnvTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/HdfsEnvTest.java
new file mode 100644
index 000000000..c1b064f48
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/HdfsEnvTest.java
@@ -0,0 +1,45 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+public class HdfsEnvTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ // expect org.rocksdb.RocksDBException: Not compiled with hdfs support
+ @Test(expected = RocksDBException.class)
+ public void construct() throws RocksDBException {
+ try (final Env env = new HdfsEnv("hdfs://localhost:5000")) {
+ // no-op
+ }
+ }
+
+ // expect org.rocksdb.RocksDBException: Not compiled with hdfs support
+ @Test(expected = RocksDBException.class)
+ public void construct_integration() throws RocksDBException {
+ try (final Env env = new HdfsEnv("hdfs://localhost:5000");
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setEnv(env);
+ ) {
+ try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getPath())) {
+ db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java
new file mode 100644
index 000000000..12ee537d9
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java
@@ -0,0 +1,109 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.rocksdb.util.Environment;
+
+import java.io.IOException;
+
+import static java.nio.file.Files.readAllBytes;
+import static java.nio.file.Paths.get;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class InfoLogLevelTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void testInfoLogLevel() throws RocksDBException,
+ IOException {
+ try (final RocksDB db =
+ RocksDB.open(dbFolder.getRoot().getAbsolutePath())) {
+ db.put("key".getBytes(), "value".getBytes());
+ db.flush(new FlushOptions().setWaitForFlush(true));
+ assertThat(getLogContentsWithoutHeader()).isNotEmpty();
+ }
+ }
+
+ @Test
+ public void testFatalLogLevel() throws RocksDBException,
+ IOException {
+ try (final Options options = new Options().
+ setCreateIfMissing(true).
+ setInfoLogLevel(InfoLogLevel.FATAL_LEVEL);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ assertThat(options.infoLogLevel()).
+ isEqualTo(InfoLogLevel.FATAL_LEVEL);
+ db.put("key".getBytes(), "value".getBytes());
+ // As InfoLogLevel is set to FATAL_LEVEL, here we expect the log
+ // content to be empty.
+ assertThat(getLogContentsWithoutHeader()).isEmpty();
+ }
+ }
+
+ @Test
+ public void testFatalLogLevelWithDBOptions()
+ throws RocksDBException, IOException {
+ try (final DBOptions dbOptions = new DBOptions().
+ setInfoLogLevel(InfoLogLevel.FATAL_LEVEL);
+ final Options options = new Options(dbOptions,
+ new ColumnFamilyOptions()).
+ setCreateIfMissing(true);
+ final RocksDB db =
+ RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+ assertThat(dbOptions.infoLogLevel()).
+ isEqualTo(InfoLogLevel.FATAL_LEVEL);
+ assertThat(options.infoLogLevel()).
+ isEqualTo(InfoLogLevel.FATAL_LEVEL);
+ db.put("key".getBytes(), "value".getBytes());
+ assertThat(getLogContentsWithoutHeader()).isEmpty();
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void failIfIllegalByteValueProvided() {
+ InfoLogLevel.getInfoLogLevel((byte) -1);
+ }
+
+ @Test
+ public void valueOf() {
+ assertThat(InfoLogLevel.valueOf("DEBUG_LEVEL")).
+ isEqualTo(InfoLogLevel.DEBUG_LEVEL);
+ }
+
+ /**
+ * Read LOG file contents into String.
+ *
+ * @return LOG file contents as String.
+ * @throws IOException if file is not found.
+ */
+ private String getLogContentsWithoutHeader() throws IOException {
+ final String separator = Environment.isWindows() ?
+ "\n" : System.getProperty("line.separator");
+ final String[] lines = new String(readAllBytes(get(
+ dbFolder.getRoot().getAbsolutePath() + "/LOG"))).split(separator);
+
+    int firstNonHeader = lines.length;
+    // Identify the last line of the header
+    for (int i = lines.length - 1; i >= 0; --i) {
+      if (lines[i].indexOf("DB pointer") >= 0) {
+        firstNonHeader = i + 1;
+        break;
+      }
+    }
+    final StringBuilder builder = new StringBuilder();
+    for (int i = firstNonHeader; i < lines.length; ++i) {
+ builder.append(lines[i]).append(separator);
+ }
+ return builder.toString();
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java
new file mode 100644
index 000000000..ab7e21568
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java
@@ -0,0 +1,107 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import java.util.Random;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class IngestExternalFileOptionsTest {
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE
+ = new RocksNativeLibraryResource();
+
+ public static final Random rand =
+ PlatformRandomHelper.getPlatformSpecificRandomFactory();
+
+ @Test
+ public void createExternalSstFileInfoWithoutParameters() {
+ try (final IngestExternalFileOptions options =
+ new IngestExternalFileOptions()) {
+ assertThat(options).isNotNull();
+ }
+ }
+
+ @Test
+ public void createExternalSstFileInfoWithParameters() {
+ final boolean moveFiles = rand.nextBoolean();
+ final boolean snapshotConsistency = rand.nextBoolean();
+ final boolean allowGlobalSeqNo = rand.nextBoolean();
+ final boolean allowBlockingFlush = rand.nextBoolean();
+ try (final IngestExternalFileOptions options =
+ new IngestExternalFileOptions(moveFiles, snapshotConsistency,
+ allowGlobalSeqNo, allowBlockingFlush)) {
+ assertThat(options).isNotNull();
+ assertThat(options.moveFiles()).isEqualTo(moveFiles);
+ assertThat(options.snapshotConsistency()).isEqualTo(snapshotConsistency);
+ assertThat(options.allowGlobalSeqNo()).isEqualTo(allowGlobalSeqNo);
+ assertThat(options.allowBlockingFlush()).isEqualTo(allowBlockingFlush);
+ }
+ }
+
+ @Test
+ public void moveFiles() {
+ try (final IngestExternalFileOptions options =
+ new IngestExternalFileOptions()) {
+ final boolean moveFiles = rand.nextBoolean();
+ options.setMoveFiles(moveFiles);
+ assertThat(options.moveFiles()).isEqualTo(moveFiles);
+ }
+ }
+
+ @Test
+ public void snapshotConsistency() {
+ try (final IngestExternalFileOptions options =
+ new IngestExternalFileOptions()) {
+ final boolean snapshotConsistency = rand.nextBoolean();
+ options.setSnapshotConsistency(snapshotConsistency);
+ assertThat(options.snapshotConsistency()).isEqualTo(snapshotConsistency);
+ }
+ }
+
+ @Test
+ public void allowGlobalSeqNo() {
+ try (final IngestExternalFileOptions options =
+ new IngestExternalFileOptions()) {
+ final boolean allowGlobalSeqNo = rand.nextBoolean();
+ options.setAllowGlobalSeqNo(allowGlobalSeqNo);
+ assertThat(options.allowGlobalSeqNo()).isEqualTo(allowGlobalSeqNo);
+ }
+ }
+
+ @Test
+ public void allowBlockingFlush() {
+ try (final IngestExternalFileOptions options =
+ new IngestExternalFileOptions()) {
+ final boolean allowBlockingFlush = rand.nextBoolean();
+ options.setAllowBlockingFlush(allowBlockingFlush);
+ assertThat(options.allowBlockingFlush()).isEqualTo(allowBlockingFlush);
+ }
+ }
+
+ @Test
+ public void ingestBehind() {
+ try (final IngestExternalFileOptions options =
+ new IngestExternalFileOptions()) {
+ assertThat(options.ingestBehind()).isFalse();
+ options.setIngestBehind(true);
+ assertThat(options.ingestBehind()).isTrue();
+ }
+ }
+
+ @Test
+ public void writeGlobalSeqno() {
+ try (final IngestExternalFileOptions options =
+ new IngestExternalFileOptions()) {
+ assertThat(options.writeGlobalSeqno()).isTrue();
+ options.setWriteGlobalSeqno(false);
+ assertThat(options.writeGlobalSeqno()).isFalse();
+ }
+ }
+}
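The options above are only exercised in isolation, never in an actual ingestion. A minimal sketch under assumed paths: an SstFileWriter produces a sorted SST, which is then ingested with moveFiles enabled.

import java.util.Collections;
import org.rocksdb.*;

public class IngestSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    final String sstPath = "/tmp/example.sst"; // hypothetical path
    try (final Options options = new Options().setCreateIfMissing(true);
         final EnvOptions envOptions = new EnvOptions();
         final SstFileWriter writer = new SstFileWriter(envOptions, options)) {
      writer.open(sstPath);
      // keys must be added in ascending order
      writer.put("key1".getBytes(), "value1".getBytes());
      writer.put("key2".getBytes(), "value2".getBytes());
      writer.finish();
    }
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/ingest-sketch-db");
         final IngestExternalFileOptions ingestOptions =
             new IngestExternalFileOptions().setMoveFiles(true)) {
      db.ingestExternalFile(Collections.singletonList(sstPath), ingestOptions);
    }
  }
}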
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java
new file mode 100644
index 000000000..45b06be35
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java
@@ -0,0 +1,192 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class KeyMayExistTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void keyMayExist() throws RocksDBException {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes())
+ );
+
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, columnFamilyHandleList)) {
+ try {
+ assertThat(columnFamilyHandleList.size()).
+ isEqualTo(2);
+ db.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
+ // Test without column family
+ final Holder<byte[]> holder = new Holder<>();
+ boolean exists = db.keyMayExist("key".getBytes(UTF_8), holder);
+ assertThat(exists).isTrue();
+ assertThat(holder.getValue()).isNotNull();
+ assertThat(new String(holder.getValue(), UTF_8)).isEqualTo("value");
+
+ exists = db.keyMayExist("key".getBytes(UTF_8), null);
+ assertThat(exists).isTrue();
+
+ // Slice key
+ final StringBuilder builder = new StringBuilder("prefix");
+ final int offset = builder.toString().length();
+ builder.append("slice key 0");
+ final int len = builder.toString().length() - offset;
+ builder.append("suffix");
+
+ final byte[] sliceKey = builder.toString().getBytes(UTF_8);
+ final byte[] sliceValue = "slice value 0".getBytes(UTF_8);
+ db.put(sliceKey, offset, len, sliceValue, 0, sliceValue.length);
+
+ exists = db.keyMayExist(sliceKey, offset, len, holder);
+ assertThat(exists).isTrue();
+ assertThat(holder.getValue()).isNotNull();
+ assertThat(holder.getValue()).isEqualTo(sliceValue);
+
+ exists = db.keyMayExist(sliceKey, offset, len, null);
+ assertThat(exists).isTrue();
+
+ // Test without column family but with readOptions
+ try (final ReadOptions readOptions = new ReadOptions()) {
+ exists = db.keyMayExist(readOptions, "key".getBytes(UTF_8), holder);
+ assertThat(exists).isTrue();
+ assertThat(holder.getValue()).isNotNull();
+ assertThat(new String(holder.getValue(), UTF_8)).isEqualTo("value");
+
+ exists = db.keyMayExist(readOptions, "key".getBytes(UTF_8), null);
+ assertThat(exists).isTrue();
+
+ exists = db.keyMayExist(readOptions, sliceKey, offset, len, holder);
+ assertThat(exists).isTrue();
+ assertThat(holder.getValue()).isNotNull();
+ assertThat(holder.getValue()).isEqualTo(sliceValue);
+
+ exists = db.keyMayExist(readOptions, sliceKey, offset, len, null);
+ assertThat(exists).isTrue();
+ }
+
+ // Test with column family
+ exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(UTF_8),
+ holder);
+ assertThat(exists).isTrue();
+ assertThat(holder.getValue()).isNotNull();
+ assertThat(new String(holder.getValue(), UTF_8)).isEqualTo("value");
+
+ exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(UTF_8),
+ null);
+ assertThat(exists).isTrue();
+
+      // Test slice key with column family
+ exists = db.keyMayExist(columnFamilyHandleList.get(0), sliceKey, offset, len,
+ holder);
+ assertThat(exists).isTrue();
+ assertThat(holder.getValue()).isNotNull();
+ assertThat(holder.getValue()).isEqualTo(sliceValue);
+
+ exists = db.keyMayExist(columnFamilyHandleList.get(0), sliceKey, offset, len,
+ null);
+ assertThat(exists).isTrue();
+
+ // Test with column family and readOptions
+ try (final ReadOptions readOptions = new ReadOptions()) {
+ exists = db.keyMayExist(columnFamilyHandleList.get(0), readOptions,
+ "key".getBytes(UTF_8), holder);
+ assertThat(exists).isTrue();
+ assertThat(holder.getValue()).isNotNull();
+ assertThat(new String(holder.getValue(), UTF_8)).isEqualTo("value");
+
+ exists = db.keyMayExist(columnFamilyHandleList.get(0), readOptions,
+ "key".getBytes(UTF_8), null);
+ assertThat(exists).isTrue();
+
+ // Test slice key with column family and read options
+ exists = db.keyMayExist(columnFamilyHandleList.get(0), readOptions,
+ sliceKey, offset, len, holder);
+ assertThat(exists).isTrue();
+ assertThat(holder.getValue()).isNotNull();
+ assertThat(holder.getValue()).isEqualTo(sliceValue);
+
+ exists = db.keyMayExist(columnFamilyHandleList.get(0), readOptions,
+ sliceKey, offset, len, null);
+ assertThat(exists).isTrue();
+ }
+
+      // keyMayExist in CF1 must not find the key and must leave the holder empty
+ exists = db.keyMayExist(columnFamilyHandleList.get(1),
+ "key".getBytes(UTF_8), holder);
+ assertThat(exists).isFalse();
+ assertThat(holder.getValue()).isNull();
+ exists = db.keyMayExist(columnFamilyHandleList.get(1),
+ "key".getBytes(UTF_8), null);
+ assertThat(exists).isFalse();
+
+ // slice key
+ exists = db.keyMayExist(columnFamilyHandleList.get(1),
+ sliceKey, 1, 3, holder);
+ assertThat(exists).isFalse();
+ assertThat(holder.getValue()).isNull();
+ exists = db.keyMayExist(columnFamilyHandleList.get(1),
+ sliceKey, 1, 3, null);
+ assertThat(exists).isFalse();
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void keyMayExistNonUnicodeString() throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+      final byte[] key = "key".getBytes(UTF_8);
+      final byte[] value = { (byte)0x80 }; // invalid unicode code-point
+      db.put(key, value);
+
+      final byte[] buf = new byte[10];
+ final int read = db.get(key, buf);
+ assertThat(read).isEqualTo(1);
+ assertThat(buf).startsWith(value);
+
+ final Holder<byte[]> holder = new Holder<>();
+ boolean exists = db.keyMayExist("key".getBytes(UTF_8), holder);
+ assertThat(exists).isTrue();
+ assertThat(holder.getValue()).isNotNull();
+ assertThat(holder.getValue()).isEqualTo(value);
+
+ exists = db.keyMayExist("key".getBytes(UTF_8), null);
+ assertThat(exists).isTrue();
+ }
+ }
+}
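The holder-based assertions above mirror the intended application pattern: treat keyMayExist as a fast negative check and fall back to a full get only when the value was not already materialized in memory. A small sketch of that pattern (the helper name is ours, not part of the API):

import org.rocksdb.*;

public class KeyMayExistSketch {
  // returns the value for key, or null when the key is definitely absent
  static byte[] fastRead(final RocksDB db, final byte[] key) throws RocksDBException {
    final Holder<byte[]> holder = new Holder<>();
    if (!db.keyMayExist(key, holder)) {
      return null; // definite miss, no read I/O performed
    }
    // "maybe present": the value may already have been found in memory
    return holder.getValue() != null ? holder.getValue() : db.get(key);
  }
}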
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java
new file mode 100644
index 000000000..d2cd15b7e
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java
@@ -0,0 +1,27 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+public class LRUCacheTest {
+
+ static {
+ RocksDB.loadLibrary();
+ }
+
+ @Test
+ public void newLRUCache() {
+ final long capacity = 1000;
+ final int numShardBits = 16;
+ final boolean strictCapacityLimit = true;
+    final double highPriPoolRatio = 0.5; // ratio must lie within [0.0, 1.0]
+ try(final Cache lruCache = new LRUCache(capacity,
+ numShardBits, strictCapacityLimit, highPriPoolRatio)) {
+      // no-op
+ }
+ }
+}
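Constructing the cache is only half the picture. A sketch of wiring an LRUCache in as the block cache (sizes and path are arbitrary), matching the pattern used by MemoryUtilTest further below:

import org.rocksdb.*;

public class BlockCacheSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Cache cache = new LRUCache(8 * 1024 * 1024, 6, false, 0.5);
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setTableFormatConfig(
                 new BlockBasedTableConfig().setBlockCache(cache));
         final RocksDB db = RocksDB.open(options, "/tmp/cache-sketch-db")) {
      db.put("k".getBytes(), "v".getBytes());
    }
  }
}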
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java
new file mode 100644
index 000000000..5bc299f11
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java
@@ -0,0 +1,239 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class LoggerTest {
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void customLogger() throws RocksDBException {
+ final AtomicInteger logMessageCounter = new AtomicInteger();
+ try (final Options options = new Options().
+ setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL).
+ setCreateIfMissing(true);
+ final Logger logger = new Logger(options) {
+ // Create new logger with max log level passed by options
+ @Override
+ protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+ assertThat(logMsg).isNotNull();
+ assertThat(logMsg.length()).isGreaterThan(0);
+ logMessageCounter.incrementAndGet();
+ }
+ }
+ ) {
+ // Set custom logger to options
+ options.setLogger(logger);
+
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+        // there should be more than zero log messages
+        // received at debug level.
+ assertThat(logMessageCounter.get()).isGreaterThan(0);
+ }
+ }
+ }
+
+ @Test
+ public void warnLogger() throws RocksDBException {
+ final AtomicInteger logMessageCounter = new AtomicInteger();
+ try (final Options options = new Options().
+ setInfoLogLevel(InfoLogLevel.WARN_LEVEL).
+ setCreateIfMissing(true);
+
+ final Logger logger = new Logger(options) {
+ // Create new logger with max log level passed by options
+ @Override
+ protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+ assertThat(logMsg).isNotNull();
+ assertThat(logMsg.length()).isGreaterThan(0);
+ logMessageCounter.incrementAndGet();
+ }
+ }
+ ) {
+
+ // Set custom logger to options
+ options.setLogger(logger);
+
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // there should be zero messages
+ // using warn level as log level.
+ assertThat(logMessageCounter.get()).isEqualTo(0);
+ }
+ }
+ }
+
+
+ @Test
+ public void fatalLogger() throws RocksDBException {
+ final AtomicInteger logMessageCounter = new AtomicInteger();
+ try (final Options options = new Options().
+ setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+ setCreateIfMissing(true);
+
+ final Logger logger = new Logger(options) {
+ // Create new logger with max log level passed by options
+ @Override
+ protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+ assertThat(logMsg).isNotNull();
+ assertThat(logMsg.length()).isGreaterThan(0);
+ logMessageCounter.incrementAndGet();
+ }
+ }
+ ) {
+
+ // Set custom logger to options
+ options.setLogger(logger);
+
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // there should be zero messages
+ // using fatal level as log level.
+ assertThat(logMessageCounter.get()).isEqualTo(0);
+ }
+ }
+ }
+
+ @Test
+ public void dbOptionsLogger() throws RocksDBException {
+ final AtomicInteger logMessageCounter = new AtomicInteger();
+ try (final DBOptions options = new DBOptions().
+ setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+ setCreateIfMissing(true);
+ final Logger logger = new Logger(options) {
+ // Create new logger with max log level passed by options
+ @Override
+ protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+ assertThat(logMsg).isNotNull();
+ assertThat(logMsg.length()).isGreaterThan(0);
+ logMessageCounter.incrementAndGet();
+ }
+ }
+ ) {
+ // Set custom logger to options
+ options.setLogger(logger);
+
+ final List<ColumnFamilyDescriptor> cfDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, cfHandles)) {
+ try {
+ // there should be zero messages
+ // using fatal level as log level.
+ assertThat(logMessageCounter.get()).isEqualTo(0);
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle : cfHandles) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void setWarnLogLevel() {
+ final AtomicInteger logMessageCounter = new AtomicInteger();
+ try (final Options options = new Options().
+ setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+ setCreateIfMissing(true);
+ final Logger logger = new Logger(options) {
+ // Create new logger with max log level passed by options
+ @Override
+ protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+ assertThat(logMsg).isNotNull();
+ assertThat(logMsg.length()).isGreaterThan(0);
+ logMessageCounter.incrementAndGet();
+ }
+ }
+ ) {
+ assertThat(logger.infoLogLevel()).
+ isEqualTo(InfoLogLevel.FATAL_LEVEL);
+ logger.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);
+ assertThat(logger.infoLogLevel()).
+ isEqualTo(InfoLogLevel.WARN_LEVEL);
+ }
+ }
+
+ @Test
+ public void setInfoLogLevel() {
+ final AtomicInteger logMessageCounter = new AtomicInteger();
+ try (final Options options = new Options().
+ setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+ setCreateIfMissing(true);
+ final Logger logger = new Logger(options) {
+ // Create new logger with max log level passed by options
+ @Override
+ protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+ assertThat(logMsg).isNotNull();
+ assertThat(logMsg.length()).isGreaterThan(0);
+ logMessageCounter.incrementAndGet();
+ }
+ }
+ ) {
+ assertThat(logger.infoLogLevel()).
+ isEqualTo(InfoLogLevel.FATAL_LEVEL);
+ logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL);
+ assertThat(logger.infoLogLevel()).
+ isEqualTo(InfoLogLevel.DEBUG_LEVEL);
+ }
+ }
+
+ @Test
+ public void changeLogLevelAtRuntime() throws RocksDBException {
+ final AtomicInteger logMessageCounter = new AtomicInteger();
+ try (final Options options = new Options().
+ setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+ setCreateIfMissing(true);
+
+ // Create new logger with max log level passed by options
+ final Logger logger = new Logger(options) {
+ @Override
+ protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+ assertThat(logMsg).isNotNull();
+ assertThat(logMsg.length()).isGreaterThan(0);
+ logMessageCounter.incrementAndGet();
+ }
+ }
+ ) {
+ // Set custom logger to options
+ options.setLogger(logger);
+
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ // there should be zero messages
+ // using fatal level as log level.
+ assertThat(logMessageCounter.get()).isEqualTo(0);
+
+ // change log level to debug level
+ logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL);
+
+ db.put("key".getBytes(), "value".getBytes());
+ db.flush(new FlushOptions().setWaitForFlush(true));
+
+        // messages should now have been received due to the previous actions.
+ assertThat(logMessageCounter.get()).isNotEqualTo(0);
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java
new file mode 100644
index 000000000..73ac589a9
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java
@@ -0,0 +1,111 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class MemTableTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+ public void hashSkipListMemTable() throws RocksDBException {
+ try(final Options options = new Options()) {
+ // Test HashSkipListMemTableConfig
+ HashSkipListMemTableConfig memTableConfig =
+ new HashSkipListMemTableConfig();
+ assertThat(memTableConfig.bucketCount()).
+ isEqualTo(1000000);
+ memTableConfig.setBucketCount(2000000);
+ assertThat(memTableConfig.bucketCount()).
+ isEqualTo(2000000);
+ assertThat(memTableConfig.height()).
+ isEqualTo(4);
+ memTableConfig.setHeight(5);
+ assertThat(memTableConfig.height()).
+ isEqualTo(5);
+ assertThat(memTableConfig.branchingFactor()).
+ isEqualTo(4);
+ memTableConfig.setBranchingFactor(6);
+ assertThat(memTableConfig.branchingFactor()).
+ isEqualTo(6);
+ options.setMemTableConfig(memTableConfig);
+ }
+ }
+
+ @Test
+ public void skipListMemTable() throws RocksDBException {
+ try(final Options options = new Options()) {
+ SkipListMemTableConfig skipMemTableConfig =
+ new SkipListMemTableConfig();
+ assertThat(skipMemTableConfig.lookahead()).
+ isEqualTo(0);
+ skipMemTableConfig.setLookahead(20);
+ assertThat(skipMemTableConfig.lookahead()).
+ isEqualTo(20);
+ options.setMemTableConfig(skipMemTableConfig);
+ }
+ }
+
+ @Test
+ public void hashLinkedListMemTable() throws RocksDBException {
+ try(final Options options = new Options()) {
+ HashLinkedListMemTableConfig hashLinkedListMemTableConfig =
+ new HashLinkedListMemTableConfig();
+ assertThat(hashLinkedListMemTableConfig.bucketCount()).
+ isEqualTo(50000);
+ hashLinkedListMemTableConfig.setBucketCount(100000);
+ assertThat(hashLinkedListMemTableConfig.bucketCount()).
+ isEqualTo(100000);
+ assertThat(hashLinkedListMemTableConfig.hugePageTlbSize()).
+ isEqualTo(0);
+ hashLinkedListMemTableConfig.setHugePageTlbSize(1);
+ assertThat(hashLinkedListMemTableConfig.hugePageTlbSize()).
+ isEqualTo(1);
+ assertThat(hashLinkedListMemTableConfig.
+ bucketEntriesLoggingThreshold()).
+ isEqualTo(4096);
+ hashLinkedListMemTableConfig.
+ setBucketEntriesLoggingThreshold(200);
+ assertThat(hashLinkedListMemTableConfig.
+ bucketEntriesLoggingThreshold()).
+ isEqualTo(200);
+ assertThat(hashLinkedListMemTableConfig.
+ ifLogBucketDistWhenFlush()).isTrue();
+ hashLinkedListMemTableConfig.
+ setIfLogBucketDistWhenFlush(false);
+ assertThat(hashLinkedListMemTableConfig.
+ ifLogBucketDistWhenFlush()).isFalse();
+ assertThat(hashLinkedListMemTableConfig.
+ thresholdUseSkiplist()).
+ isEqualTo(256);
+ hashLinkedListMemTableConfig.setThresholdUseSkiplist(29);
+ assertThat(hashLinkedListMemTableConfig.
+ thresholdUseSkiplist()).
+ isEqualTo(29);
+ options.setMemTableConfig(hashLinkedListMemTableConfig);
+ }
+ }
+
+ @Test
+ public void vectorMemTable() throws RocksDBException {
+ try(final Options options = new Options()) {
+ VectorMemTableConfig vectorMemTableConfig =
+ new VectorMemTableConfig();
+ assertThat(vectorMemTableConfig.reservedSize()).
+ isEqualTo(0);
+ vectorMemTableConfig.setReservedSize(123);
+ assertThat(vectorMemTableConfig.reservedSize()).
+ isEqualTo(123);
+ options.setMemTableConfig(vectorMemTableConfig);
+ }
+ }
+}
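These configs are only installed on Options, never exercised against an open database. A sketch of actually opening with a hash-based memtable, under two assumptions we believe hold for RocksDB: hash memtables are designed around a prefix extractor, and concurrent memtable writes (on by default) must be disabled for non-skiplist memtables. The path is hypothetical.

import org.rocksdb.*;

public class HashMemTableSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .useFixedLengthPrefixExtractor(4)
             .setAllowConcurrentMemtableWrite(false)
             .setMemTableConfig(new HashSkipListMemTableConfig());
         final RocksDB db = RocksDB.open(options, "/tmp/memtable-sketch-db")) {
      db.put("key1".getBytes(), "value1".getBytes());
    }
  }
}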
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MemoryUtilTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MemoryUtilTest.java
new file mode 100644
index 000000000..72d3ddc3c
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MemoryUtilTest.java
@@ -0,0 +1,143 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class MemoryUtilTest {
+
+ private static final String MEMTABLE_SIZE = "rocksdb.size-all-mem-tables";
+ private static final String UNFLUSHED_MEMTABLE_SIZE = "rocksdb.cur-size-all-mem-tables";
+ private static final String TABLE_READERS = "rocksdb.estimate-table-readers-mem";
+
+ private final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
+ private final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule public TemporaryFolder dbFolder1 = new TemporaryFolder();
+ @Rule public TemporaryFolder dbFolder2 = new TemporaryFolder();
+
+ /**
+ * Test MemoryUtil.getApproximateMemoryUsageByType before and after a put + get
+ */
+ @Test
+ public void getApproximateMemoryUsageByType() throws RocksDBException {
+ try (final Cache cache = new LRUCache(8 * 1024 * 1024);
+ final Options options =
+ new Options()
+ .setCreateIfMissing(true)
+ .setTableFormatConfig(new BlockBasedTableConfig().setBlockCache(cache));
+ final FlushOptions flushOptions =
+ new FlushOptions().setWaitForFlush(true);
+ final RocksDB db =
+ RocksDB.open(options, dbFolder1.getRoot().getAbsolutePath())) {
+
+ List<RocksDB> dbs = new ArrayList<>(1);
+ dbs.add(db);
+ Set<Cache> caches = new HashSet<>(1);
+ caches.add(cache);
+ Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(dbs, caches);
+
+ assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isEqualTo(
+ db.getAggregatedLongProperty(MEMTABLE_SIZE));
+ assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isEqualTo(
+ db.getAggregatedLongProperty(UNFLUSHED_MEMTABLE_SIZE));
+ assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isEqualTo(
+ db.getAggregatedLongProperty(TABLE_READERS));
+ assertThat(usage.get(MemoryUsageType.kCacheTotal)).isEqualTo(0);
+
+ db.put(key, value);
+ db.flush(flushOptions);
+ db.get(key);
+
+ usage = MemoryUtil.getApproximateMemoryUsageByType(dbs, caches);
+ assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isGreaterThan(0);
+ assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isEqualTo(
+ db.getAggregatedLongProperty(MEMTABLE_SIZE));
+ assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isGreaterThan(0);
+ assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isEqualTo(
+ db.getAggregatedLongProperty(UNFLUSHED_MEMTABLE_SIZE));
+ assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isGreaterThan(0);
+ assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isEqualTo(
+ db.getAggregatedLongProperty(TABLE_READERS));
+ assertThat(usage.get(MemoryUsageType.kCacheTotal)).isGreaterThan(0);
+
+ }
+ }
+
+ /**
+ * Test MemoryUtil.getApproximateMemoryUsageByType with null inputs
+ */
+ @Test
+ public void getApproximateMemoryUsageByTypeNulls() throws RocksDBException {
+ Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(null, null);
+
+ assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isEqualTo(null);
+ assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isEqualTo(null);
+ assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isEqualTo(null);
+ assertThat(usage.get(MemoryUsageType.kCacheTotal)).isEqualTo(null);
+ }
+
+ /**
+ * Test MemoryUtil.getApproximateMemoryUsageByType with two DBs and two caches
+ */
+ @Test
+ public void getApproximateMemoryUsageByTypeMultiple() throws RocksDBException {
+ try (final Cache cache1 = new LRUCache(1 * 1024 * 1024);
+ final Options options1 =
+ new Options()
+ .setCreateIfMissing(true)
+ .setTableFormatConfig(new BlockBasedTableConfig().setBlockCache(cache1));
+ final RocksDB db1 =
+ RocksDB.open(options1, dbFolder1.getRoot().getAbsolutePath());
+ final Cache cache2 = new LRUCache(1 * 1024 * 1024);
+ final Options options2 =
+ new Options()
+ .setCreateIfMissing(true)
+ .setTableFormatConfig(new BlockBasedTableConfig().setBlockCache(cache2));
+ final RocksDB db2 =
+ RocksDB.open(options2, dbFolder2.getRoot().getAbsolutePath());
+ final FlushOptions flushOptions =
+ new FlushOptions().setWaitForFlush(true);
+
+ ) {
+      final List<RocksDB> dbs = new ArrayList<>(2);
+      dbs.add(db1);
+      dbs.add(db2);
+      final Set<Cache> caches = new HashSet<>(2);
+      caches.add(cache1);
+      caches.add(cache2);
+
+ for (RocksDB db: dbs) {
+ db.put(key, value);
+ db.flush(flushOptions);
+ db.get(key);
+ }
+
+ Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(dbs, caches);
+ assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isEqualTo(
+ db1.getAggregatedLongProperty(MEMTABLE_SIZE) + db2.getAggregatedLongProperty(MEMTABLE_SIZE));
+ assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isEqualTo(
+ db1.getAggregatedLongProperty(UNFLUSHED_MEMTABLE_SIZE) + db2.getAggregatedLongProperty(UNFLUSHED_MEMTABLE_SIZE));
+ assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isEqualTo(
+ db1.getAggregatedLongProperty(TABLE_READERS) + db2.getAggregatedLongProperty(TABLE_READERS));
+ assertThat(usage.get(MemoryUsageType.kCacheTotal)).isGreaterThan(0);
+
+ }
+ }
+
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java
new file mode 100644
index 000000000..128d694bf
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java
@@ -0,0 +1,440 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+import java.util.ArrayList;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class MergeTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void stringOption()
+ throws InterruptedException, RocksDBException {
+ try (final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setMergeOperatorName("stringappend");
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // writing aa under key
+ db.put("key".getBytes(), "aa".getBytes());
+ // merge bb under key
+ db.merge("key".getBytes(), "bb".getBytes());
+
+ final byte[] value = db.get("key".getBytes());
+ final String strValue = new String(value);
+ assertThat(strValue).isEqualTo("aa,bb");
+ }
+ }
+
+ private byte[] longToByteArray(long l) {
+ ByteBuffer buf = ByteBuffer.allocate(Long.SIZE / Byte.SIZE);
+ buf.putLong(l);
+ return buf.array();
+ }
+
+ private long longFromByteArray(byte[] a) {
+ ByteBuffer buf = ByteBuffer.allocate(Long.SIZE / Byte.SIZE);
+ buf.put(a);
+ buf.flip();
+ return buf.getLong();
+ }
+
+ @Test
+ public void uint64AddOption()
+ throws InterruptedException, RocksDBException {
+ try (final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setMergeOperatorName("uint64add");
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // writing (long)100 under key
+ db.put("key".getBytes(), longToByteArray(100));
+ // merge (long)1 under key
+ db.merge("key".getBytes(), longToByteArray(1));
+
+ final byte[] value = db.get("key".getBytes());
+ final long longValue = longFromByteArray(value);
+ assertThat(longValue).isEqualTo(101);
+ }
+ }
+
+ @Test
+ public void cFStringOption()
+ throws InterruptedException, RocksDBException {
+
+ try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
+ .setMergeOperatorName("stringappend");
+ final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
+ .setMergeOperatorName("stringappend")
+ ) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt2)
+ );
+
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions opt = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ columnFamilyHandleList)) {
+ try {
+ // writing aa under key
+ db.put(columnFamilyHandleList.get(1),
+ "cfkey".getBytes(), "aa".getBytes());
+ // merge bb under key
+ db.merge(columnFamilyHandleList.get(1),
+ "cfkey".getBytes(), "bb".getBytes());
+
+ byte[] value = db.get(columnFamilyHandleList.get(1),
+ "cfkey".getBytes());
+ String strValue = new String(value);
+ assertThat(strValue).isEqualTo("aa,bb");
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandleList) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void cFUInt64AddOption()
+ throws InterruptedException, RocksDBException {
+
+ try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
+ .setMergeOperatorName("uint64add");
+ final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
+ .setMergeOperatorName("uint64add")
+ ) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt2)
+ );
+
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions opt = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ columnFamilyHandleList)) {
+ try {
+ // writing (long)100 under key
+ db.put(columnFamilyHandleList.get(1),
+ "cfkey".getBytes(), longToByteArray(100));
+ // merge (long)1 under key
+ db.merge(columnFamilyHandleList.get(1),
+ "cfkey".getBytes(), longToByteArray(1));
+
+ byte[] value = db.get(columnFamilyHandleList.get(1),
+ "cfkey".getBytes());
+ long longValue = longFromByteArray(value);
+ assertThat(longValue).isEqualTo(101);
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandleList) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void operatorOption()
+ throws InterruptedException, RocksDBException {
+ try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+ final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setMergeOperator(stringAppendOperator);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // Writing aa under key
+ db.put("key".getBytes(), "aa".getBytes());
+
+      // merge bb under key
+ db.merge("key".getBytes(), "bb".getBytes());
+
+ final byte[] value = db.get("key".getBytes());
+ final String strValue = new String(value);
+
+ assertThat(strValue).isEqualTo("aa,bb");
+ }
+ }
+
+ @Test
+ public void uint64AddOperatorOption()
+ throws InterruptedException, RocksDBException {
+ try (final UInt64AddOperator uint64AddOperator = new UInt64AddOperator();
+ final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setMergeOperator(uint64AddOperator);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // Writing (long)100 under key
+ db.put("key".getBytes(), longToByteArray(100));
+
+      // merge (long)1 under key
+ db.merge("key".getBytes(), longToByteArray(1));
+
+ final byte[] value = db.get("key".getBytes());
+ final long longValue = longFromByteArray(value);
+
+ assertThat(longValue).isEqualTo(101);
+ }
+ }
+
+ @Test
+ public void cFOperatorOption()
+ throws InterruptedException, RocksDBException {
+ try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+ final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
+ .setMergeOperator(stringAppendOperator);
+ final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
+ .setMergeOperator(stringAppendOperator)
+ ) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), cfOpt2)
+ );
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions opt = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ columnFamilyHandleList)
+ ) {
+ try {
+ // writing aa under key
+ db.put(columnFamilyHandleList.get(1),
+ "cfkey".getBytes(), "aa".getBytes());
+ // merge bb under key
+ db.merge(columnFamilyHandleList.get(1),
+ "cfkey".getBytes(), "bb".getBytes());
+ byte[] value = db.get(columnFamilyHandleList.get(1),
+ "cfkey".getBytes());
+ String strValue = new String(value);
+
+ // Test also with createColumnFamily
+ try (final ColumnFamilyOptions cfHandleOpts =
+ new ColumnFamilyOptions()
+ .setMergeOperator(stringAppendOperator);
+ final ColumnFamilyHandle cfHandle =
+ db.createColumnFamily(
+ new ColumnFamilyDescriptor("new_cf2".getBytes(),
+ cfHandleOpts))
+ ) {
+ // writing xx under cfkey2
+ db.put(cfHandle, "cfkey2".getBytes(), "xx".getBytes());
+ // merge yy under cfkey2
+ db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(),
+ "yy".getBytes());
+ value = db.get(cfHandle, "cfkey2".getBytes());
+ String strValueTmpCf = new String(value);
+
+ assertThat(strValue).isEqualTo("aa,bb");
+ assertThat(strValueTmpCf).isEqualTo("xx,yy");
+ }
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void cFUInt64AddOperatorOption()
+ throws InterruptedException, RocksDBException {
+ try (final UInt64AddOperator uint64AddOperator = new UInt64AddOperator();
+ final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
+ .setMergeOperator(uint64AddOperator);
+ final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
+ .setMergeOperator(uint64AddOperator)
+ ) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), cfOpt2)
+ );
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions opt = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ columnFamilyHandleList)
+ ) {
+ try {
+ // writing (long)100 under key
+ db.put(columnFamilyHandleList.get(1),
+ "cfkey".getBytes(), longToByteArray(100));
+ // merge (long)1 under key
+ db.merge(columnFamilyHandleList.get(1),
+ "cfkey".getBytes(), longToByteArray(1));
+ byte[] value = db.get(columnFamilyHandleList.get(1),
+ "cfkey".getBytes());
+ long longValue = longFromByteArray(value);
+
+ // Test also with createColumnFamily
+ try (final ColumnFamilyOptions cfHandleOpts =
+ new ColumnFamilyOptions()
+ .setMergeOperator(uint64AddOperator);
+ final ColumnFamilyHandle cfHandle =
+ db.createColumnFamily(
+ new ColumnFamilyDescriptor("new_cf2".getBytes(),
+ cfHandleOpts))
+ ) {
+ // writing (long)200 under cfkey2
+ db.put(cfHandle, "cfkey2".getBytes(), longToByteArray(200));
+ // merge (long)50 under cfkey2
+ db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(),
+ longToByteArray(50));
+ value = db.get(cfHandle, "cfkey2".getBytes());
+ long longValueTmpCf = longFromByteArray(value);
+
+ assertThat(longValue).isEqualTo(101);
+ assertThat(longValueTmpCf).isEqualTo(250);
+ }
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void operatorGcBehaviour()
+ throws RocksDBException {
+ try (final StringAppendOperator stringAppendOperator = new StringAppendOperator()) {
+ try (final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setMergeOperator(stringAppendOperator);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ // test reuse
+ try (final Options opt = new Options()
+ .setMergeOperator(stringAppendOperator);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ // test param init
+ try (final StringAppendOperator stringAppendOperator2 = new StringAppendOperator();
+ final Options opt = new Options()
+ .setMergeOperator(stringAppendOperator2);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ // test replace one with another merge operator instance
+ try (final Options opt = new Options()
+ .setMergeOperator(stringAppendOperator);
+ final StringAppendOperator newStringAppendOperator = new StringAppendOperator()) {
+ opt.setMergeOperator(newStringAppendOperator);
+ try (final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+ }
+ }
+ }
+
+ @Test
+ public void uint64AddOperatorGcBehaviour()
+ throws RocksDBException {
+ try (final UInt64AddOperator uint64AddOperator = new UInt64AddOperator()) {
+ try (final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setMergeOperator(uint64AddOperator);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ // test reuse
+ try (final Options opt = new Options()
+ .setMergeOperator(uint64AddOperator);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ // test param init
+ try (final UInt64AddOperator uint64AddOperator2 = new UInt64AddOperator();
+ final Options opt = new Options()
+ .setMergeOperator(uint64AddOperator2);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ // test replace one with another merge operator instance
+ try (final Options opt = new Options()
+ .setMergeOperator(uint64AddOperator);
+ final UInt64AddOperator newUInt64AddOperator = new UInt64AddOperator()) {
+ opt.setMergeOperator(newUInt64AddOperator);
+ try (final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+ }
+ }
+ }
+
+ @Test
+ public void emptyStringInSetMergeOperatorByName() {
+ try (final Options opt = new Options()
+ .setMergeOperatorName("");
+ final ColumnFamilyOptions cOpt = new ColumnFamilyOptions()
+ .setMergeOperatorName("")) {
+ //no-op
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void nullStringInSetMergeOperatorByNameOptions() {
+ try (final Options opt = new Options()) {
+ opt.setMergeOperatorName(null);
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void
+ nullStringInSetMergeOperatorByNameColumnFamilyOptions() {
+ try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+ opt.setMergeOperatorName(null);
+ }
+ }
+}
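
The merge tests above always seed a key with put() before merging, but merge() is also valid on a missing key: the operator is simply applied with no existing value. A minimal standalone sketch using the same org.rocksdb classes exercised by MergeTest (the database path and key names are illustrative):

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.StringAppendOperator;

    public class MergeSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final StringAppendOperator append = new StringAppendOperator();
             final Options options = new Options()
                 .setCreateIfMissing(true)
                 .setMergeOperator(append);
             final RocksDB db = RocksDB.open(options, "/tmp/merge-sketch")) {
          // merge() appends instead of overwriting; operands are joined
          // with the operator's default ',' delimiter.
          db.merge("events".getBytes(), "login".getBytes());
          db.merge("events".getBytes(), "click".getBytes());
          System.out.println(new String(db.get("events".getBytes()))); // login,click
        }
      }
    }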
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java
new file mode 100644
index 000000000..10c92d49d
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java
@@ -0,0 +1,55 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class MixedOptionsTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+  public void mixedOptionsTest() {
+ // Set a table factory and check the names
+ try(final Filter bloomFilter = new BloomFilter();
+ final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
+ .setTableFormatConfig(
+ new BlockBasedTableConfig().setFilterPolicy(bloomFilter))
+ ) {
+ assertThat(cfOptions.tableFactoryName()).isEqualTo(
+ "BlockBasedTable");
+ cfOptions.setTableFormatConfig(new PlainTableConfig());
+ assertThat(cfOptions.tableFactoryName()).isEqualTo("PlainTable");
+      // Initialize an Options object from the db options and
+      // column family options
+ try (final DBOptions dbOptions = new DBOptions();
+ final Options options = new Options(dbOptions, cfOptions)) {
+ assertThat(options.tableFactoryName()).isEqualTo("PlainTable");
+ // Free instances
+ }
+ }
+
+    // Test the optimize* convenience methods
+ try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
+ cfOptions.optimizeUniversalStyleCompaction();
+ cfOptions.optimizeLevelStyleCompaction();
+ cfOptions.optimizeForPointLookup(1024);
+ try(final Options options = new Options()) {
+ options.optimizeLevelStyleCompaction();
+ options.optimizeLevelStyleCompaction(400);
+ options.optimizeUniversalStyleCompaction();
+ options.optimizeUniversalStyleCompaction(400);
+ options.optimizeForPointLookup(1024);
+ options.prepareForBulkLoad();
+ }
+ }
+ }
+}
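
The Options(DBOptions, ColumnFamilyOptions) constructor checked above is the composition point between process-wide and per-column-family tuning. A brief sketch of the same constructor outside the test harness (path and buffer size are illustrative):

    import org.rocksdb.*;

    public class MixedOptionsSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true);
             final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
                 .setWriteBufferSize(8 * 1024 * 1024);
             // Compose DB-wide and per-CF settings into a single Options.
             final Options options = new Options(dbOptions, cfOptions);
             final RocksDB db = RocksDB.open(options, "/tmp/mixed-sketch")) {
          // As asserted in the test, options reports cfOptions' table factory.
          System.out.println(options.tableFactoryName());
        }
      }
    }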
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java
new file mode 100644
index 000000000..f631905e1
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java
@@ -0,0 +1,88 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.Test;
+import org.rocksdb.MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder;
+
+import java.util.NoSuchElementException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class MutableColumnFamilyOptionsTest {
+
+ @Test
+ public void builder() {
+ final MutableColumnFamilyOptionsBuilder builder =
+ MutableColumnFamilyOptions.builder();
+ builder
+ .setWriteBufferSize(10)
+ .setInplaceUpdateNumLocks(5)
+ .setDisableAutoCompactions(true)
+ .setParanoidFileChecks(true);
+
+ assertThat(builder.writeBufferSize()).isEqualTo(10);
+ assertThat(builder.inplaceUpdateNumLocks()).isEqualTo(5);
+ assertThat(builder.disableAutoCompactions()).isEqualTo(true);
+ assertThat(builder.paranoidFileChecks()).isEqualTo(true);
+ }
+
+ @Test(expected = NoSuchElementException.class)
+ public void builder_getWhenNotSet() {
+ final MutableColumnFamilyOptionsBuilder builder =
+ MutableColumnFamilyOptions.builder();
+
+ builder.writeBufferSize();
+ }
+
+ @Test
+ public void builder_build() {
+ final MutableColumnFamilyOptions options = MutableColumnFamilyOptions
+ .builder()
+ .setWriteBufferSize(10)
+ .setParanoidFileChecks(true)
+ .build();
+
+ assertThat(options.getKeys().length).isEqualTo(2);
+ assertThat(options.getValues().length).isEqualTo(2);
+ assertThat(options.getKeys()[0])
+ .isEqualTo(
+ MutableColumnFamilyOptions.MemtableOption.write_buffer_size.name());
+ assertThat(options.getValues()[0]).isEqualTo("10");
+ assertThat(options.getKeys()[1])
+ .isEqualTo(
+ MutableColumnFamilyOptions.MiscOption.paranoid_file_checks.name());
+ assertThat(options.getValues()[1]).isEqualTo("true");
+ }
+
+ @Test
+ public void mutableColumnFamilyOptions_toString() {
+ final String str = MutableColumnFamilyOptions
+ .builder()
+ .setWriteBufferSize(10)
+ .setInplaceUpdateNumLocks(5)
+ .setDisableAutoCompactions(true)
+ .setParanoidFileChecks(true)
+ .build()
+ .toString();
+
+ assertThat(str).isEqualTo("write_buffer_size=10;inplace_update_num_locks=5;"
+ + "disable_auto_compactions=true;paranoid_file_checks=true");
+ }
+
+ @Test
+ public void mutableColumnFamilyOptions_parse() {
+ final String str = "write_buffer_size=10;inplace_update_num_locks=5;"
+ + "disable_auto_compactions=true;paranoid_file_checks=true";
+
+ final MutableColumnFamilyOptionsBuilder builder =
+ MutableColumnFamilyOptions.parse(str);
+
+ assertThat(builder.writeBufferSize()).isEqualTo(10);
+ assertThat(builder.inplaceUpdateNumLocks()).isEqualTo(5);
+ assertThat(builder.disableAutoCompactions()).isEqualTo(true);
+ assertThat(builder.paranoidFileChecks()).isEqualTo(true);
+ }
+}
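
The key/value pairs that build() produces are what RocksDB consumes at runtime: a MutableColumnFamilyOptions can be applied to a live column family through RocksDB.setOptions() without reopening the database. A sketch, assuming an already open RocksDB instance db (values are illustrative):

    // Assumes an open RocksDB instance `db`; setOptions throws RocksDBException.
    final MutableColumnFamilyOptions mutable = MutableColumnFamilyOptions
        .builder()
        .setDisableAutoCompactions(true)       // e.g. pause compaction for a bulk load
        .setWriteBufferSize(16 * 1024 * 1024)  // illustrative size
        .build();
    db.setOptions(db.getDefaultColumnFamily(), mutable);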
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java
new file mode 100644
index 000000000..063a8de38
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java
@@ -0,0 +1,85 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.Test;
+import org.rocksdb.MutableDBOptions.MutableDBOptionsBuilder;
+
+import java.util.NoSuchElementException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class MutableDBOptionsTest {
+
+ @Test
+ public void builder() {
+ final MutableDBOptionsBuilder builder =
+ MutableDBOptions.builder();
+ builder
+ .setBytesPerSync(1024 * 1024 * 7)
+ .setMaxBackgroundJobs(5)
+ .setAvoidFlushDuringShutdown(false);
+
+ assertThat(builder.bytesPerSync()).isEqualTo(1024 * 1024 * 7);
+ assertThat(builder.maxBackgroundJobs()).isEqualTo(5);
+ assertThat(builder.avoidFlushDuringShutdown()).isEqualTo(false);
+ }
+
+ @Test(expected = NoSuchElementException.class)
+ public void builder_getWhenNotSet() {
+ final MutableDBOptionsBuilder builder =
+ MutableDBOptions.builder();
+
+ builder.bytesPerSync();
+ }
+
+ @Test
+ public void builder_build() {
+ final MutableDBOptions options = MutableDBOptions
+ .builder()
+ .setBytesPerSync(1024 * 1024 * 7)
+ .setMaxBackgroundJobs(5)
+ .build();
+
+ assertThat(options.getKeys().length).isEqualTo(2);
+ assertThat(options.getValues().length).isEqualTo(2);
+ assertThat(options.getKeys()[0])
+ .isEqualTo(
+ MutableDBOptions.DBOption.bytes_per_sync.name());
+ assertThat(options.getValues()[0]).isEqualTo("7340032");
+ assertThat(options.getKeys()[1])
+ .isEqualTo(
+ MutableDBOptions.DBOption.max_background_jobs.name());
+ assertThat(options.getValues()[1]).isEqualTo("5");
+ }
+
+ @Test
+ public void mutableDBOptions_toString() {
+ final String str = MutableDBOptions
+ .builder()
+ .setMaxOpenFiles(99)
+ .setDelayedWriteRate(789)
+ .setAvoidFlushDuringShutdown(true)
+ .setStrictBytesPerSync(true)
+ .build()
+ .toString();
+
+ assertThat(str).isEqualTo("max_open_files=99;delayed_write_rate=789;"
+ + "avoid_flush_during_shutdown=true;strict_bytes_per_sync=true");
+ }
+
+ @Test
+ public void mutableDBOptions_parse() {
+ final String str = "max_open_files=99;delayed_write_rate=789;"
+ + "avoid_flush_during_shutdown=true";
+
+ final MutableDBOptionsBuilder builder =
+ MutableDBOptions.parse(str);
+
+ assertThat(builder.maxOpenFiles()).isEqualTo(99);
+ assertThat(builder.delayedWriteRate()).isEqualTo(789);
+ assertThat(builder.avoidFlushDuringShutdown()).isEqualTo(true);
+ }
+}
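
MutableDBOptions plays the same role at the database level: RocksDB.setDBOptions() applies it to a running instance. A sketch under the same assumption of an open db (values are illustrative):

    // Assumes an open RocksDB instance `db`; setDBOptions throws RocksDBException.
    final MutableDBOptions mutable = MutableDBOptions
        .builder()
        .setMaxBackgroundJobs(4)
        .setBytesPerSync(1024 * 1024)
        .build();
    db.setDBOptions(mutable);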
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java
new file mode 100644
index 000000000..d1bdf0f88
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java
@@ -0,0 +1,92 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.*;
+import java.util.Comparator;
+
+import static org.junit.Assert.assertEquals;
+
+public class NativeComparatorWrapperTest {
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ private static final Random random = new Random();
+
+ @Test
+  public void roundtrip() throws RocksDBException {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ final int ITERATIONS = 1_000;
+
+ final String[] storedKeys = new String[ITERATIONS];
+ try (final NativeStringComparatorWrapper comparator = new NativeStringComparatorWrapper();
+ final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(comparator)) {
+
+ // store random integer keys
+ try (final RocksDB db = RocksDB.open(opt, dbPath)) {
+ for (int i = 0; i < ITERATIONS; i++) {
+ final String strKey = randomString();
+ final byte key[] = strKey.getBytes();
+        // check whether the key already exists (avoid duplicates)
+ if (i > 0 && db.get(key) != null) {
+ i--; // generate a different key
+ } else {
+ db.put(key, "value".getBytes());
+ storedKeys[i] = strKey;
+ }
+ }
+ }
+
+ // sort the stored keys into ascending alpha-numeric order
+ Arrays.sort(storedKeys, new Comparator<String>() {
+ @Override
+ public int compare(final String o1, final String o2) {
+ return o1.compareTo(o2);
+ }
+ });
+
+ // re-open db and read from start to end
+ // string keys should be in ascending
+ // order
+ try (final RocksDB db = RocksDB.open(opt, dbPath);
+ final RocksIterator it = db.newIterator()) {
+ int count = 0;
+ for (it.seekToFirst(); it.isValid(); it.next()) {
+ final String strKey = new String(it.key());
+ assertEquals(storedKeys[count++], strKey);
+ }
+ }
+ }
+ }
+
+ private String randomString() {
+ final char[] chars = new char[12];
+ for(int i = 0; i < 12; i++) {
+ final int letterCode = random.nextInt(24);
+ final char letter = (char) (((int) 'a') + letterCode);
+ chars[i] = letter;
+ }
+ return String.copyValueOf(chars);
+ }
+
+ public static class NativeStringComparatorWrapper
+ extends NativeComparatorWrapper {
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return newStringComparator();
+ }
+
+ private native long newStringComparator();
+ }
+}
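
A native wrapper such as the one tested above exists so that each key comparison stays in C++ rather than crossing the JNI boundary. When that per-comparison cost is acceptable, the pure-Java BytewiseComparator from org.rocksdb.util (used by other tests in this suite) yields the same ascending byte order; a sketch:

    import org.rocksdb.*;
    import org.rocksdb.util.BytewiseComparator;

    // Java-side comparator: easier to customize, but every comparison
    // is a JNI callback, unlike a NativeComparatorWrapper subclass.
    try (final ComparatorOptions copt = new ComparatorOptions();
         final BytewiseComparator comparator = new BytewiseComparator(copt);
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setComparator(comparator)) {
      // open the database with `options` as in the test above
    }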
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
new file mode 100644
index 000000000..ab60081a0
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
@@ -0,0 +1,41 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.rocksdb.util.Environment;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.*;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class NativeLibraryLoaderTest {
+
+ @Rule
+ public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+ @Test
+ public void tempFolder() throws IOException {
+ NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp(
+ temporaryFolder.getRoot().getAbsolutePath());
+ final Path path = Paths.get(temporaryFolder.getRoot().getAbsolutePath(),
+ Environment.getJniLibraryFileName("rocksdb"));
+ assertThat(Files.exists(path)).isTrue();
+ assertThat(Files.isReadable(path)).isTrue();
+ }
+
+ @Test
+ public void overridesExistingLibrary() throws IOException {
+ File first = NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp(
+ temporaryFolder.getRoot().getAbsolutePath());
+ NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp(
+ temporaryFolder.getRoot().getAbsolutePath());
+ assertThat(first.exists()).isTrue();
+ }
+}
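
Applications rarely call NativeLibraryLoader directly; RocksDB.loadLibrary() performs the same extract-and-load step and is idempotent, which is why the other tests simply hold a RocksNativeLibraryResource. A sketch of the usual entry point:

    import org.rocksdb.RocksDB;

    public class LoaderSketch {
      static {
        // Extracts the bundled JNI library to a temp location and loads it once.
        RocksDB.loadLibrary();
      }
    }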
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java
new file mode 100644
index 000000000..519b70b1d
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java
@@ -0,0 +1,131 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class OptimisticTransactionDBTest {
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void open() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ assertThat(otdb).isNotNull();
+ }
+ }
+
+ @Test
+ public void open_columnFamilies() throws RocksDBException {
+ try(final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions myCfOpts = new ColumnFamilyOptions()) {
+
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("myCf".getBytes(), myCfOpts));
+
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+ try (final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(dbOptions,
+ dbFolder.getRoot().getAbsolutePath(),
+ columnFamilyDescriptors, columnFamilyHandles)) {
+ try {
+ assertThat(otdb).isNotNull();
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void beginTransaction() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(
+ options, dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions writeOptions = new WriteOptions()) {
+
+ try(final Transaction txn = otdb.beginTransaction(writeOptions)) {
+ assertThat(txn).isNotNull();
+ }
+ }
+ }
+
+ @Test
+ public void beginTransaction_transactionOptions() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(
+ options, dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions writeOptions = new WriteOptions();
+ final OptimisticTransactionOptions optimisticTxnOptions =
+ new OptimisticTransactionOptions()) {
+
+ try(final Transaction txn = otdb.beginTransaction(writeOptions,
+ optimisticTxnOptions)) {
+ assertThat(txn).isNotNull();
+ }
+ }
+ }
+
+ @Test
+ public void beginTransaction_withOld() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(
+ options, dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions writeOptions = new WriteOptions()) {
+
+ try(final Transaction txn = otdb.beginTransaction(writeOptions)) {
+ final Transaction txnReused = otdb.beginTransaction(writeOptions, txn);
+ assertThat(txnReused).isSameAs(txn);
+ }
+ }
+ }
+
+ @Test
+ public void beginTransaction_withOld_transactionOptions()
+ throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(
+ options, dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions writeOptions = new WriteOptions();
+ final OptimisticTransactionOptions optimisticTxnOptions =
+ new OptimisticTransactionOptions()) {
+
+ try(final Transaction txn = otdb.beginTransaction(writeOptions)) {
+ final Transaction txnReused = otdb.beginTransaction(writeOptions,
+ optimisticTxnOptions, txn);
+ assertThat(txnReused).isSameAs(txn);
+ }
+ }
+ }
+
+ @Test
+ public void baseDB() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ assertThat(otdb).isNotNull();
+ final RocksDB db = otdb.getBaseDB();
+ assertThat(db).isNotNull();
+ assertThat(db.isOwningHandle()).isFalse();
+ }
+ }
+}
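
None of the tests above writes through a transaction; a minimal commit path looks like the sketch below (path and key are illustrative). With optimistic concurrency, conflicts surface as a RocksDBException at commit() rather than as locks taken at write time:

    import org.rocksdb.*;

    public class OptimisticTxnSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Options options = new Options().setCreateIfMissing(true);
             final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(
                 options, "/tmp/otdb-sketch");
             final WriteOptions writeOptions = new WriteOptions();
             final Transaction txn = otdb.beginTransaction(writeOptions)) {
          txn.put("k".getBytes(), "v".getBytes());
          txn.commit(); // Status.Code.Busy here would signal a write conflict
        }
      }
    }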
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java
new file mode 100644
index 000000000..ef656b958
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java
@@ -0,0 +1,38 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+import org.rocksdb.util.BytewiseComparator;
+
+import java.util.Random;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class OptimisticTransactionOptionsTest {
+
+ private static final Random rand = PlatformRandomHelper.
+ getPlatformSpecificRandomFactory();
+
+ @Test
+ public void setSnapshot() {
+ try (final OptimisticTransactionOptions opt = new OptimisticTransactionOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setSetSnapshot(boolValue);
+ assertThat(opt.isSetSnapshot()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void comparator() {
+ try (final OptimisticTransactionOptions opt = new OptimisticTransactionOptions();
+ final ComparatorOptions copt = new ComparatorOptions()
+ .setUseDirectBuffer(true);
+ final AbstractComparator comparator = new BytewiseComparator(copt)) {
+ opt.setComparator(comparator);
+ }
+ }
+}
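
setSetSnapshot(true) makes the transaction take a snapshot the moment it begins; pinning that snapshot into a ReadOptions then gives the transaction a consistent read view. A sketch, assuming an open OptimisticTransactionDB otdb and WriteOptions writeOptions as in the tests above:

    // Assumes `otdb` and `writeOptions` are open; get() throws RocksDBException.
    try (final OptimisticTransactionOptions txnOptions =
             new OptimisticTransactionOptions().setSetSnapshot(true);
         final Transaction txn = otdb.beginTransaction(writeOptions, txnOptions);
         final ReadOptions readOptions =
             new ReadOptions().setSnapshot(txn.getSnapshot())) {
      final byte[] value = txn.get(readOptions, "k".getBytes()); // reads at the snapshot
    }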
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java
new file mode 100644
index 000000000..f44816e64
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java
@@ -0,0 +1,350 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+
+public class OptimisticTransactionTest extends AbstractTransactionTest {
+
+ @Test
+ public void getForUpdate_cf_conflict() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ final byte v12[] = "value12".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(testCf, k1, v1);
+ assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1);
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ try(final Transaction txn3 = dbContainer.beginTransaction()) {
+ assertThat(txn3.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1);
+
+          // NOTE: txn2 updates k1 while txn3 is still open
+ txn2.put(testCf, k1, v12);
+ assertThat(txn2.get(testCf, readOptions, k1)).isEqualTo(v12);
+ txn2.commit();
+
+ try {
+ txn3.commit(); // should cause an exception!
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy);
+ return;
+ }
+ }
+ }
+
+ fail("Expected an exception for put after getForUpdate from conflicting" +
+ "transactions");
+ }
+ }
+
+ @Test
+ public void getForUpdate_conflict() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ final byte v12[] = "value12".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(k1, v1);
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ try(final Transaction txn3 = dbContainer.beginTransaction()) {
+ assertThat(txn3.getForUpdate(readOptions, k1, true)).isEqualTo(v1);
+
+          // NOTE: txn2 updates k1 while txn3 is still open
+ txn2.put(k1, v12);
+ assertThat(txn2.get(readOptions, k1)).isEqualTo(v12);
+ txn2.commit();
+
+ try {
+ txn3.commit(); // should cause an exception!
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy);
+ return;
+ }
+ }
+ }
+
+ fail("Expected an exception for put after getForUpdate from conflicting" +
+ "transactions");
+ }
+ }
+
+ @Test
+ public void multiGetForUpdate_cf_conflict() throws RocksDBException {
+ final byte keys[][] = new byte[][] {
+ "key1".getBytes(UTF_8),
+ "key2".getBytes(UTF_8)};
+ final byte values[][] = new byte[][] {
+ "value1".getBytes(UTF_8),
+ "value2".getBytes(UTF_8)};
+ final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ final List<ColumnFamilyHandle> cfList = Arrays.asList(testCf, testCf);
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(testCf, keys[0], values[0]);
+ txn.put(testCf, keys[1], values[1]);
+ assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values);
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ try(final Transaction txn3 = dbContainer.beginTransaction()) {
+ assertThat(txn3.multiGetForUpdate(readOptions, cfList, keys))
+ .isEqualTo(values);
+
+          // NOTE: txn2 updates keys[0] while txn3 is still open
+ txn2.put(testCf, keys[0], otherValue);
+ assertThat(txn2.get(testCf, readOptions, keys[0]))
+ .isEqualTo(otherValue);
+ txn2.commit();
+
+ try {
+ txn3.commit(); // should cause an exception!
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy);
+ return;
+ }
+ }
+ }
+
+ fail("Expected an exception for put after getForUpdate from conflicting" +
+ "transactions");
+ }
+ }
+
+ @Test
+ public void multiGetForUpdate_conflict() throws RocksDBException {
+ final byte keys[][] = new byte[][] {
+ "key1".getBytes(UTF_8),
+ "key2".getBytes(UTF_8)};
+ final byte values[][] = new byte[][] {
+ "value1".getBytes(UTF_8),
+ "value2".getBytes(UTF_8)};
+ final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(keys[0], values[0]);
+ txn.put(keys[1], values[1]);
+ assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values);
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ try(final Transaction txn3 = dbContainer.beginTransaction()) {
+ assertThat(txn3.multiGetForUpdate(readOptions, keys))
+ .isEqualTo(values);
+
+          // NOTE: txn2 updates keys[0] while txn3 is still open
+ txn2.put(keys[0], otherValue);
+ assertThat(txn2.get(readOptions, keys[0]))
+ .isEqualTo(otherValue);
+ txn2.commit();
+
+ try {
+ txn3.commit(); // should cause an exception!
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy);
+ return;
+ }
+ }
+ }
+
+ fail("Expected an exception for put after getForUpdate from conflicting" +
+ "transactions");
+ }
+ }
+
+ @Test
+ public void undoGetForUpdate_cf_conflict() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ final byte v12[] = "value12".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(testCf, k1, v1);
+ assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1);
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ try(final Transaction txn3 = dbContainer.beginTransaction()) {
+ assertThat(txn3.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1);
+
+ // undo the getForUpdate
+ txn3.undoGetForUpdate(testCf, k1);
+
+          // NOTE: txn2 updates k1 while txn3 is still open
+ txn2.put(testCf, k1, v12);
+ assertThat(txn2.get(testCf, readOptions, k1)).isEqualTo(v12);
+ txn2.commit();
+
+ // should not cause an exception
+ // because we undid the getForUpdate above!
+ txn3.commit();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void undoGetForUpdate_conflict() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ final byte v12[] = "value12".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(k1, v1);
+ assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ try(final Transaction txn3 = dbContainer.beginTransaction()) {
+ assertThat(txn3.getForUpdate(readOptions, k1, true)).isEqualTo(v1);
+
+ // undo the getForUpdate
+ txn3.undoGetForUpdate(k1);
+
+          // NOTE: txn2 updates k1 while txn3 is still open
+ txn2.put(k1, v12);
+ assertThat(txn2.get(readOptions, k1)).isEqualTo(v12);
+ txn2.commit();
+
+ // should not cause an exception
+ // because we undid the getForUpdate above!
+ txn3.commit();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void name() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.getName()).isEmpty();
+ final String name = "my-transaction-" + rand.nextLong();
+
+ try {
+ txn.setName(name);
+ } catch(final RocksDBException e) {
+        assertThat(e.getStatus().getCode()).isSameAs(Status.Code.InvalidArgument);
+ return;
+ }
+
+ fail("Optimistic transactions cannot be named.");
+ }
+ }
+
+ @Override
+ public OptimisticTransactionDBContainer startDb()
+ throws RocksDBException {
+ final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+
+ final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor(TXN_TEST_COLUMN_FAMILY,
+ columnFamilyOptions));
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+ final OptimisticTransactionDB optimisticTxnDb;
+ try {
+ optimisticTxnDb = OptimisticTransactionDB.open(
+ options, dbFolder.getRoot().getAbsolutePath(),
+ columnFamilyDescriptors, columnFamilyHandles);
+ } catch(final RocksDBException e) {
+ columnFamilyOptions.close();
+ options.close();
+ throw e;
+ }
+
+ final WriteOptions writeOptions = new WriteOptions();
+ final OptimisticTransactionOptions optimisticTxnOptions =
+ new OptimisticTransactionOptions();
+
+ return new OptimisticTransactionDBContainer(optimisticTxnOptions,
+ writeOptions, columnFamilyHandles, optimisticTxnDb, columnFamilyOptions,
+ options);
+ }
+
+ private static class OptimisticTransactionDBContainer
+ extends DBContainer {
+
+ private final OptimisticTransactionOptions optimisticTxnOptions;
+ private final OptimisticTransactionDB optimisticTxnDb;
+
+ public OptimisticTransactionDBContainer(
+ final OptimisticTransactionOptions optimisticTxnOptions,
+ final WriteOptions writeOptions,
+ final List<ColumnFamilyHandle> columnFamilyHandles,
+ final OptimisticTransactionDB optimisticTxnDb,
+ final ColumnFamilyOptions columnFamilyOptions,
+ final DBOptions options) {
+ super(writeOptions, columnFamilyHandles, columnFamilyOptions,
+ options);
+ this.optimisticTxnOptions = optimisticTxnOptions;
+ this.optimisticTxnDb = optimisticTxnDb;
+ }
+
+ @Override
+ public Transaction beginTransaction() {
+ return optimisticTxnDb.beginTransaction(writeOptions,
+ optimisticTxnOptions);
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions) {
+ return optimisticTxnDb.beginTransaction(writeOptions,
+ optimisticTxnOptions);
+ }
+
+ @Override
+ public void close() {
+ optimisticTxnOptions.close();
+ writeOptions.close();
+ for(final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
+ columnFamilyHandle.close();
+ }
+ optimisticTxnDb.close();
+ options.close();
+ }
+ }
+}
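
The Status.Code.Busy assertions above document the contract that callers build on: an optimistic commit that loses a conflict is retried from the top with fresh reads. A sketch of that retry loop, assuming otdb, writeOptions and readOptions as in the tests:

    // Assumes `otdb`, `writeOptions` and `readOptions` are open, as above.
    boolean committed = false;
    while (!committed) {
      try (final Transaction txn = otdb.beginTransaction(writeOptions)) {
        final byte[] old = txn.getForUpdate(readOptions, "counter".getBytes(), true);
        final long next = (old == null) ? 1 : Long.parseLong(new String(old)) + 1;
        txn.put("counter".getBytes(), String.valueOf(next).getBytes());
        try {
          txn.commit();
          committed = true;
        } catch (final RocksDBException e) {
          if (e.getStatus().getCode() != Status.Code.Busy) {
            throw e; // a genuine error, not a write conflict
          }
          // conflict detected at commit: loop and re-read
        }
      }
    }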
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java
new file mode 100644
index 000000000..b249f95fb
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java
@@ -0,0 +1,1311 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.file.Paths;
+import java.util.*;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.rocksdb.test.RemoveEmptyValueCompactionFilterFactory;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+
+public class OptionsTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ public static final Random rand = PlatformRandomHelper.
+ getPlatformSpecificRandomFactory();
+
+ @Test
+ public void copyConstructor() {
+ Options origOpts = new Options();
+ origOpts.setNumLevels(rand.nextInt(8));
+ origOpts.setTargetFileSizeMultiplier(rand.nextInt(100));
+ origOpts.setLevel0StopWritesTrigger(rand.nextInt(50));
+ Options copyOpts = new Options(origOpts);
+ assertThat(origOpts.numLevels()).isEqualTo(copyOpts.numLevels());
+ assertThat(origOpts.targetFileSizeMultiplier()).isEqualTo(copyOpts.targetFileSizeMultiplier());
+ assertThat(origOpts.level0StopWritesTrigger()).isEqualTo(copyOpts.level0StopWritesTrigger());
+ }
+
+ @Test
+ public void setIncreaseParallelism() {
+ try (final Options opt = new Options()) {
+ final int threads = Runtime.getRuntime().availableProcessors() * 2;
+ opt.setIncreaseParallelism(threads);
+ }
+ }
+
+ @Test
+ public void writeBufferSize() throws RocksDBException {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setWriteBufferSize(longValue);
+ assertThat(opt.writeBufferSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void maxWriteBufferNumber() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxWriteBufferNumber(intValue);
+ assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void minWriteBufferNumberToMerge() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setMinWriteBufferNumberToMerge(intValue);
+ assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void numLevels() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setNumLevels(intValue);
+ assertThat(opt.numLevels()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void levelZeroFileNumCompactionTrigger() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setLevelZeroFileNumCompactionTrigger(intValue);
+ assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void levelZeroSlowdownWritesTrigger() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setLevelZeroSlowdownWritesTrigger(intValue);
+ assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void levelZeroStopWritesTrigger() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setLevelZeroStopWritesTrigger(intValue);
+ assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void targetFileSizeBase() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setTargetFileSizeBase(longValue);
+ assertThat(opt.targetFileSizeBase()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void targetFileSizeMultiplier() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setTargetFileSizeMultiplier(intValue);
+ assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxBytesForLevelBase() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxBytesForLevelBase(longValue);
+ assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void levelCompactionDynamicLevelBytes() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setLevelCompactionDynamicLevelBytes(boolValue);
+ assertThat(opt.levelCompactionDynamicLevelBytes())
+ .isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void maxBytesForLevelMultiplier() {
+ try (final Options opt = new Options()) {
+ final double doubleValue = rand.nextDouble();
+ opt.setMaxBytesForLevelMultiplier(doubleValue);
+ assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(doubleValue);
+ }
+ }
+
+ @Test
+ public void maxBytesForLevelMultiplierAdditional() {
+ try (final Options opt = new Options()) {
+ final int intValue1 = rand.nextInt();
+ final int intValue2 = rand.nextInt();
+ final int[] ints = new int[]{intValue1, intValue2};
+ opt.setMaxBytesForLevelMultiplierAdditional(ints);
+ assertThat(opt.maxBytesForLevelMultiplierAdditional()).isEqualTo(ints);
+ }
+ }
+
+ @Test
+ public void maxCompactionBytes() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxCompactionBytes(longValue);
+ assertThat(opt.maxCompactionBytes()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void softPendingCompactionBytesLimit() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setSoftPendingCompactionBytesLimit(longValue);
+ assertThat(opt.softPendingCompactionBytesLimit()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void hardPendingCompactionBytesLimit() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setHardPendingCompactionBytesLimit(longValue);
+ assertThat(opt.hardPendingCompactionBytesLimit()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void level0FileNumCompactionTrigger() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setLevel0FileNumCompactionTrigger(intValue);
+ assertThat(opt.level0FileNumCompactionTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void level0SlowdownWritesTrigger() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setLevel0SlowdownWritesTrigger(intValue);
+ assertThat(opt.level0SlowdownWritesTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void level0StopWritesTrigger() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setLevel0StopWritesTrigger(intValue);
+ assertThat(opt.level0StopWritesTrigger()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void arenaBlockSize() throws RocksDBException {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setArenaBlockSize(longValue);
+ assertThat(opt.arenaBlockSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void disableAutoCompactions() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setDisableAutoCompactions(boolValue);
+ assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void maxSequentialSkipInIterations() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxSequentialSkipInIterations(longValue);
+ assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void inplaceUpdateSupport() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setInplaceUpdateSupport(boolValue);
+ assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void inplaceUpdateNumLocks() throws RocksDBException {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setInplaceUpdateNumLocks(longValue);
+ assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void memtablePrefixBloomSizeRatio() {
+ try (final Options opt = new Options()) {
+ final double doubleValue = rand.nextDouble();
+ opt.setMemtablePrefixBloomSizeRatio(doubleValue);
+ assertThat(opt.memtablePrefixBloomSizeRatio()).isEqualTo(doubleValue);
+ }
+ }
+
+ @Test
+ public void memtableHugePageSize() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setMemtableHugePageSize(longValue);
+ assertThat(opt.memtableHugePageSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void bloomLocality() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setBloomLocality(intValue);
+ assertThat(opt.bloomLocality()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxSuccessiveMerges() throws RocksDBException {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxSuccessiveMerges(longValue);
+ assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void optimizeFiltersForHits() {
+ try (final Options opt = new Options()) {
+ final boolean aBoolean = rand.nextBoolean();
+ opt.setOptimizeFiltersForHits(aBoolean);
+ assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean);
+ }
+ }
+
+ @Test
+ public void createIfMissing() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setCreateIfMissing(boolValue);
+ assertThat(opt.createIfMissing()).
+ isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void createMissingColumnFamilies() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setCreateMissingColumnFamilies(boolValue);
+ assertThat(opt.createMissingColumnFamilies()).
+ isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void errorIfExists() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setErrorIfExists(boolValue);
+ assertThat(opt.errorIfExists()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void paranoidChecks() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setParanoidChecks(boolValue);
+ assertThat(opt.paranoidChecks()).
+ isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void maxTotalWalSize() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxTotalWalSize(longValue);
+ assertThat(opt.maxTotalWalSize()).
+ isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void maxOpenFiles() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxOpenFiles(intValue);
+ assertThat(opt.maxOpenFiles()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxFileOpeningThreads() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxFileOpeningThreads(intValue);
+ assertThat(opt.maxFileOpeningThreads()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void useFsync() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setUseFsync(boolValue);
+ assertThat(opt.useFsync()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void dbPaths() {
+ final List<DbPath> dbPaths = new ArrayList<>();
+ dbPaths.add(new DbPath(Paths.get("/a"), 10));
+ dbPaths.add(new DbPath(Paths.get("/b"), 100));
+ dbPaths.add(new DbPath(Paths.get("/c"), 1000));
+
+ try (final Options opt = new Options()) {
+ assertThat(opt.dbPaths()).isEqualTo(Collections.emptyList());
+
+ opt.setDbPaths(dbPaths);
+
+ assertThat(opt.dbPaths()).isEqualTo(dbPaths);
+ }
+ }
+
+ @Test
+ public void dbLogDir() {
+ try (final Options opt = new Options()) {
+ final String str = "path/to/DbLogDir";
+ opt.setDbLogDir(str);
+ assertThat(opt.dbLogDir()).isEqualTo(str);
+ }
+ }
+
+ @Test
+ public void walDir() {
+ try (final Options opt = new Options()) {
+ final String str = "path/to/WalDir";
+ opt.setWalDir(str);
+ assertThat(opt.walDir()).isEqualTo(str);
+ }
+ }
+
+ @Test
+ public void deleteObsoleteFilesPeriodMicros() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setDeleteObsoleteFilesPeriodMicros(longValue);
+ assertThat(opt.deleteObsoleteFilesPeriodMicros()).
+ isEqualTo(longValue);
+ }
+ }
+
+ @SuppressWarnings("deprecated")
+ @Test
+ public void baseBackgroundCompactions() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setBaseBackgroundCompactions(intValue);
+ assertThat(opt.baseBackgroundCompactions()).
+ isEqualTo(intValue);
+ }
+ }
+
+ @SuppressWarnings("deprecated")
+ @Test
+ public void maxBackgroundCompactions() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxBackgroundCompactions(intValue);
+ assertThat(opt.maxBackgroundCompactions()).
+ isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxSubcompactions() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxSubcompactions(intValue);
+ assertThat(opt.maxSubcompactions()).
+ isEqualTo(intValue);
+ }
+ }
+
+ @SuppressWarnings("deprecated")
+ @Test
+ public void maxBackgroundFlushes() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxBackgroundFlushes(intValue);
+ assertThat(opt.maxBackgroundFlushes()).
+ isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxBackgroundJobs() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setMaxBackgroundJobs(intValue);
+ assertThat(opt.maxBackgroundJobs()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void maxLogFileSize() throws RocksDBException {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxLogFileSize(longValue);
+ assertThat(opt.maxLogFileSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void logFileTimeToRoll() throws RocksDBException {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setLogFileTimeToRoll(longValue);
+ assertThat(opt.logFileTimeToRoll()).
+ isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void keepLogFileNum() throws RocksDBException {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setKeepLogFileNum(longValue);
+ assertThat(opt.keepLogFileNum()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void recycleLogFileNum() throws RocksDBException {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setRecycleLogFileNum(longValue);
+ assertThat(opt.recycleLogFileNum()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void maxManifestFileSize() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxManifestFileSize(longValue);
+ assertThat(opt.maxManifestFileSize()).
+ isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void tableCacheNumshardbits() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setTableCacheNumshardbits(intValue);
+ assertThat(opt.tableCacheNumshardbits()).
+ isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void walSizeLimitMB() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setWalSizeLimitMB(longValue);
+ assertThat(opt.walSizeLimitMB()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void walTtlSeconds() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setWalTtlSeconds(longValue);
+ assertThat(opt.walTtlSeconds()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void manifestPreallocationSize() throws RocksDBException {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setManifestPreallocationSize(longValue);
+ assertThat(opt.manifestPreallocationSize()).
+ isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void useDirectReads() {
+ try(final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setUseDirectReads(boolValue);
+ assertThat(opt.useDirectReads()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void useDirectIoForFlushAndCompaction() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setUseDirectIoForFlushAndCompaction(boolValue);
+ assertThat(opt.useDirectIoForFlushAndCompaction()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void allowFAllocate() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAllowFAllocate(boolValue);
+ assertThat(opt.allowFAllocate()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void allowMmapReads() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAllowMmapReads(boolValue);
+ assertThat(opt.allowMmapReads()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void allowMmapWrites() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAllowMmapWrites(boolValue);
+ assertThat(opt.allowMmapWrites()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void isFdCloseOnExec() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setIsFdCloseOnExec(boolValue);
+ assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void statsDumpPeriodSec() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setStatsDumpPeriodSec(intValue);
+ assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void statsPersistPeriodSec() {
+ try (final Options opt = new Options()) {
+ final int intValue = rand.nextInt();
+ opt.setStatsPersistPeriodSec(intValue);
+ assertThat(opt.statsPersistPeriodSec()).isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void statsHistoryBufferSize() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setStatsHistoryBufferSize(longValue);
+ assertThat(opt.statsHistoryBufferSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void adviseRandomOnOpen() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAdviseRandomOnOpen(boolValue);
+ assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void dbWriteBufferSize() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setDbWriteBufferSize(longValue);
+ assertThat(opt.dbWriteBufferSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void setWriteBufferManager() throws RocksDBException {
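+ // A WriteBufferManager constructed with a Cache charges its memory
+ // usage against that cache, so both are held open together here.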
+ try (final Options opt = new Options();
+ final Cache cache = new LRUCache(1 * 1024 * 1024);
+ final WriteBufferManager writeBufferManager = new WriteBufferManager(2000L, cache)) {
+ opt.setWriteBufferManager(writeBufferManager);
+ assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager);
+ }
+ }
+
+ @Test
+ public void setWriteBufferManagerWithZeroBufferSize() throws RocksDBException {
+ try (final Options opt = new Options();
+ final Cache cache = new LRUCache(1 * 1024 * 1024);
+ final WriteBufferManager writeBufferManager = new WriteBufferManager(0L, cache)) {
+ opt.setWriteBufferManager(writeBufferManager);
+ assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager);
+ }
+ }
+
+ @Test
+ public void accessHintOnCompactionStart() {
+ try (final Options opt = new Options()) {
+ final AccessHint accessHint = AccessHint.SEQUENTIAL;
+ opt.setAccessHintOnCompactionStart(accessHint);
+ assertThat(opt.accessHintOnCompactionStart()).isEqualTo(accessHint);
+ }
+ }
+
+ @Test
+ public void newTableReaderForCompactionInputs() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setNewTableReaderForCompactionInputs(boolValue);
+ assertThat(opt.newTableReaderForCompactionInputs()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void compactionReadaheadSize() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setCompactionReadaheadSize(longValue);
+ assertThat(opt.compactionReadaheadSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void randomAccessMaxBufferSize() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setRandomAccessMaxBufferSize(longValue);
+ assertThat(opt.randomAccessMaxBufferSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void writableFileMaxBufferSize() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setWritableFileMaxBufferSize(longValue);
+ assertThat(opt.writableFileMaxBufferSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void useAdaptiveMutex() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setUseAdaptiveMutex(boolValue);
+ assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void bytesPerSync() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setBytesPerSync(longValue);
+ assertThat(opt.bytesPerSync()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void walBytesPerSync() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setWalBytesPerSync(longValue);
+ assertThat(opt.walBytesPerSync()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void strictBytesPerSync() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.strictBytesPerSync()).isFalse();
+ opt.setStrictBytesPerSync(true);
+ assertThat(opt.strictBytesPerSync()).isTrue();
+ }
+ }
+
+ @Test
+ public void enableThreadTracking() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setEnableThreadTracking(boolValue);
+ assertThat(opt.enableThreadTracking()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void delayedWriteRate() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setDelayedWriteRate(longValue);
+ assertThat(opt.delayedWriteRate()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void enablePipelinedWrite() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.enablePipelinedWrite()).isFalse();
+ opt.setEnablePipelinedWrite(true);
+ assertThat(opt.enablePipelinedWrite()).isTrue();
+ }
+ }
+
+ @Test
+ public void unorderedWrite() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.unorderedWrite()).isFalse();
+ opt.setUnorderedWrite(true);
+ assertThat(opt.unorderedWrite()).isTrue();
+ }
+ }
+
+ @Test
+ public void allowConcurrentMemtableWrite() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAllowConcurrentMemtableWrite(boolValue);
+ assertThat(opt.allowConcurrentMemtableWrite()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void enableWriteThreadAdaptiveYield() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setEnableWriteThreadAdaptiveYield(boolValue);
+ assertThat(opt.enableWriteThreadAdaptiveYield()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void writeThreadMaxYieldUsec() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setWriteThreadMaxYieldUsec(longValue);
+ assertThat(opt.writeThreadMaxYieldUsec()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void writeThreadSlowYieldUsec() {
+ try (final Options opt = new Options()) {
+ final long longValue = rand.nextLong();
+ opt.setWriteThreadSlowYieldUsec(longValue);
+ assertThat(opt.writeThreadSlowYieldUsec()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void skipStatsUpdateOnDbOpen() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setSkipStatsUpdateOnDbOpen(boolValue);
+ assertThat(opt.skipStatsUpdateOnDbOpen()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void walRecoveryMode() {
+ try (final Options opt = new Options()) {
+ for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
+ opt.setWalRecoveryMode(walRecoveryMode);
+ assertThat(opt.walRecoveryMode()).isEqualTo(walRecoveryMode);
+ }
+ }
+ }
+
+ @Test
+ public void allow2pc() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAllow2pc(boolValue);
+ assertThat(opt.allow2pc()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void rowCache() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.rowCache()).isNull();
+
+ try (final Cache lruCache = new LRUCache(1000)) {
+ opt.setRowCache(lruCache);
+ assertThat(opt.rowCache()).isEqualTo(lruCache);
+ }
+
+ try (final Cache clockCache = new ClockCache(1000)) {
+ opt.setRowCache(clockCache);
+ assertThat(opt.rowCache()).isEqualTo(clockCache);
+ }
+ }
+ }
+
+ @Test
+ public void walFilter() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.walFilter()).isNull();
+
+ try (final AbstractWalFilter walFilter = new AbstractWalFilter() {
+ @Override
+ public void columnFamilyLogNumberMap(
+ final Map<Integer, Long> cfLognumber,
+ final Map<String, Integer> cfNameId) {
+ // no-op
+ }
+
+ @Override
+ public LogRecordFoundResult logRecordFound(final long logNumber,
+ final String logFileName, final WriteBatch batch,
+ final WriteBatch newBatch) {
+ return new LogRecordFoundResult(
+ WalProcessingOption.CONTINUE_PROCESSING, false);
+ }
+
+ @Override
+ public String name() {
+ return "test-wal-filter";
+ }
+ }) {
+ opt.setWalFilter(walFilter);
+ assertThat(opt.walFilter()).isEqualTo(walFilter);
+ }
+ }
+ }
+
+ @Test
+ public void failIfOptionsFileError() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setFailIfOptionsFileError(boolValue);
+ assertThat(opt.failIfOptionsFileError()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void dumpMallocStats() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setDumpMallocStats(boolValue);
+ assertThat(opt.dumpMallocStats()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void avoidFlushDuringRecovery() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAvoidFlushDuringRecovery(boolValue);
+ assertThat(opt.avoidFlushDuringRecovery()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void avoidFlushDuringShutdown() {
+ try (final Options opt = new Options()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setAvoidFlushDuringShutdown(boolValue);
+ assertThat(opt.avoidFlushDuringShutdown()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void allowIngestBehind() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.allowIngestBehind()).isFalse();
+ opt.setAllowIngestBehind(true);
+ assertThat(opt.allowIngestBehind()).isTrue();
+ }
+ }
+
+ @Test
+ public void preserveDeletes() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.preserveDeletes()).isFalse();
+ opt.setPreserveDeletes(true);
+ assertThat(opt.preserveDeletes()).isTrue();
+ }
+ }
+
+ @Test
+ public void twoWriteQueues() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.twoWriteQueues()).isFalse();
+ opt.setTwoWriteQueues(true);
+ assertThat(opt.twoWriteQueues()).isTrue();
+ }
+ }
+
+ @Test
+ public void manualWalFlush() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.manualWalFlush()).isFalse();
+ opt.setManualWalFlush(true);
+ assertThat(opt.manualWalFlush()).isTrue();
+ }
+ }
+
+ @Test
+ public void atomicFlush() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.atomicFlush()).isFalse();
+ opt.setAtomicFlush(true);
+ assertThat(opt.atomicFlush()).isTrue();
+ }
+ }
+
+ @Test
+ public void env() {
+ try (final Options options = new Options();
+ final Env env = Env.getDefault()) {
+ options.setEnv(env);
+ assertThat(options.getEnv()).isSameAs(env);
+ }
+ }
+
+ @Test
+ public void linkageOfPrepMethods() {
+ try (final Options options = new Options()) {
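+ // Smoke test: only checks that these native tuning methods link and
+ // execute without throwing; it asserts nothing about their effects.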
+ options.optimizeUniversalStyleCompaction();
+ options.optimizeUniversalStyleCompaction(4000);
+ options.optimizeLevelStyleCompaction();
+ options.optimizeLevelStyleCompaction(3000);
+ options.optimizeForPointLookup(10);
+ options.optimizeForSmallDb();
+ options.prepareForBulkLoad();
+ }
+ }
+
+ @Test
+ public void compressionTypes() {
+ try (final Options options = new Options()) {
+ for (final CompressionType compressionType :
+ CompressionType.values()) {
+ options.setCompressionType(compressionType);
+ assertThat(options.compressionType()).
+ isEqualTo(compressionType);
+ assertThat(CompressionType.valueOf("NO_COMPRESSION")).
+ isEqualTo(CompressionType.NO_COMPRESSION);
+ }
+ }
+ }
+
+ @Test
+ public void compressionPerLevel() {
+ try (final Options options = new Options()) {
+ assertThat(options.compressionPerLevel()).isEmpty();
+ List<CompressionType> compressionTypeList =
+ new ArrayList<>();
+ for (int i = 0; i < options.numLevels(); i++) {
+ compressionTypeList.add(CompressionType.NO_COMPRESSION);
+ }
+ options.setCompressionPerLevel(compressionTypeList);
+ compressionTypeList = options.compressionPerLevel();
+ for (final CompressionType compressionType : compressionTypeList) {
+ assertThat(compressionType).isEqualTo(
+ CompressionType.NO_COMPRESSION);
+ }
+ }
+ }
+
+ @Test
+ public void differentCompressionsPerLevel() {
+ try (final Options options = new Options()) {
+ options.setNumLevels(3);
+
+ assertThat(options.compressionPerLevel()).isEmpty();
+ List<CompressionType> compressionTypeList = new ArrayList<>();
+
+ compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION);
+ compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION);
+ compressionTypeList.add(CompressionType.LZ4_COMPRESSION);
+
+ options.setCompressionPerLevel(compressionTypeList);
+ compressionTypeList = options.compressionPerLevel();
+
+ assertThat(compressionTypeList.size()).isEqualTo(3);
+ assertThat(compressionTypeList).
+ containsExactly(
+ CompressionType.BZLIB2_COMPRESSION,
+ CompressionType.SNAPPY_COMPRESSION,
+ CompressionType.LZ4_COMPRESSION);
+ }
+ }
+
+ @Test
+ public void bottommostCompressionType() {
+ try (final Options options = new Options()) {
+ assertThat(options.bottommostCompressionType())
+ .isEqualTo(CompressionType.DISABLE_COMPRESSION_OPTION);
+
+ for (final CompressionType compressionType : CompressionType.values()) {
+ options.setBottommostCompressionType(compressionType);
+ assertThat(options.bottommostCompressionType())
+ .isEqualTo(compressionType);
+ }
+ }
+ }
+
+ @Test
+ public void bottommostCompressionOptions() {
+ try (final Options options = new Options();
+ final CompressionOptions bottommostCompressionOptions = new CompressionOptions()
+ .setMaxDictBytes(123)) {
+
+ options.setBottommostCompressionOptions(bottommostCompressionOptions);
+ assertThat(options.bottommostCompressionOptions())
+ .isEqualTo(bottommostCompressionOptions);
+ assertThat(options.bottommostCompressionOptions().maxDictBytes())
+ .isEqualTo(123);
+ }
+ }
+
+ @Test
+ public void compressionOptions() {
+ try (final Options options = new Options();
+ final CompressionOptions compressionOptions = new CompressionOptions()
+ .setMaxDictBytes(123)) {
+
+ options.setCompressionOptions(compressionOptions);
+ assertThat(options.compressionOptions())
+ .isEqualTo(compressionOptions);
+ assertThat(options.compressionOptions().maxDictBytes())
+ .isEqualTo(123);
+ }
+ }
+
+ @Test
+ public void compactionStyles() {
+ try (final Options options = new Options()) {
+ for (final CompactionStyle compactionStyle :
+ CompactionStyle.values()) {
+ options.setCompactionStyle(compactionStyle);
+ assertThat(options.compactionStyle()).
+ isEqualTo(compactionStyle);
+ assertThat(CompactionStyle.valueOf("FIFO")).
+ isEqualTo(CompactionStyle.FIFO);
+ }
+ }
+ }
+
+ @Test
+ public void maxTableFilesSizeFIFO() {
+ try (final Options opt = new Options()) {
+ long longValue = rand.nextLong();
+ // Size has to be positive
+ longValue = (longValue < 0) ? -longValue : longValue;
+ longValue = (longValue == 0) ? longValue + 1 : longValue;
+ opt.setMaxTableFilesSizeFIFO(longValue);
+ assertThat(opt.maxTableFilesSizeFIFO()).
+ isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void rateLimiter() {
+ try (final Options options = new Options();
+ final Options anotherOptions = new Options();
+ final RateLimiter rateLimiter =
+ new RateLimiter(1000, 100 * 1000, 1)) {
+ options.setRateLimiter(rateLimiter);
+ // Test with parameter initialization
+ anotherOptions.setRateLimiter(
+ new RateLimiter(1000));
+ }
+ }
+
+ @Test
+ public void sstFileManager() throws RocksDBException {
+ try (final Options options = new Options();
+ final SstFileManager sstFileManager =
+ new SstFileManager(Env.getDefault())) {
+ options.setSstFileManager(sstFileManager);
+ }
+ }
+
+ @Test
+ public void shouldSetTestPrefixExtractor() {
+ try (final Options options = new Options()) {
+ options.useFixedLengthPrefixExtractor(100);
+ options.useFixedLengthPrefixExtractor(10);
+ }
+ }
+
+ @Test
+ public void shouldSetTestCappedPrefixExtractor() {
+ try (final Options options = new Options()) {
+ options.useCappedPrefixExtractor(100);
+ options.useCappedPrefixExtractor(10);
+ }
+ }
+
+ @Test
+ public void shouldTestMemTableFactoryName()
+ throws RocksDBException {
+ try (final Options options = new Options()) {
+ options.setMemTableConfig(new VectorMemTableConfig());
+ assertThat(options.memTableFactoryName()).
+ isEqualTo("VectorRepFactory");
+ options.setMemTableConfig(
+ new HashLinkedListMemTableConfig());
+ assertThat(options.memTableFactoryName()).
+ isEqualTo("HashLinkedListRepFactory");
+ }
+ }
+
+ @Test
+ public void statistics() {
+ try (final Options options = new Options()) {
+ final Statistics statistics = options.statistics();
+ assertThat(statistics).isNull();
+ }
+
+ try (final Statistics statistics = new Statistics();
+ final Options options = new Options().setStatistics(statistics);
+ final Statistics stats = options.statistics()) {
+ assertThat(stats).isNotNull();
+ }
+ }
+
+ @Test
+ public void maxWriteBufferNumberToMaintain() {
+ try (final Options options = new Options()) {
+ int intValue = rand.nextInt();
+ // Size has to be positive
+ intValue = (intValue < 0) ? -intValue : intValue;
+ intValue = (intValue == 0) ? intValue + 1 : intValue;
+ options.setMaxWriteBufferNumberToMaintain(intValue);
+ assertThat(options.maxWriteBufferNumberToMaintain()).
+ isEqualTo(intValue);
+ }
+ }
+
+ @Test
+ public void compactionPriorities() {
+ try (final Options options = new Options()) {
+ for (final CompactionPriority compactionPriority :
+ CompactionPriority.values()) {
+ options.setCompactionPriority(compactionPriority);
+ assertThat(options.compactionPriority()).
+ isEqualTo(compactionPriority);
+ }
+ }
+ }
+
+ @Test
+ public void reportBgIoStats() {
+ try (final Options options = new Options()) {
+ final boolean booleanValue = true;
+ options.setReportBgIoStats(booleanValue);
+ assertThat(options.reportBgIoStats()).
+ isEqualTo(booleanValue);
+ }
+ }
+
+ @Test
+ public void ttl() {
+ try (final Options options = new Options()) {
+ options.setTtl(1000 * 60);
+ assertThat(options.ttl()).
+ isEqualTo(1000 * 60);
+ }
+ }
+
+ @Test
+ public void compactionOptionsUniversal() {
+ try (final Options options = new Options();
+ final CompactionOptionsUniversal optUni = new CompactionOptionsUniversal()
+ .setCompressionSizePercent(7)) {
+ options.setCompactionOptionsUniversal(optUni);
+ assertThat(options.compactionOptionsUniversal()).
+ isEqualTo(optUni);
+ assertThat(options.compactionOptionsUniversal().compressionSizePercent())
+ .isEqualTo(7);
+ }
+ }
+
+ @Test
+ public void compactionOptionsFIFO() {
+ try (final Options options = new Options();
+ final CompactionOptionsFIFO optFifo = new CompactionOptionsFIFO()
+ .setMaxTableFilesSize(2000)) {
+ options.setCompactionOptionsFIFO(optFifo);
+ assertThat(options.compactionOptionsFIFO()).
+ isEqualTo(optFifo);
+ assertThat(options.compactionOptionsFIFO().maxTableFilesSize())
+ .isEqualTo(2000);
+ }
+ }
+
+ @Test
+ public void forceConsistencyChecks() {
+ try (final Options options = new Options()) {
+ final boolean booleanValue = true;
+ options.setForceConsistencyChecks(booleanValue);
+ assertThat(options.forceConsistencyChecks()).
+ isEqualTo(booleanValue);
+ }
+ }
+
+ @Test
+ public void compactionFilter() {
+ try (final Options options = new Options();
+ final RemoveEmptyValueCompactionFilter cf = new RemoveEmptyValueCompactionFilter()) {
+ options.setCompactionFilter(cf);
+ assertThat(options.compactionFilter()).isEqualTo(cf);
+ }
+ }
+
+ @Test
+ public void compactionFilterFactory() {
+ try (final Options options = new Options();
+ final RemoveEmptyValueCompactionFilterFactory cff = new RemoveEmptyValueCompactionFilterFactory()) {
+ options.setCompactionFilterFactory(cff);
+ assertThat(options.compactionFilterFactory()).isEqualTo(cff);
+ }
+ }
+
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/OptionsUtilTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/OptionsUtilTest.java
new file mode 100644
index 000000000..b84314eec
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/OptionsUtilTest.java
@@ -0,0 +1,126 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.*;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class OptionsUtilTest {
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = new RocksNativeLibraryResource();
+
+ @Rule public TemporaryFolder dbFolder = new TemporaryFolder();
+
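+ // Both APIs should load identical options: LOAD_LATEST_OPTIONS discovers
+ // the most recent OPTIONS file itself, while LOAD_OPTIONS_FROM_FILE is
+ // given an explicit path to that file.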
+ enum TestAPI { LOAD_LATEST_OPTIONS, LOAD_OPTIONS_FROM_FILE }
+
+ @Test
+ public void loadLatestOptions() throws RocksDBException {
+ verifyOptions(TestAPI.LOAD_LATEST_OPTIONS);
+ }
+
+ @Test
+ public void loadOptionsFromFile() throws RocksDBException {
+ verifyOptions(TestAPI.LOAD_OPTIONS_FROM_FILE);
+ }
+
+ @Test
+ public void getLatestOptionsFileName() throws RocksDBException {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db).isNotNull();
+ }
+
+ String fName = OptionsUtil.getLatestOptionsFileName(dbPath, Env.getDefault());
+ assertThat(fName).isNotNull();
+ assertThat(fName.startsWith("OPTIONS-")).isTrue();
+ }
+
+ private void verifyOptions(TestAPI apiType) throws RocksDBException {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setParanoidChecks(false)
+ .setMaxOpenFiles(478)
+ .setDelayedWriteRate(1234567L);
+ final ColumnFamilyOptions baseDefaultCFOpts = new ColumnFamilyOptions();
+ final byte[] secondCFName = "new_cf".getBytes();
+ final ColumnFamilyOptions baseSecondCFOpts =
+ new ColumnFamilyOptions()
+ .setWriteBufferSize(70 * 1024)
+ .setMaxWriteBufferNumber(7)
+ .setMaxBytesForLevelBase(53 * 1024 * 1024)
+ .setLevel0FileNumCompactionTrigger(3)
+ .setLevel0SlowdownWritesTrigger(51)
+ .setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION);
+
+ // Create a database with a new column family
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db).isNotNull();
+
+ // create column family
+ try (final ColumnFamilyHandle columnFamilyHandle =
+ db.createColumnFamily(new ColumnFamilyDescriptor(secondCFName, baseSecondCFOpts))) {
+ assertThat(columnFamilyHandle).isNotNull();
+ }
+ }
+
+ // Read the options back and verify
+ DBOptions dbOptions = new DBOptions();
+ final List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
+ String path = dbPath;
+ if (apiType == TestAPI.LOAD_LATEST_OPTIONS) {
+ OptionsUtil.loadLatestOptions(path, Env.getDefault(), dbOptions, cfDescs, false);
+ } else if (apiType == TestAPI.LOAD_OPTIONS_FROM_FILE) {
+ path = dbPath + "/" + OptionsUtil.getLatestOptionsFileName(dbPath, Env.getDefault());
+ OptionsUtil.loadOptionsFromFile(path, Env.getDefault(), dbOptions, cfDescs, false);
+ }
+
+ assertThat(dbOptions.createIfMissing()).isEqualTo(options.createIfMissing());
+ assertThat(dbOptions.paranoidChecks()).isEqualTo(options.paranoidChecks());
+ assertThat(dbOptions.maxOpenFiles()).isEqualTo(options.maxOpenFiles());
+ assertThat(dbOptions.delayedWriteRate()).isEqualTo(options.delayedWriteRate());
+
+ assertThat(cfDescs.size()).isEqualTo(2);
+ assertThat(cfDescs.get(0)).isNotNull();
+ assertThat(cfDescs.get(1)).isNotNull();
+ assertThat(cfDescs.get(0).getName()).isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY);
+ assertThat(cfDescs.get(1).getName()).isEqualTo(secondCFName);
+
+ ColumnFamilyOptions defaultCFOpts = cfDescs.get(0).getOptions();
+ assertThat(defaultCFOpts.writeBufferSize()).isEqualTo(baseDefaultCFOpts.writeBufferSize());
+ assertThat(defaultCFOpts.maxWriteBufferNumber())
+ .isEqualTo(baseDefaultCFOpts.maxWriteBufferNumber());
+ assertThat(defaultCFOpts.maxBytesForLevelBase())
+ .isEqualTo(baseDefaultCFOpts.maxBytesForLevelBase());
+ assertThat(defaultCFOpts.level0FileNumCompactionTrigger())
+ .isEqualTo(baseDefaultCFOpts.level0FileNumCompactionTrigger());
+ assertThat(defaultCFOpts.level0SlowdownWritesTrigger())
+ .isEqualTo(baseDefaultCFOpts.level0SlowdownWritesTrigger());
+ assertThat(defaultCFOpts.bottommostCompressionType())
+ .isEqualTo(baseDefaultCFOpts.bottommostCompressionType());
+
+ ColumnFamilyOptions secondCFOpts = cfDescs.get(1).getOptions();
+ assertThat(secondCFOpts.writeBufferSize()).isEqualTo(baseSecondCFOpts.writeBufferSize());
+ assertThat(secondCFOpts.maxWriteBufferNumber())
+ .isEqualTo(baseSecondCFOpts.maxWriteBufferNumber());
+ assertThat(secondCFOpts.maxBytesForLevelBase())
+ .isEqualTo(baseSecondCFOpts.maxBytesForLevelBase());
+ assertThat(secondCFOpts.level0FileNumCompactionTrigger())
+ .isEqualTo(baseSecondCFOpts.level0FileNumCompactionTrigger());
+ assertThat(secondCFOpts.level0SlowdownWritesTrigger())
+ .isEqualTo(baseSecondCFOpts.level0SlowdownWritesTrigger());
+ assertThat(secondCFOpts.bottommostCompressionType())
+ .isEqualTo(baseSecondCFOpts.bottommostCompressionType());
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java
new file mode 100644
index 000000000..c813dbbb4
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java
@@ -0,0 +1,89 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class PlainTableConfigTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+ public void keySize() {
+ PlainTableConfig plainTableConfig = new PlainTableConfig();
+ plainTableConfig.setKeySize(5);
+ assertThat(plainTableConfig.keySize()).
+ isEqualTo(5);
+ }
+
+ @Test
+ public void bloomBitsPerKey() {
+ PlainTableConfig plainTableConfig = new PlainTableConfig();
+ plainTableConfig.setBloomBitsPerKey(11);
+ assertThat(plainTableConfig.bloomBitsPerKey()).
+ isEqualTo(11);
+ }
+
+ @Test
+ public void hashTableRatio() {
+ PlainTableConfig plainTableConfig = new PlainTableConfig();
+ plainTableConfig.setHashTableRatio(0.95);
+ assertThat(plainTableConfig.hashTableRatio()).
+ isEqualTo(0.95);
+ }
+
+ @Test
+ public void indexSparseness() {
+ PlainTableConfig plainTableConfig = new PlainTableConfig();
+ plainTableConfig.setIndexSparseness(18);
+ assertThat(plainTableConfig.indexSparseness()).
+ isEqualTo(18);
+ }
+
+ @Test
+ public void hugePageTlbSize() {
+ PlainTableConfig plainTableConfig = new PlainTableConfig();
+ plainTableConfig.setHugePageTlbSize(1);
+ assertThat(plainTableConfig.hugePageTlbSize()).
+ isEqualTo(1);
+ }
+
+ @Test
+ public void encodingType() {
+ PlainTableConfig plainTableConfig = new PlainTableConfig();
+ plainTableConfig.setEncodingType(EncodingType.kPrefix);
+ assertThat(plainTableConfig.encodingType()).isEqualTo(
+ EncodingType.kPrefix);
+ }
+
+ @Test
+ public void fullScanMode() {
+ PlainTableConfig plainTableConfig = new PlainTableConfig();
+ plainTableConfig.setFullScanMode(true);
+ assertThat(plainTableConfig.fullScanMode()).isTrue();
+ }
+
+ @Test
+ public void storeIndexInFile() {
+ PlainTableConfig plainTableConfig = new PlainTableConfig();
+ plainTableConfig.setStoreIndexInFile(true);
+ assertThat(plainTableConfig.storeIndexInFile()).
+ isTrue();
+ }
+
+ @Test
+ public void plainTableConfig() {
+ try (final Options opt = new Options()) {
+ final PlainTableConfig plainTableConfig = new PlainTableConfig();
+ opt.setTableFormatConfig(plainTableConfig);
+ assertThat(opt.tableFactoryName()).isEqualTo("PlainTable");
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java b/src/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java
new file mode 100644
index 000000000..80ea4d197
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java
@@ -0,0 +1,58 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Random;
+
+/**
+ * Helper class to get the appropriate Random class instance dependent
+ * on the current platform architecture (32bit vs 64bit)
+ */
+public class PlatformRandomHelper {
+ /**
+ * Determine if OS is 32-Bit/64-Bit
+ *
+ * @return boolean value indicating if operating system is 64 Bit.
+ */
+ public static boolean isOs64Bit(){
+ final boolean is64Bit;
+ if (System.getProperty("os.name").contains("Windows")) {
+ is64Bit = (System.getenv("ProgramFiles(x86)") != null);
+ } else {
+ is64Bit = (System.getProperty("os.arch").contains("64"));
+ }
+ return is64Bit;
+ }
+
+ /**
+ * Factory to get a platform specific Random instance
+ *
+ * @return {@link java.util.Random} instance.
+ */
+ public static Random getPlatformSpecificRandomFactory(){
+ if (isOs64Bit()) {
+ return new Random();
+ }
+ return new Random32Bit();
+ }
+
+ /**
+ * Random32Bit is a class which overrides {@code nextLong} to
+ * provide random numbers which fit in size_t. This workaround
+ * is necessary because Java versions before 8 have no unsigned
+ * integer type.
+ */
+ private static class Random32Bit extends Random {
+ @Override
+ public long nextLong(){
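+ // Constrain values to the non-negative int range so they fit in a
+ // 32-bit size_t.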
+ return this.nextInt(Integer.MAX_VALUE);
+ }
+ }
+
+ /**
+ * Utility class constructor
+ */
+ private PlatformRandomHelper() { }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java
new file mode 100644
index 000000000..e7d6e6c49
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java
@@ -0,0 +1,65 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.rocksdb.RateLimiter.*;
+
+public class RateLimiterTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+ public void bytesPerSecond() {
+ try (final RateLimiter rateLimiter =
+ new RateLimiter(1000, DEFAULT_REFILL_PERIOD_MICROS,
+ DEFAULT_FAIRNESS, DEFAULT_MODE, DEFAULT_AUTOTUNE)) {
+ assertThat(rateLimiter.getBytesPerSecond()).isGreaterThan(0);
+ rateLimiter.setBytesPerSecond(2000);
+ assertThat(rateLimiter.getBytesPerSecond()).isGreaterThan(0);
+ }
+ }
+
+ @Test
+ public void getSingleBurstBytes() {
+ try (final RateLimiter rateLimiter =
+ new RateLimiter(1000, DEFAULT_REFILL_PERIOD_MICROS,
+ DEFAULT_FAIRNESS, DEFAULT_MODE, DEFAULT_AUTOTUNE)) {
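+ // With the default refill period of 100,000 microseconds, a rate of
+ // 1000 bytes/second refills 100 bytes at a time, which is the
+ // single burst size.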
+ assertThat(rateLimiter.getSingleBurstBytes()).isEqualTo(100);
+ }
+ }
+
+ @Test
+ public void getTotalBytesThrough() {
+ try (final RateLimiter rateLimiter =
+ new RateLimiter(1000, DEFAULT_REFILL_PERIOD_MICROS,
+ DEFAULT_FAIRNESS, DEFAULT_MODE, DEFAULT_AUTOTUNE)) {
+ assertThat(rateLimiter.getTotalBytesThrough()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void getTotalRequests() {
+ try (final RateLimiter rateLimiter =
+ new RateLimiter(1000, DEFAULT_REFILL_PERIOD_MICROS,
+ DEFAULT_FAIRNESS, DEFAULT_MODE, DEFAULT_AUTOTUNE)) {
+ assertThat(rateLimiter.getTotalRequests()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void autoTune() {
+ try (final RateLimiter rateLimiter =
+ new RateLimiter(1000, DEFAULT_REFILL_PERIOD_MICROS,
+ DEFAULT_FAIRNESS, DEFAULT_MODE, true)) {
+ assertThat(rateLimiter.getBytesPerSecond()).isGreaterThan(0);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java
new file mode 100644
index 000000000..6d5bc96fc
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java
@@ -0,0 +1,305 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ReadOnlyTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void readOnlyOpen() throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ db.put("key".getBytes(), "value".getBytes());
+ try (final RocksDB db2 = RocksDB.openReadOnly(
+ dbFolder.getRoot().getAbsolutePath())) {
+ assertThat("value").
+ isEqualTo(new String(db2.get("key".getBytes())));
+ }
+ }
+
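+ // Repeat with explicit column families: create and populate them in
+ // read-write mode first, then re-open read-only and verify visibility.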
+ try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
+ cfDescriptors.add(new ColumnFamilyDescriptor(
+ RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts));
+
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, columnFamilyHandleList)) {
+ try (final ColumnFamilyOptions newCfOpts = new ColumnFamilyOptions();
+ final ColumnFamilyOptions newCf2Opts = new ColumnFamilyOptions()
+ ) {
+ columnFamilyHandleList.add(db.createColumnFamily(
+ new ColumnFamilyDescriptor("new_cf".getBytes(), newCfOpts)));
+ columnFamilyHandleList.add(db.createColumnFamily(
+ new ColumnFamilyDescriptor("new_cf2".getBytes(), newCf2Opts)));
+ db.put(columnFamilyHandleList.get(2), "key2".getBytes(),
+ "value2".getBytes());
+
+ final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+ new ArrayList<>();
+ try (final RocksDB db2 = RocksDB.openReadOnly(
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ readOnlyColumnFamilyHandleList)) {
+ try (final ColumnFamilyOptions newCfOpts2 =
+ new ColumnFamilyOptions();
+ final ColumnFamilyOptions newCf2Opts2 =
+ new ColumnFamilyOptions()
+ ) {
+ assertThat(db2.get("key2".getBytes())).isNull();
+ assertThat(db2.get(readOnlyColumnFamilyHandleList.get(0),
+ "key2".getBytes())).
+ isNull();
+ cfDescriptors.clear();
+ cfDescriptors.add(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
+ newCfOpts2));
+ cfDescriptors.add(new ColumnFamilyDescriptor("new_cf2".getBytes(),
+ newCf2Opts2));
+
+ final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList2
+ = new ArrayList<>();
+ try (final RocksDB db3 = RocksDB.openReadOnly(
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ readOnlyColumnFamilyHandleList2)) {
+ try {
+ assertThat(new String(db3.get(
+ readOnlyColumnFamilyHandleList2.get(1),
+ "key2".getBytes()))).isEqualTo("value2");
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ readOnlyColumnFamilyHandleList2) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ readOnlyColumnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failToWriteInReadOnly() throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)) {
+
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+ }
+
+ try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+ );
+
+ final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+ new ArrayList<>();
+ try (final RocksDB rDb = RocksDB.openReadOnly(
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ readOnlyColumnFamilyHandleList)) {
+ try {
+ // test that put fails in readonly mode
+ rDb.put("key".getBytes(), "value".getBytes());
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ readOnlyColumnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failToCFWriteInReadOnly() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+ );
+ final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+ new ArrayList<>();
+ try (final RocksDB rDb = RocksDB.openReadOnly(
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ readOnlyColumnFamilyHandleList)) {
+ try {
+ rDb.put(readOnlyColumnFamilyHandleList.get(0),
+ "key".getBytes(), "value".getBytes());
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ readOnlyColumnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failToRemoveInReadOnly() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+ );
+
+ final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+ new ArrayList<>();
+
+ try (final RocksDB rDb = RocksDB.openReadOnly(
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ readOnlyColumnFamilyHandleList)) {
+ try {
+ rDb.delete("key".getBytes());
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ readOnlyColumnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failToCFRemoveInReadOnly() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+ );
+
+ final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+ new ArrayList<>();
+ try (final RocksDB rDb = RocksDB.openReadOnly(
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ readOnlyColumnFamilyHandleList)) {
+ try {
+ rDb.delete(readOnlyColumnFamilyHandleList.get(0),
+ "key".getBytes());
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ readOnlyColumnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failToWriteBatchReadOnly() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+ );
+
+ final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+ new ArrayList<>();
+ try (final RocksDB rDb = RocksDB.openReadOnly(
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ readOnlyColumnFamilyHandleList);
+ final WriteBatch wb = new WriteBatch();
+ final WriteOptions wOpts = new WriteOptions()) {
+ try {
+ wb.put("key".getBytes(), "value".getBytes());
+ rDb.write(wOpts, wb);
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ readOnlyColumnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void failToCFWriteBatchReadOnly() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ //no-op
+ }
+
+ try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+ );
+
+ final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+ new ArrayList<>();
+ try (final RocksDB rDb = RocksDB.openReadOnly(
+ dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+ readOnlyColumnFamilyHandleList);
+ final WriteBatch wb = new WriteBatch();
+ final WriteOptions wOpts = new WriteOptions()) {
+ try {
+ wb.put(readOnlyColumnFamilyHandleList.get(0), "key".getBytes(),
+ "value".getBytes());
+ rDb.write(wOpts, wb);
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ readOnlyColumnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java
new file mode 100644
index 000000000..675023ef3
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java
@@ -0,0 +1,323 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.Random;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ReadOptionsTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public ExpectedException exception = ExpectedException.none();
+
+ @Test
+ public void altConstructor() {
+ try (final ReadOptions opt = new ReadOptions(true, true)) {
+ assertThat(opt.verifyChecksums()).isTrue();
+ assertThat(opt.fillCache()).isTrue();
+ }
+ }
+
+ @Test
+ public void copyConstructor() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ opt.setVerifyChecksums(false);
+ opt.setFillCache(false);
+ opt.setIterateUpperBound(buildRandomSlice());
+ opt.setIterateLowerBound(buildRandomSlice());
+ try (final ReadOptions other = new ReadOptions(opt)) {
+ assertThat(opt.verifyChecksums()).isEqualTo(other.verifyChecksums());
+ assertThat(opt.fillCache()).isEqualTo(other.fillCache());
+ assertThat(Arrays.equals(opt.iterateUpperBound().data(), other.iterateUpperBound().data())).isTrue();
+ assertThat(Arrays.equals(opt.iterateLowerBound().data(), other.iterateLowerBound().data())).isTrue();
+ }
+ }
+ }
+
+ @Test
+ public void verifyChecksum() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ final Random rand = new Random();
+ final boolean boolValue = rand.nextBoolean();
+ opt.setVerifyChecksums(boolValue);
+ assertThat(opt.verifyChecksums()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void fillCache() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ final Random rand = new Random();
+ final boolean boolValue = rand.nextBoolean();
+ opt.setFillCache(boolValue);
+ assertThat(opt.fillCache()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void tailing() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ final Random rand = new Random();
+ final boolean boolValue = rand.nextBoolean();
+ opt.setTailing(boolValue);
+ assertThat(opt.tailing()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void snapshot() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ opt.setSnapshot(null);
+ assertThat(opt.snapshot()).isNull();
+ }
+ }
+
+ @Test
+ public void readTier() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ opt.setReadTier(ReadTier.BLOCK_CACHE_TIER);
+ assertThat(opt.readTier()).isEqualTo(ReadTier.BLOCK_CACHE_TIER);
+ }
+ }
+
+ @SuppressWarnings("deprecated")
+ @Test
+ public void managed() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ opt.setManaged(true);
+ assertThat(opt.managed()).isTrue();
+ }
+ }
+
+ @Test
+ public void totalOrderSeek() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ opt.setTotalOrderSeek(true);
+ assertThat(opt.totalOrderSeek()).isTrue();
+ }
+ }
+
+ @Test
+ public void prefixSameAsStart() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ opt.setPrefixSameAsStart(true);
+ assertThat(opt.prefixSameAsStart()).isTrue();
+ }
+ }
+
+ @Test
+ public void pinData() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ opt.setPinData(true);
+ assertThat(opt.pinData()).isTrue();
+ }
+ }
+
+ @Test
+ public void backgroundPurgeOnIteratorCleanup() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ opt.setBackgroundPurgeOnIteratorCleanup(true);
+ assertThat(opt.backgroundPurgeOnIteratorCleanup()).isTrue();
+ }
+ }
+
+ @Test
+ public void readaheadSize() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ final Random rand = new Random();
+ final long longValue = rand.nextLong();
+ opt.setReadaheadSize(longValue);
+ assertThat(opt.readaheadSize()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void ignoreRangeDeletions() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ opt.setIgnoreRangeDeletions(true);
+ assertThat(opt.ignoreRangeDeletions()).isTrue();
+ }
+ }
+
+ @Test
+ public void iterateUpperBound() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ Slice upperBound = buildRandomSlice();
+ opt.setIterateUpperBound(upperBound);
+ assertThat(Arrays.equals(upperBound.data(), opt.iterateUpperBound().data())).isTrue();
+ }
+ }
+
+ @Test
+ public void iterateUpperBoundNull() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ assertThat(opt.iterateUpperBound()).isNull();
+ }
+ }
+
+ @Test
+ public void iterateLowerBound() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ Slice lowerBound = buildRandomSlice();
+ opt.setIterateLowerBound(lowerBound);
+ assertThat(Arrays.equals(lowerBound.data(), opt.iterateLowerBound().data())).isTrue();
+ }
+ }
+
+ @Test
+ public void iterateLowerBoundNull() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ assertThat(opt.iterateLowerBound()).isNull();
+ }
+ }
+
+ @Test
+ public void tableFilter() {
+ try (final ReadOptions opt = new ReadOptions();
+ final AbstractTableFilter allTablesFilter = new AllTablesFilter()) {
+ opt.setTableFilter(allTablesFilter);
+ }
+ }
+
+ @Test
+ public void iterStartSeqnum() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ assertThat(opt.iterStartSeqnum()).isEqualTo(0);
+
+ opt.setIterStartSeqnum(10);
+ assertThat(opt.iterStartSeqnum()).isEqualTo(10);
+ }
+ }
+
+ @Test
+ public void failSetVerifyChecksumUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.setVerifyChecksums(true);
+ }
+ }
+
+ @Test
+ public void failVerifyChecksumUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.verifyChecksums();
+ }
+ }
+
+ @Test
+ public void failSetFillCacheUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.setFillCache(true);
+ }
+ }
+
+ @Test
+ public void failFillCacheUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.fillCache();
+ }
+ }
+
+ @Test
+ public void failSetTailingUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.setTailing(true);
+ }
+ }
+
+ @Test
+ public void failTailingUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.tailing();
+ }
+ }
+
+ @Test
+ public void failSetSnapshotUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.setSnapshot(null);
+ }
+ }
+
+ @Test
+ public void failSnapshotUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.snapshot();
+ }
+ }
+
+ @Test
+ public void failSetIterateUpperBoundUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.setIterateUpperBound(null);
+ }
+ }
+
+ @Test
+ public void failIterateUpperBoundUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.iterateUpperBound();
+ }
+ }
+
+ @Test
+ public void failSetIterateLowerBoundUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.setIterateLowerBound(null);
+ }
+ }
+
+ @Test
+ public void failIterateLowerBoundUninitialized() {
+ try (final ReadOptions readOptions =
+ setupUninitializedReadOptions(exception)) {
+ readOptions.iterateLowerBound();
+ }
+ }
+
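+ /**
+ * Returns a ReadOptions that has already been closed, and primes the
+ * ExpectedException rule so that the caller's next use of the object
+ * is expected to fail the native handle assertion.
+ */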
+ private ReadOptions setupUninitializedReadOptions(
+ ExpectedException exception) {
+ final ReadOptions readOptions = new ReadOptions();
+ readOptions.close();
+ exception.expect(AssertionError.class);
+ return readOptions;
+ }
+
+ private Slice buildRandomSlice() {
+ final Random rand = new Random();
+ byte[] sliceBytes = new byte[rand.nextInt(100) + 1];
+ rand.nextBytes(sliceBytes);
+ return new Slice(sliceBytes);
+ }
+
+ private static class AllTablesFilter extends AbstractTableFilter {
+ @Override
+ public boolean filter(final TableProperties tableProperties) {
+ return true;
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java
new file mode 100644
index 000000000..d3bd4ece7
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java
@@ -0,0 +1,115 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import org.rocksdb.Status.Code;
+import org.rocksdb.Status.SubCode;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.fail;
+
+public class RocksDBExceptionTest {
+
+ @Test
+ public void exception() {
+ try {
+ raiseException();
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus()).isNull();
+ assertThat(e.getMessage()).isEqualTo("test message");
+ return;
+ }
+ fail();
+ }
+
+ @Test
+ public void exceptionWithStatusCode() {
+ try {
+ raiseExceptionWithStatusCode();
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus()).isNotNull();
+ assertThat(e.getStatus().getCode()).isEqualTo(Code.NotSupported);
+ assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.None);
+ assertThat(e.getStatus().getState()).isNull();
+ assertThat(e.getMessage()).isEqualTo("test message");
+ return;
+ }
+ fail();
+ }
+
+ @Test
+ public void exceptionNoMsgWithStatusCode() {
+ try {
+ raiseExceptionNoMsgWithStatusCode();
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus()).isNotNull();
+ assertThat(e.getStatus().getCode()).isEqualTo(Code.NotSupported);
+ assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.None);
+ assertThat(e.getStatus().getState()).isNull();
+ assertThat(e.getMessage()).isEqualTo(Code.NotSupported.name());
+ return;
+ }
+ fail();
+ }
+
+ @Test
+ public void exceptionWithStatusCodeSubCode() {
+ try {
+ raiseExceptionWithStatusCodeSubCode();
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus()).isNotNull();
+ assertThat(e.getStatus().getCode()).isEqualTo(Code.TimedOut);
+ assertThat(e.getStatus().getSubCode())
+ .isEqualTo(Status.SubCode.LockTimeout);
+ assertThat(e.getStatus().getState()).isNull();
+ assertThat(e.getMessage()).isEqualTo("test message");
+ return;
+ }
+ fail();
+ }
+
+ @Test
+ public void exceptionNoMsgWithStatusCodeSubCode() {
+ try {
+ raiseExceptionNoMsgWithStatusCodeSubCode();
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus()).isNotNull();
+ assertThat(e.getStatus().getCode()).isEqualTo(Code.TimedOut);
+ assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.LockTimeout);
+ assertThat(e.getStatus().getState()).isNull();
+ assertThat(e.getMessage()).isEqualTo(Code.TimedOut.name() +
+ "(" + SubCode.LockTimeout.name() + ")");
+ return;
+ }
+ fail();
+ }
+
+ @Test
+ public void exceptionWithStatusCodeState() {
+ try {
+ raiseExceptionWithStatusCodeState();
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus()).isNotNull();
+ assertThat(e.getStatus().getCode()).isEqualTo(Code.NotSupported);
+ assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.None);
+ assertThat(e.getStatus().getState()).isNotNull();
+ assertThat(e.getMessage()).isEqualTo("test message");
+ return;
+ }
+ fail();
+ }
+
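+ // The methods below are implemented in the JNI test harness, which
+ // raises RocksDBException with the corresponding status values.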
+ private native void raiseException() throws RocksDBException;
+ private native void raiseExceptionWithStatusCode() throws RocksDBException;
+ private native void raiseExceptionNoMsgWithStatusCode() throws RocksDBException;
+ private native void raiseExceptionWithStatusCodeSubCode()
+ throws RocksDBException;
+ private native void raiseExceptionNoMsgWithStatusCodeSubCode()
+ throws RocksDBException;
+ private native void raiseExceptionWithStatusCodeState()
+ throws RocksDBException;
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java
new file mode 100644
index 000000000..b4d96ed43
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java
@@ -0,0 +1,1669 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.*;
+import org.junit.rules.ExpectedException;
+import org.junit.rules.TemporaryFolder;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.fail;
+
+public class RocksDBTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ public static final Random rand = PlatformRandomHelper.
+ getPlatformSpecificRandomFactory();
+
+ @Test
+ public void open() throws RocksDBException {
+ try (final RocksDB db =
+ RocksDB.open(dbFolder.getRoot().getAbsolutePath())) {
+ assertThat(db).isNotNull();
+ }
+ }
+
+ @Test
+ public void open_opt() throws RocksDBException {
+ try (final Options opt = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ assertThat(db).isNotNull();
+ }
+ }
+
+ @Test
+ public void openWhenOpen() throws RocksDBException {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+
+ try (final RocksDB db1 = RocksDB.open(dbPath)) {
+ try (final RocksDB db2 = RocksDB.open(dbPath)) {
+ fail("Should have thrown an exception when opening the same db twice");
+ } catch (final RocksDBException e) {
+ assertThat(e.getStatus().getCode()).isEqualTo(Status.Code.IOError);
+ assertThat(e.getStatus().getSubCode()).isEqualTo(Status.SubCode.None);
+ assertThat(e.getStatus().getState()).contains("lock ");
+ }
+ }
+ }
+
+ @Test
+ public void createColumnFamily() throws RocksDBException {
+ final byte[] col1Name = "col1".getBytes(UTF_8);
+
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+ ) {
+ try (final ColumnFamilyHandle col1 =
+ db.createColumnFamily(new ColumnFamilyDescriptor(col1Name, cfOpts))) {
+ assertThat(col1).isNotNull();
+ assertThat(col1.getName()).isEqualTo(col1Name);
+ }
+ }
+
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor(col1Name)),
+ cfHandles)) {
+ try {
+ assertThat(cfHandles.size()).isEqualTo(2);
+ assertThat(cfHandles.get(1)).isNotNull();
+ assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle :
+ cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void createColumnFamilies() throws RocksDBException {
+ final byte[] col1Name = "col1".getBytes(UTF_8);
+ final byte[] col2Name = "col2".getBytes(UTF_8);
+
+ List<ColumnFamilyHandle> cfHandles;
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+ ) {
+ cfHandles =
+ db.createColumnFamilies(cfOpts, Arrays.asList(col1Name, col2Name));
+ try {
+ assertThat(cfHandles).isNotNull();
+ assertThat(cfHandles.size()).isEqualTo(2);
+ assertThat(cfHandles.get(0).getName()).isEqualTo(col1Name);
+ assertThat(cfHandles.get(1).getName()).isEqualTo(col2Name);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+
+ cfHandles = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor(col1Name),
+ new ColumnFamilyDescriptor(col2Name)),
+ cfHandles)) {
+ try {
+ assertThat(cfHandles.size()).isEqualTo(3);
+ assertThat(cfHandles.get(1)).isNotNull();
+ assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+ assertThat(cfHandles.get(2)).isNotNull();
+ assertThat(cfHandles.get(2).getName()).isEqualTo(col2Name);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+  public void createColumnFamiliesFromDescriptors() throws RocksDBException {
+ final byte[] col1Name = "col1".getBytes(UTF_8);
+ final byte[] col2Name = "col2".getBytes(UTF_8);
+
+ List<ColumnFamilyHandle> cfHandles;
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+ ) {
+ cfHandles =
+ db.createColumnFamilies(Arrays.asList(
+ new ColumnFamilyDescriptor(col1Name, cfOpts),
+ new ColumnFamilyDescriptor(col2Name, cfOpts)));
+ try {
+ assertThat(cfHandles).isNotNull();
+ assertThat(cfHandles.size()).isEqualTo(2);
+ assertThat(cfHandles.get(0).getName()).isEqualTo(col1Name);
+ assertThat(cfHandles.get(1).getName()).isEqualTo(col2Name);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+
+ cfHandles = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor(col1Name),
+ new ColumnFamilyDescriptor(col2Name)),
+ cfHandles)) {
+ try {
+ assertThat(cfHandles.size()).isEqualTo(3);
+ assertThat(cfHandles.get(1)).isNotNull();
+ assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+ assertThat(cfHandles.get(2)).isNotNull();
+ assertThat(cfHandles.get(2).getName()).isEqualTo(col2Name);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void put() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+         final WriteOptions opt = new WriteOptions();
+         final ReadOptions optr = new ReadOptions()) {
+ db.put("key1".getBytes(), "value".getBytes());
+ db.put(opt, "key2".getBytes(), "12345678".getBytes());
+ assertThat(db.get("key1".getBytes())).isEqualTo(
+ "value".getBytes());
+ assertThat(db.get("key2".getBytes())).isEqualTo(
+ "12345678".getBytes());
+
+ ByteBuffer key = ByteBuffer.allocateDirect(12);
+ ByteBuffer value = ByteBuffer.allocateDirect(12);
+ key.position(4);
+ key.put("key3".getBytes());
+ key.position(4).limit(8);
+ value.position(4);
+ value.put("val3".getBytes());
+ value.position(4).limit(8);
+
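+      // Note on the direct-ByteBuffer API, as exercised by the assertions
+      // that follow: put(opt, key, value) reads each buffer from its
+      // current position up to its limit, and advances the position to the
+      // limit once the bytes have been consumed.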
+ db.put(opt, key, value);
+
+ assertThat(key.position()).isEqualTo(8);
+ assertThat(key.limit()).isEqualTo(8);
+
+ assertThat(value.position()).isEqualTo(8);
+ assertThat(value.limit()).isEqualTo(8);
+
+ key.position(4);
+
+ ByteBuffer result = ByteBuffer.allocateDirect(12);
+ assertThat(db.get(optr, key, result)).isEqualTo(4);
+ assertThat(result.position()).isEqualTo(0);
+ assertThat(result.limit()).isEqualTo(4);
+ assertThat(key.position()).isEqualTo(8);
+ assertThat(key.limit()).isEqualTo(8);
+
+ byte[] tmp = new byte[4];
+ result.get(tmp);
+ assertThat(tmp).isEqualTo("val3".getBytes());
+
+ key.position(4);
+
+ result.clear().position(9);
+ assertThat(db.get(optr, key, result)).isEqualTo(4);
+ assertThat(result.position()).isEqualTo(9);
+ assertThat(result.limit()).isEqualTo(12);
+ assertThat(key.position()).isEqualTo(8);
+ assertThat(key.limit()).isEqualTo(8);
+ byte[] tmp2 = new byte[3];
+ result.get(tmp2);
+ assertThat(tmp2).isEqualTo("val".getBytes());
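+
+      // As the two reads above demonstrate, get(optr, key, result) always
+      // returns the full length of the stored value (4 for "val3"), even
+      // when fewer bytes fit between the result buffer's position and
+      // limit; the copy is simply truncated, which is why only "val"
+      // arrived in the second, 3-byte read.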
+
+ // put
+ Segment key3 = sliceSegment("key3");
+ Segment key4 = sliceSegment("key4");
+ Segment value0 = sliceSegment("value 0");
+ Segment value1 = sliceSegment("value 1");
+ db.put(key3.data, key3.offset, key3.len, value0.data, value0.offset, value0.len);
+ db.put(opt, key4.data, key4.offset, key4.len, value1.data, value1.offset, value1.len);
+
+ // compare
+ Assert.assertTrue(value0.isSamePayload(db.get(key3.data, key3.offset, key3.len)));
+ Assert.assertTrue(value1.isSamePayload(db.get(key4.data, key4.offset, key4.len)));
+ }
+ }
+
+ private static Segment sliceSegment(String key) {
+ ByteBuffer rawKey = ByteBuffer.allocate(key.length() + 4);
+ rawKey.put((byte)0);
+ rawKey.put((byte)0);
+ rawKey.put(key.getBytes());
+
+ return new Segment(rawKey.array(), 2, key.length());
+ }
+
+ private static class Segment {
+ final byte[] data;
+ final int offset;
+ final int len;
+
+ public boolean isSamePayload(byte[] value) {
+ if (value == null) {
+ return false;
+ }
+ if (value.length != len) {
+ return false;
+ }
+
+ for (int i = 0; i < value.length; i++) {
+ if (data[i + offset] != value[i]) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ public Segment(byte[] value, int offset, int len) {
+ this.data = value;
+ this.offset = offset;
+ this.len = len;
+ }
+ }
+
+ @Test
+ public void write() throws RocksDBException {
+ try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+ final Options options = new Options()
+ .setMergeOperator(stringAppendOperator)
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions opts = new WriteOptions()) {
+
+ try (final WriteBatch wb1 = new WriteBatch()) {
+ wb1.put("key1".getBytes(), "aa".getBytes());
+ wb1.merge("key1".getBytes(), "bb".getBytes());
+
+ try (final WriteBatch wb2 = new WriteBatch()) {
+ wb2.put("key2".getBytes(), "xx".getBytes());
+ wb2.merge("key2".getBytes(), "yy".getBytes());
+ db.write(opts, wb1);
+ db.write(opts, wb2);
+ }
+ }
+
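+      // The StringAppendOperator merges by concatenating the existing value
+      // and each merge operand with a ',' delimiter, which is what the two
+      // assertions below expect.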
+ assertThat(db.get("key1".getBytes())).isEqualTo(
+ "aa,bb".getBytes());
+ assertThat(db.get("key2".getBytes())).isEqualTo(
+ "xx,yy".getBytes());
+ }
+ }
+
+ @Test
+ public void getWithOutValue() throws RocksDBException {
+ try (final RocksDB db =
+ RocksDB.open(dbFolder.getRoot().getAbsolutePath())) {
+ db.put("key1".getBytes(), "value".getBytes());
+ db.put("key2".getBytes(), "12345678".getBytes());
+ byte[] outValue = new byte[5];
+      // lookup of a key that is not present
+ int getResult = db.get("keyNotFound".getBytes(), outValue);
+ assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND);
+ // found value which fits in outValue
+ getResult = db.get("key1".getBytes(), outValue);
+ assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
+ assertThat(outValue).isEqualTo("value".getBytes());
+ // found value which fits partially
+ getResult = db.get("key2".getBytes(), outValue);
+ assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
+ assertThat(outValue).isEqualTo("12345".getBytes());
+ }
+ }
+
+ @Test
+ public void getWithOutValueReadOptions() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final ReadOptions rOpt = new ReadOptions()) {
+ db.put("key1".getBytes(), "value".getBytes());
+ db.put("key2".getBytes(), "12345678".getBytes());
+ byte[] outValue = new byte[5];
+      // lookup of a key that is not present
+ int getResult = db.get(rOpt, "keyNotFound".getBytes(),
+ outValue);
+ assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND);
+ // found value which fits in outValue
+ getResult = db.get(rOpt, "key1".getBytes(), outValue);
+ assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
+ assertThat(outValue).isEqualTo("value".getBytes());
+ // found value which fits partially
+ getResult = db.get(rOpt, "key2".getBytes(), outValue);
+ assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND);
+ assertThat(outValue).isEqualTo("12345".getBytes());
+ }
+ }
+
+ @Rule
+ public ExpectedException thrown = ExpectedException.none();
+
+ @Test
+ public void getOutOfArrayMaxSizeValue() throws RocksDBException {
+ final int numberOfValueSplits = 10;
+ final int splitSize = Integer.MAX_VALUE / numberOfValueSplits;
+
+ Runtime runtime = Runtime.getRuntime();
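+    // Rough headroom estimate: the fully merged value alone is about
+    // (numberOfValueSplits + 1) * splitSize bytes; the extra splits in the
+    // factor below leave slack for intermediate copies while the result is
+    // materialized.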
+ long neededMemory = ((long)(splitSize)) * (((long)numberOfValueSplits) + 3);
+ boolean isEnoughMemory = runtime.maxMemory() - runtime.totalMemory() > neededMemory;
+ Assume.assumeTrue(isEnoughMemory);
+
+ final byte[] valueSplit = new byte[splitSize];
+ final byte[] key = "key".getBytes();
+
+ thrown.expect(RocksDBException.class);
+ thrown.expectMessage("Requested array size exceeds VM limit");
+
+    // merge (numberOfValueSplits + 1) valueSplits to get a value whose size exceeds Integer.MAX_VALUE
+ try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+ final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setMergeOperator(stringAppendOperator);
+ final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+ db.put(key, valueSplit);
+ for (int i = 0; i < numberOfValueSplits; i++) {
+ db.merge(key, valueSplit);
+ }
+ db.get(key);
+ }
+ }
+
+ @SuppressWarnings("deprecated")
+ @Test
+ public void multiGet() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final ReadOptions rOpt = new ReadOptions()) {
+ db.put("key1".getBytes(), "value".getBytes());
+ db.put("key2".getBytes(), "12345678".getBytes());
+ List<byte[]> lookupKeys = new ArrayList<>();
+ lookupKeys.add("key1".getBytes());
+ lookupKeys.add("key2".getBytes());
+ Map<byte[], byte[]> results = db.multiGet(lookupKeys);
+ assertThat(results).isNotNull();
+ assertThat(results.values()).isNotNull();
+ assertThat(results.values()).
+ contains("value".getBytes(), "12345678".getBytes());
+ // test same method with ReadOptions
+ results = db.multiGet(rOpt, lookupKeys);
+ assertThat(results).isNotNull();
+ assertThat(results.values()).isNotNull();
+ assertThat(results.values()).
+ contains("value".getBytes(), "12345678".getBytes());
+
+      // remove an existing key by index; List.remove(Object) would silently
+      // fail here because byte[] equality is identity-based, so a fresh
+      // "key2".getBytes() never matches the array stored in the list
+      lookupKeys.remove(1);
+      // add a non-existing key
+      lookupKeys.add("key3".getBytes());
+ results = db.multiGet(lookupKeys);
+ assertThat(results).isNotNull();
+ assertThat(results.values()).isNotNull();
+ assertThat(results.values()).
+ contains("value".getBytes());
+ // test same call with readOptions
+ results = db.multiGet(rOpt, lookupKeys);
+ assertThat(results).isNotNull();
+ assertThat(results.values()).isNotNull();
+ assertThat(results.values()).
+ contains("value".getBytes());
+ }
+ }
+
+ @Test
+ public void multiGetAsList() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final ReadOptions rOpt = new ReadOptions()) {
+ db.put("key1".getBytes(), "value".getBytes());
+ db.put("key2".getBytes(), "12345678".getBytes());
+ List<byte[]> lookupKeys = new ArrayList<>();
+ lookupKeys.add("key1".getBytes());
+ lookupKeys.add("key2".getBytes());
+ List<byte[]> results = db.multiGetAsList(lookupKeys);
+ assertThat(results).isNotNull();
+ assertThat(results).hasSize(lookupKeys.size());
+ assertThat(results).
+ containsExactly("value".getBytes(), "12345678".getBytes());
+ // test same method with ReadOptions
+ results = db.multiGetAsList(rOpt, lookupKeys);
+ assertThat(results).isNotNull();
+ assertThat(results).
+ contains("value".getBytes(), "12345678".getBytes());
+
+      // remove an existing key
+      lookupKeys.remove(1);
+      // add a non-existing key
+ lookupKeys.add("key3".getBytes());
+ results = db.multiGetAsList(lookupKeys);
+ assertThat(results).isNotNull();
+ assertThat(results).
+ containsExactly("value".getBytes(), null);
+ // test same call with readOptions
+ results = db.multiGetAsList(rOpt, lookupKeys);
+ assertThat(results).isNotNull();
+ assertThat(results).contains("value".getBytes());
+ }
+ }
+
+ @Test
+ public void merge() throws RocksDBException {
+ try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+ final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setMergeOperator(stringAppendOperator);
+ final WriteOptions wOpt = new WriteOptions();
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())
+ ) {
+ db.put("key1".getBytes(), "value".getBytes());
+ assertThat(db.get("key1".getBytes())).isEqualTo(
+ "value".getBytes());
+ // merge key1 with another value portion
+ db.merge("key1".getBytes(), "value2".getBytes());
+ assertThat(db.get("key1".getBytes())).isEqualTo(
+ "value,value2".getBytes());
+ // merge key1 with another value portion
+ db.merge(wOpt, "key1".getBytes(), "value3".getBytes());
+ assertThat(db.get("key1".getBytes())).isEqualTo(
+ "value,value2,value3".getBytes());
+      // merge on a non-existent key shall insert the value
+ db.merge(wOpt, "key2".getBytes(), "xxxx".getBytes());
+ assertThat(db.get("key2".getBytes())).isEqualTo(
+ "xxxx".getBytes());
+
+ Segment key3 = sliceSegment("key3");
+ Segment key4 = sliceSegment("key4");
+ Segment value0 = sliceSegment("value 0");
+ Segment value1 = sliceSegment("value 1");
+
+ db.merge(key3.data, key3.offset, key3.len, value0.data, value0.offset, value0.len);
+ db.merge(wOpt, key4.data, key4.offset, key4.len, value1.data, value1.offset, value1.len);
+
+ // compare
+ Assert.assertTrue(value0.isSamePayload(db.get(key3.data, key3.offset, key3.len)));
+ Assert.assertTrue(value1.isSamePayload(db.get(key4.data, key4.offset, key4.len)));
+ }
+ }
+
+ @Test
+ public void delete() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions wOpt = new WriteOptions()) {
+ db.put("key1".getBytes(), "value".getBytes());
+ db.put("key2".getBytes(), "12345678".getBytes());
+ db.put("key3".getBytes(), "33".getBytes());
+ assertThat(db.get("key1".getBytes())).isEqualTo(
+ "value".getBytes());
+ assertThat(db.get("key2".getBytes())).isEqualTo(
+ "12345678".getBytes());
+ assertThat(db.get("key3".getBytes())).isEqualTo("33".getBytes());
+ db.delete("key1".getBytes());
+ db.delete(wOpt, "key2".getBytes());
+ ByteBuffer key = ByteBuffer.allocateDirect(16);
+ key.put("key3".getBytes()).flip();
+ db.delete(wOpt, key);
+ assertThat(key.position()).isEqualTo(4);
+ assertThat(key.limit()).isEqualTo(4);
+
+ assertThat(db.get("key1".getBytes())).isNull();
+ assertThat(db.get("key2".getBytes())).isNull();
+
+ Segment key3 = sliceSegment("key3");
+ Segment key4 = sliceSegment("key4");
+ db.put("key3".getBytes(), "key3 value".getBytes());
+ db.put("key4".getBytes(), "key4 value".getBytes());
+
+ db.delete(key3.data, key3.offset, key3.len);
+ db.delete(wOpt, key4.data, key4.offset, key4.len);
+
+ assertThat(db.get("key3".getBytes())).isNull();
+ assertThat(db.get("key4".getBytes())).isNull();
+ }
+ }
+
+ @Test
+ public void singleDelete() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions wOpt = new WriteOptions()) {
+ db.put("key1".getBytes(), "value".getBytes());
+ db.put("key2".getBytes(), "12345678".getBytes());
+ assertThat(db.get("key1".getBytes())).isEqualTo(
+ "value".getBytes());
+ assertThat(db.get("key2".getBytes())).isEqualTo(
+ "12345678".getBytes());
+ db.singleDelete("key1".getBytes());
+ db.singleDelete(wOpt, "key2".getBytes());
+ assertThat(db.get("key1".getBytes())).isNull();
+ assertThat(db.get("key2".getBytes())).isNull();
+ }
+ }
+
+ @Test
+ public void singleDelete_nonExisting() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions wOpt = new WriteOptions()) {
+ db.singleDelete("key1".getBytes());
+ db.singleDelete(wOpt, "key2".getBytes());
+ assertThat(db.get("key1".getBytes())).isNull();
+ assertThat(db.get("key2".getBytes())).isNull();
+ }
+ }
+
+ @Test
+ public void deleteRange() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath())) {
+ db.put("key1".getBytes(), "value".getBytes());
+ db.put("key2".getBytes(), "12345678".getBytes());
+ db.put("key3".getBytes(), "abcdefg".getBytes());
+ db.put("key4".getBytes(), "xyz".getBytes());
+ assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
+ assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes());
+ assertThat(db.get("key3".getBytes())).isEqualTo("abcdefg".getBytes());
+ assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
+ db.deleteRange("key2".getBytes(), "key4".getBytes());
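+      // deleteRange operates on the half-open interval [beginKey, endKey):
+      // "key2" and "key3" are removed, while "key4", the exclusive end key,
+      // survives.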
+ assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
+ assertThat(db.get("key2".getBytes())).isNull();
+ assertThat(db.get("key3".getBytes())).isNull();
+ assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
+ }
+ }
+
+ @Test
+ public void getIntProperty() throws RocksDBException {
+ try (
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setMaxWriteBufferNumber(10)
+ .setMinWriteBufferNumberToMerge(10);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions wOpt = new WriteOptions().setDisableWAL(true)
+ ) {
+ db.put(wOpt, "key1".getBytes(), "value1".getBytes());
+ db.put(wOpt, "key2".getBytes(), "value2".getBytes());
+ db.put(wOpt, "key3".getBytes(), "value3".getBytes());
+ db.put(wOpt, "key4".getBytes(), "value4".getBytes());
+ assertThat(db.getLongProperty("rocksdb.num-entries-active-mem-table"))
+ .isGreaterThan(0);
+ assertThat(db.getLongProperty("rocksdb.cur-size-active-mem-table"))
+ .isGreaterThan(0);
+ }
+ }
+
+ @Test
+ public void fullCompactRange() throws RocksDBException {
+ try (final Options opt = new Options().
+ setCreateIfMissing(true).
+ setDisableAutoCompactions(true).
+ setCompactionStyle(CompactionStyle.LEVEL).
+ setNumLevels(4).
+ setWriteBufferSize(100 << 10).
+ setLevelZeroFileNumCompactionTrigger(3).
+ setTargetFileSizeBase(200 << 10).
+ setTargetFileSizeMultiplier(1).
+ setMaxBytesForLevelBase(500 << 10).
+ setMaxBytesForLevelMultiplier(1).
+ setDisableAutoCompactions(false);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // fill database with key/value pairs
+ byte[] b = new byte[10000];
+ for (int i = 0; i < 200; i++) {
+ rand.nextBytes(b);
+ db.put((String.valueOf(i)).getBytes(), b);
+ }
+ db.compactRange();
+ }
+ }
+
+ @Test
+ public void fullCompactRangeColumnFamily()
+ throws RocksDBException {
+ try (
+ final DBOptions opt = new DBOptions().
+ setCreateIfMissing(true).
+ setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
+ setDisableAutoCompactions(true).
+ setCompactionStyle(CompactionStyle.LEVEL).
+ setNumLevels(4).
+ setWriteBufferSize(100 << 10).
+ setLevelZeroFileNumCompactionTrigger(3).
+ setTargetFileSizeBase(200 << 10).
+ setTargetFileSizeMultiplier(1).
+ setMaxBytesForLevelBase(500 << 10).
+ setMaxBytesForLevelMultiplier(1).
+ setDisableAutoCompactions(false)
+ ) {
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts));
+
+ // open database
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath(),
+ columnFamilyDescriptors,
+ columnFamilyHandles)) {
+ try {
+ // fill database with key/value pairs
+ byte[] b = new byte[10000];
+ for (int i = 0; i < 200; i++) {
+ rand.nextBytes(b);
+ db.put(columnFamilyHandles.get(1),
+ String.valueOf(i).getBytes(), b);
+ }
+ db.compactRange(columnFamilyHandles.get(1));
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void compactRangeWithKeys()
+ throws RocksDBException {
+ try (final Options opt = new Options().
+ setCreateIfMissing(true).
+ setDisableAutoCompactions(true).
+ setCompactionStyle(CompactionStyle.LEVEL).
+ setNumLevels(4).
+ setWriteBufferSize(100 << 10).
+ setLevelZeroFileNumCompactionTrigger(3).
+ setTargetFileSizeBase(200 << 10).
+ setTargetFileSizeMultiplier(1).
+ setMaxBytesForLevelBase(500 << 10).
+ setMaxBytesForLevelMultiplier(1).
+ setDisableAutoCompactions(false);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // fill database with key/value pairs
+ byte[] b = new byte[10000];
+ for (int i = 0; i < 200; i++) {
+ rand.nextBytes(b);
+ db.put((String.valueOf(i)).getBytes(), b);
+ }
+ db.compactRange("0".getBytes(), "201".getBytes());
+ }
+ }
+
+ @Test
+ public void compactRangeWithKeysReduce()
+ throws RocksDBException {
+ try (
+ final Options opt = new Options().
+ setCreateIfMissing(true).
+ setDisableAutoCompactions(true).
+ setCompactionStyle(CompactionStyle.LEVEL).
+ setNumLevels(4).
+ setWriteBufferSize(100 << 10).
+ setLevelZeroFileNumCompactionTrigger(3).
+ setTargetFileSizeBase(200 << 10).
+ setTargetFileSizeMultiplier(1).
+ setMaxBytesForLevelBase(500 << 10).
+ setMaxBytesForLevelMultiplier(1).
+ setDisableAutoCompactions(false);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ // fill database with key/value pairs
+ byte[] b = new byte[10000];
+ for (int i = 0; i < 200; i++) {
+ rand.nextBytes(b);
+ db.put((String.valueOf(i)).getBytes(), b);
+ }
+ db.flush(new FlushOptions().setWaitForFlush(true));
+ try (final CompactRangeOptions compactRangeOpts = new CompactRangeOptions()
+ .setChangeLevel(true)
+ .setTargetLevel(-1)
+ .setTargetPathId(0)) {
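+        // With setChangeLevel(true) and setTargetLevel(-1), the compaction
+        // output is moved to the minimal level still able to hold the data,
+        // instead of staying wherever the compaction placed it.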
+ db.compactRange(null, "0".getBytes(), "201".getBytes(),
+ compactRangeOpts);
+ }
+ }
+ }
+
+ @Test
+ public void compactRangeWithKeysColumnFamily()
+ throws RocksDBException {
+ try (final DBOptions opt = new DBOptions().
+ setCreateIfMissing(true).
+ setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
+ setDisableAutoCompactions(true).
+ setCompactionStyle(CompactionStyle.LEVEL).
+ setNumLevels(4).
+ setWriteBufferSize(100 << 10).
+ setLevelZeroFileNumCompactionTrigger(3).
+ setTargetFileSizeBase(200 << 10).
+ setTargetFileSizeMultiplier(1).
+ setMaxBytesForLevelBase(500 << 10).
+ setMaxBytesForLevelMultiplier(1).
+ setDisableAutoCompactions(false)
+ ) {
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
+ );
+
+ // open database
+ final List<ColumnFamilyHandle> columnFamilyHandles =
+ new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath(),
+ columnFamilyDescriptors,
+ columnFamilyHandles)) {
+ try {
+ // fill database with key/value pairs
+ byte[] b = new byte[10000];
+ for (int i = 0; i < 200; i++) {
+ rand.nextBytes(b);
+ db.put(columnFamilyHandles.get(1),
+ String.valueOf(i).getBytes(), b);
+ }
+ db.compactRange(columnFamilyHandles.get(1),
+ "0".getBytes(), "201".getBytes());
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void compactRangeWithKeysReduceColumnFamily()
+ throws RocksDBException {
+ try (final DBOptions opt = new DBOptions().
+ setCreateIfMissing(true).
+ setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
+ setDisableAutoCompactions(true).
+ setCompactionStyle(CompactionStyle.LEVEL).
+ setNumLevels(4).
+ setWriteBufferSize(100 << 10).
+ setLevelZeroFileNumCompactionTrigger(3).
+ setTargetFileSizeBase(200 << 10).
+ setTargetFileSizeMultiplier(1).
+ setMaxBytesForLevelBase(500 << 10).
+ setMaxBytesForLevelMultiplier(1).
+ setDisableAutoCompactions(false)
+ ) {
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
+ );
+
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+ // open database
+ try (final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath(),
+ columnFamilyDescriptors,
+ columnFamilyHandles)) {
+ try (final CompactRangeOptions compactRangeOpts = new CompactRangeOptions()
+ .setChangeLevel(true)
+ .setTargetLevel(-1)
+ .setTargetPathId(0)) {
+ // fill database with key/value pairs
+ byte[] b = new byte[10000];
+ for (int i = 0; i < 200; i++) {
+ rand.nextBytes(b);
+ db.put(columnFamilyHandles.get(1),
+ String.valueOf(i).getBytes(), b);
+ }
+ db.compactRange(columnFamilyHandles.get(1), "0".getBytes(),
+ "201".getBytes(), compactRangeOpts);
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void compactRangeToLevel()
+ throws RocksDBException, InterruptedException {
+ final int NUM_KEYS_PER_L0_FILE = 100;
+ final int KEY_SIZE = 20;
+ final int VALUE_SIZE = 300;
+ final int L0_FILE_SIZE =
+ NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
+ final int NUM_L0_FILES = 10;
+ final int TEST_SCALE = 5;
+ final int KEY_INTERVAL = 100;
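+    // For reference: L0_FILE_SIZE = 100 * (20 + 300) = 32,000 bytes, so the
+    // write buffer below is 64,000 bytes, while the target file size and
+    // level-base limits are set far above what this test writes, keeping
+    // all compaction strictly manual.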
+ try (final Options opt = new Options().
+ setCreateIfMissing(true).
+ setCompactionStyle(CompactionStyle.LEVEL).
+ setNumLevels(5).
+        // a write buffer slightly bigger than an L0 file,
+        // so that a manual flush is guaranteed to happen
+        // before any background flush kicks in.
+ setWriteBufferSize(L0_FILE_SIZE * 2).
+ // Disable auto L0 -> L1 compaction
+ setLevelZeroFileNumCompactionTrigger(20).
+ setTargetFileSizeBase(L0_FILE_SIZE * 100).
+ setTargetFileSizeMultiplier(1).
+ // To disable auto compaction
+ setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100).
+ setMaxBytesForLevelMultiplier(2).
+ setDisableAutoCompactions(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())
+ ) {
+ // fill database with key/value pairs
+ byte[] value = new byte[VALUE_SIZE];
+ int int_key = 0;
+ for (int round = 0; round < 5; ++round) {
+ int initial_key = int_key;
+ for (int f = 1; f <= NUM_L0_FILES; ++f) {
+ for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) {
+ int_key += KEY_INTERVAL;
+ rand.nextBytes(value);
+
+ db.put(String.format("%020d", int_key).getBytes(),
+ value);
+ }
+ db.flush(new FlushOptions().setWaitForFlush(true));
+          // Make sure we created one more L0 file.
+ assertThat(
+ db.getProperty("rocksdb.num-files-at-level0")).
+ isEqualTo("" + f);
+ }
+
+ // Compact all L0 files we just created
+ db.compactRange(
+ String.format("%020d", initial_key).getBytes(),
+ String.format("%020d", int_key - 1).getBytes());
+        // Make sure there aren't any L0 files left.
+ assertThat(
+ db.getProperty("rocksdb.num-files-at-level0")).
+ isEqualTo("0");
+        // Make sure there are some L1 files.
+        // Here we only check != 0 instead of a specific number,
+        // as we don't want the test to make any assumptions about
+        // how compaction works.
+ assertThat(
+ db.getProperty("rocksdb.num-files-at-level1")).
+ isNotEqualTo("0");
+        // Because we only compacted the keys issued in this
+        // round, there shouldn't have been any L1 -> L2
+        // compaction, so we expect zero L2 files here.
+ assertThat(
+ db.getProperty("rocksdb.num-files-at-level2")).
+ isEqualTo("0");
+ }
+ }
+ }
+
+ @Test
+ public void deleteFilesInRange() throws RocksDBException, InterruptedException {
+ final int KEY_SIZE = 20;
+ final int VALUE_SIZE = 1000;
+ final int FILE_SIZE = 64000;
+ final int NUM_FILES = 10;
+
+ final int KEY_INTERVAL = 10000;
+    /*
+     * The intention of these options is to end up reliably with 10 files,
+     * which we will then delete using deleteFilesInRange.
+     * We write roughly the number of keys that fit in 10 files of the
+     * target size, interleaved so that the files flushed from memory to L0
+     * overlap. Compaction then cleans everything up, and we should end up
+     * with 10 files.
+     */
+ try (final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setCompressionType(CompressionType.NO_COMPRESSION)
+ .setTargetFileSizeBase(FILE_SIZE)
+ .setWriteBufferSize(FILE_SIZE / 2)
+ .setDisableAutoCompactions(true);
+ final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+ int records = FILE_SIZE / (KEY_SIZE + VALUE_SIZE);
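+      // records = 64,000 / (20 + 1,000) = 62 keys per target-size file, so
+      // NUM_FILES passes of the outer loop below write roughly ten files'
+      // worth of interleaved keys.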
+
+ // fill database with key/value pairs
+ byte[] value = new byte[VALUE_SIZE];
+ int key_init = 0;
+ for (int o = 0; o < NUM_FILES; ++o) {
+ int int_key = key_init++;
+ for (int i = 0; i < records; ++i) {
+ int_key += KEY_INTERVAL;
+ rand.nextBytes(value);
+
+ db.put(String.format("%020d", int_key).getBytes(), value);
+ }
+ }
+ db.flush(new FlushOptions().setWaitForFlush(true));
+ db.compactRange();
+      // After the full compaction there should be no files left at L0.
+ assertThat(db.getProperty("rocksdb.num-files-at-level0")).isEqualTo("0");
+
+      // Should be 10 files, but we are OK with asserting +/- 2
+ int files = Integer.parseInt(db.getProperty("rocksdb.num-files-at-level1"));
+ assertThat(files).isBetween(8, 12);
+
+      // Delete roughly the lower 60% of the key range. The result should be
+      // 5 files, but we are OK with asserting +/- 2. What matters is knowing
+      // that something was deleted (i.e. the JNI call did something); the
+      // exact assertions are done in the C++ unit tests.
+ db.deleteFilesInRanges(null,
+ Arrays.asList(null, String.format("%020d", records * KEY_INTERVAL * 6 / 10).getBytes()),
+ false);
+ files = Integer.parseInt(db.getProperty("rocksdb.num-files-at-level1"));
+ assertThat(files).isBetween(3, 7);
+ }
+ }
+
+ @Test
+ public void compactRangeToLevelColumnFamily()
+ throws RocksDBException {
+ final int NUM_KEYS_PER_L0_FILE = 100;
+ final int KEY_SIZE = 20;
+ final int VALUE_SIZE = 300;
+ final int L0_FILE_SIZE =
+ NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
+ final int NUM_L0_FILES = 10;
+ final int TEST_SCALE = 5;
+ final int KEY_INTERVAL = 100;
+
+ try (final DBOptions opt = new DBOptions().
+ setCreateIfMissing(true).
+ setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
+ setCompactionStyle(CompactionStyle.LEVEL).
+ setNumLevels(5).
+          // a write buffer slightly bigger than an L0 file,
+          // so that a manual flush is guaranteed to happen
+          // before any background flush kicks in.
+ setWriteBufferSize(L0_FILE_SIZE * 2).
+ // Disable auto L0 -> L1 compaction
+ setLevelZeroFileNumCompactionTrigger(20).
+ setTargetFileSizeBase(L0_FILE_SIZE * 100).
+ setTargetFileSizeMultiplier(1).
+ // To disable auto compaction
+ setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100).
+ setMaxBytesForLevelMultiplier(2).
+ setDisableAutoCompactions(true)
+ ) {
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
+ );
+
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+ // open database
+ try (final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath(),
+ columnFamilyDescriptors,
+ columnFamilyHandles)) {
+ try {
+ // fill database with key/value pairs
+ byte[] value = new byte[VALUE_SIZE];
+ int int_key = 0;
+ for (int round = 0; round < 5; ++round) {
+ int initial_key = int_key;
+ for (int f = 1; f <= NUM_L0_FILES; ++f) {
+ for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) {
+ int_key += KEY_INTERVAL;
+ rand.nextBytes(value);
+
+ db.put(columnFamilyHandles.get(1),
+ String.format("%020d", int_key).getBytes(),
+ value);
+ }
+ db.flush(new FlushOptions().setWaitForFlush(true),
+ columnFamilyHandles.get(1));
+            // Make sure we created one more L0 file.
+ assertThat(
+ db.getProperty(columnFamilyHandles.get(1),
+ "rocksdb.num-files-at-level0")).
+ isEqualTo("" + f);
+ }
+
+ // Compact all L0 files we just created
+ db.compactRange(
+ columnFamilyHandles.get(1),
+ String.format("%020d", initial_key).getBytes(),
+ String.format("%020d", int_key - 1).getBytes());
+          // Make sure there aren't any L0 files left.
+ assertThat(
+ db.getProperty(columnFamilyHandles.get(1),
+ "rocksdb.num-files-at-level0")).
+ isEqualTo("0");
+          // Make sure there are some L1 files.
+          // Here we only check != 0 instead of a specific number,
+          // as we don't want the test to make any assumptions about
+          // how compaction works.
+ assertThat(
+ db.getProperty(columnFamilyHandles.get(1),
+ "rocksdb.num-files-at-level1")).
+ isNotEqualTo("0");
+          // Because we only compacted the keys issued in this
+          // round, there shouldn't have been any L1 -> L2
+          // compaction, so we expect zero L2 files here.
+ assertThat(
+ db.getProperty(columnFamilyHandles.get(1),
+ "rocksdb.num-files-at-level2")).
+ isEqualTo("0");
+ }
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void pauseContinueBackgroundWork() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())
+ ) {
+ db.pauseBackgroundWork();
+ db.continueBackgroundWork();
+ db.pauseBackgroundWork();
+ db.continueBackgroundWork();
+ }
+ }
+
+ @Test
+ public void enableDisableFileDeletions() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())
+ ) {
+ db.disableFileDeletions();
+ db.enableFileDeletions(false);
+ db.disableFileDeletions();
+ db.enableFileDeletions(true);
+ }
+ }
+
+ @Test
+ public void setOptions() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions()
+ .setWriteBufferSize(4096)) {
+
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts));
+
+ // open database
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) {
+ try {
+ final MutableColumnFamilyOptions mutableOptions =
+ MutableColumnFamilyOptions.builder()
+ .setWriteBufferSize(2048)
+ .build();
+
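+          // setOptions applies mutable column-family options to the live
+          // database without reopening it; here the write buffer shrinks
+          // from the 4096 bytes set at open time to 2048 bytes.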
+ db.setOptions(columnFamilyHandles.get(1), mutableOptions);
+
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void destroyDB() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.put("key1".getBytes(), "value".getBytes());
+ }
+ assertThat(dbFolder.getRoot().exists() && dbFolder.getRoot().listFiles().length != 0)
+ .isTrue();
+ RocksDB.destroyDB(dbPath, options);
+ assertThat(dbFolder.getRoot().exists() && dbFolder.getRoot().listFiles().length != 0)
+ .isFalse();
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void destroyDBFailIfOpen() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ // Fails as the db is open and locked.
+ RocksDB.destroyDB(dbPath, options);
+ }
+ }
+ }
+
+ @Ignore("This test crashes. Re-enable after fixing.")
+ @Test
+ public void getApproximateSizes() throws RocksDBException {
+    final byte[] key1 = "key1".getBytes(UTF_8);
+    final byte[] key2 = "key2".getBytes(UTF_8);
+    final byte[] key3 = "key3".getBytes(UTF_8);
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.put(key1, key1);
+ db.put(key2, key2);
+ db.put(key3, key3);
+
+ final long[] sizes = db.getApproximateSizes(
+ Arrays.asList(
+ new Range(new Slice(key1), new Slice(key2)),
+ new Range(new Slice(key2), new Slice(key3))
+ ),
+ SizeApproximationFlag.INCLUDE_FILES,
+ SizeApproximationFlag.INCLUDE_MEMTABLES);
+
+ assertThat(sizes.length).isEqualTo(2);
+ assertThat(sizes[0]).isEqualTo(0);
+ assertThat(sizes[1]).isGreaterThanOrEqualTo(1);
+ }
+ }
+ }
+
+ @Test
+ public void getApproximateMemTableStats() throws RocksDBException {
+    final byte[] key1 = "key1".getBytes(UTF_8);
+    final byte[] key2 = "key2".getBytes(UTF_8);
+    final byte[] key3 = "key3".getBytes(UTF_8);
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.put(key1, key1);
+ db.put(key2, key2);
+ db.put(key3, key3);
+
+ final RocksDB.CountAndSize stats =
+ db.getApproximateMemTableStats(
+ new Range(new Slice(key1), new Slice(key3)));
+
+ assertThat(stats).isNotNull();
+ assertThat(stats.count).isGreaterThan(1);
+ assertThat(stats.size).isGreaterThan(1);
+ }
+ }
+ }
+
+ @Ignore("TODO(AR) re-enable when ready!")
+ @Test
+ public void compactFiles() throws RocksDBException {
+ final int kTestKeySize = 16;
+ final int kTestValueSize = 984;
+ final int kEntrySize = kTestKeySize + kTestValueSize;
+ final int kEntriesPerBuffer = 100;
+ final int writeBufferSize = kEntrySize * kEntriesPerBuffer;
+ final byte[] cfName = "pikachu".getBytes(UTF_8);
+
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setWriteBufferSize(writeBufferSize)
+ .setCompactionStyle(CompactionStyle.LEVEL)
+ .setTargetFileSizeBase(writeBufferSize)
+ .setMaxBytesForLevelBase(writeBufferSize * 2)
+ .setLevel0StopWritesTrigger(2)
+ .setMaxBytesForLevelMultiplier(2)
+ .setCompressionType(CompressionType.NO_COMPRESSION)
+ .setMaxSubcompactions(4)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath);
+ final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions(options)) {
+ db.createColumnFamily(new ColumnFamilyDescriptor(cfName,
+ cfOptions)).close();
+ }
+
+ try (final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions(options)) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions),
+ new ColumnFamilyDescriptor(cfName, cfOptions)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ try (final DBOptions dbOptions = new DBOptions(options);
+ final RocksDB db = RocksDB.open(dbOptions, dbPath, cfDescriptors,
+ cfHandles);
+ ) {
+ try (final FlushOptions flushOptions = new FlushOptions()
+ .setWaitForFlush(true)
+ .setAllowWriteStall(true);
+ final CompactionOptions compactionOptions = new CompactionOptions()) {
+ final Random rnd = new Random(301);
+ for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
+ final byte[] value = new byte[kTestValueSize];
+ rnd.nextBytes(value);
+ db.put(cfHandles.get(1), Integer.toString(key).getBytes(UTF_8),
+ value);
+ }
+ db.flush(flushOptions, cfHandles);
+
+ final RocksDB.LiveFiles liveFiles = db.getLiveFiles();
+ final List<String> compactedFiles =
+ db.compactFiles(compactionOptions, cfHandles.get(1),
+ liveFiles.files, 1, -1, null);
+ assertThat(compactedFiles).isNotEmpty();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void enableAutoCompaction() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)) {
+ final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+ try {
+ db.enableAutoCompaction(cfHandles);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void numberLevels() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.numberLevels()).isEqualTo(7);
+ }
+ }
+ }
+
+ @Test
+ public void maxMemCompactionLevel() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.maxMemCompactionLevel()).isEqualTo(0);
+ }
+ }
+ }
+
+ @Test
+ public void level0StopWriteTrigger() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.level0StopWriteTrigger()).isEqualTo(36);
+ }
+ }
+ }
+
+ @Test
+ public void getName() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.getName()).isEqualTo(dbPath);
+ }
+ }
+ }
+
+ @Test
+ public void getEnv() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.getEnv()).isEqualTo(Env.getDefault());
+ }
+ }
+ }
+
+ @Test
+ public void flush() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath);
+ final FlushOptions flushOptions = new FlushOptions()) {
+ db.flush(flushOptions);
+ }
+ }
+ }
+
+ @Test
+ public void flushWal() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.flushWal(true);
+ }
+ }
+ }
+
+ @Test
+ public void syncWal() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.syncWal();
+ }
+ }
+ }
+
+ @Test
+ public void setPreserveDeletesSequenceNumber() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.setPreserveDeletesSequenceNumber(db.getLatestSequenceNumber()))
+ .isFalse();
+ }
+ }
+ }
+
+ @Test
+ public void getLiveFiles() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ final RocksDB.LiveFiles livefiles = db.getLiveFiles(true);
+ assertThat(livefiles).isNotNull();
+ assertThat(livefiles.manifestFileSize).isEqualTo(13);
+ assertThat(livefiles.files.size()).isEqualTo(3);
+ assertThat(livefiles.files.get(0)).isEqualTo("/CURRENT");
+ assertThat(livefiles.files.get(1)).isEqualTo("/MANIFEST-000001");
+ assertThat(livefiles.files.get(2)).isEqualTo("/OPTIONS-000005");
+ }
+ }
+ }
+
+ @Test
+ public void getSortedWalFiles() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ final List<LogFile> logFiles = db.getSortedWalFiles();
+ assertThat(logFiles).isNotNull();
+ assertThat(logFiles.size()).isEqualTo(1);
+ assertThat(logFiles.get(0).type())
+ .isEqualTo(WalFileType.kAliveLogFile);
+ }
+ }
+ }
+
+ @Test
+ public void deleteFile() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.deleteFile("unknown");
+ }
+ }
+ }
+
+ @Test
+ public void getLiveFilesMetaData() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ final List<LiveFileMetaData> liveFilesMetaData
+ = db.getLiveFilesMetaData();
+ assertThat(liveFilesMetaData).isEmpty();
+ }
+ }
+ }
+
+ @Test
+ public void getColumnFamilyMetaData() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)) {
+ final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+ db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ try {
+ final ColumnFamilyMetaData cfMetadata =
+ db.getColumnFamilyMetaData(cfHandles.get(0));
+ assertThat(cfMetadata).isNotNull();
+ assertThat(cfMetadata.name()).isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY);
+ assertThat(cfMetadata.levels().size()).isEqualTo(7);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void verifyChecksum() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.verifyChecksum();
+ }
+ }
+ }
+
+ @Test
+ public void getPropertiesOfAllTables() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)) {
+ final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+ db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ try {
+ final Map<String, TableProperties> properties =
+ db.getPropertiesOfAllTables(cfHandles.get(0));
+ assertThat(properties).isNotNull();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void getPropertiesOfTablesInRange() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)) {
+ final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+ db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ db.put(cfHandles.get(0), "key2".getBytes(UTF_8), "value2".getBytes(UTF_8));
+ db.put(cfHandles.get(0), "key3".getBytes(UTF_8), "value3".getBytes(UTF_8));
+ try {
+ final Range range = new Range(
+ new Slice("key1".getBytes(UTF_8)),
+ new Slice("key3".getBytes(UTF_8)));
+ final Map<String, TableProperties> properties =
+ db.getPropertiesOfTablesInRange(
+ cfHandles.get(0), Arrays.asList(range));
+ assertThat(properties).isNotNull();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void suggestCompactRange() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)) {
+ final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+ db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ db.put(cfHandles.get(0), "key2".getBytes(UTF_8), "value2".getBytes(UTF_8));
+ db.put(cfHandles.get(0), "key3".getBytes(UTF_8), "value3".getBytes(UTF_8));
+ try {
+ final Range range = db.suggestCompactRange(cfHandles.get(0));
+ assertThat(range).isNotNull();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void promoteL0() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.promoteL0(2);
+ }
+ }
+ }
+
+ @Test
+ public void startTrace() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ final TraceOptions traceOptions = new TraceOptions();
+
+ try (final InMemoryTraceWriter traceWriter = new InMemoryTraceWriter()) {
+ db.startTrace(traceOptions, traceWriter);
+
+ db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+
+ db.endTrace();
+
+ final List<byte[]> writes = traceWriter.getWrites();
+ assertThat(writes.size()).isGreaterThan(0);
+ }
+ }
+ }
+ }
+
+ @Test
+ public void setDBOptions() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions()
+ .setWriteBufferSize(4096)) {
+
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts));
+
+ // open database
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) {
+ try {
+ final MutableDBOptions mutableOptions =
+ MutableDBOptions.builder()
+ .setBytesPerSync(1024 * 1027 * 7)
+ .setAvoidFlushDuringShutdown(false)
+ .build();
+
+ db.setDBOptions(mutableOptions);
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ private static class InMemoryTraceWriter extends AbstractTraceWriter {
+ private final List<byte[]> writes = new ArrayList<>();
+ private volatile boolean closed = false;
+
+ @Override
+ public void write(final Slice slice) {
+ if (closed) {
+ return;
+ }
+ final byte[] data = slice.data();
+ final byte[] dataCopy = new byte[data.length];
+ System.arraycopy(data, 0, dataCopy, 0, data.length);
+ writes.add(dataCopy);
+ }
+
+ @Override
+ public void closeWriter() {
+ closed = true;
+ }
+
+ @Override
+ public long getFileSize() {
+ long size = 0;
+      for (final byte[] write : writes) {
+        size += write.length;
+      }
+ return size;
+ }
+
+ public List<byte[]> getWrites() {
+ return writes;
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java
new file mode 100644
index 000000000..a8f773b57
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java
@@ -0,0 +1,203 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.nio.ByteBuffer;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+public class RocksIteratorTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void rocksIterator() throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ db.put("key1".getBytes(), "value1".getBytes());
+ db.put("key2".getBytes(), "value2".getBytes());
+
+ try (final RocksIterator iterator = db.newIterator()) {
+ iterator.seekToFirst();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key1".getBytes());
+ assertThat(iterator.value()).isEqualTo("value1".getBytes());
+
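+        // iterator.key(buf) and iterator.value(buf) follow the same
+        // contract as get into a buffer: the return value is the full
+        // key/value length, and only as many bytes as fit between position
+        // and limit are copied, as the deliberately small 2-byte buffers
+        // below demonstrate.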
+ ByteBuffer key = ByteBuffer.allocateDirect(2);
+ ByteBuffer value = ByteBuffer.allocateDirect(2);
+ assertThat(iterator.key(key)).isEqualTo(4);
+ assertThat(iterator.value(value)).isEqualTo(6);
+
+ assertThat(key.position()).isEqualTo(0);
+ assertThat(key.limit()).isEqualTo(2);
+ assertThat(value.position()).isEqualTo(0);
+ assertThat(value.limit()).isEqualTo(2);
+
+ byte[] tmp = new byte[2];
+ key.get(tmp);
+ assertThat(tmp).isEqualTo("ke".getBytes());
+ value.get(tmp);
+ assertThat(tmp).isEqualTo("va".getBytes());
+
+ key = ByteBuffer.allocateDirect(12);
+ value = ByteBuffer.allocateDirect(12);
+ assertThat(iterator.key(key)).isEqualTo(4);
+ assertThat(iterator.value(value)).isEqualTo(6);
+ assertThat(key.position()).isEqualTo(0);
+ assertThat(key.limit()).isEqualTo(4);
+ assertThat(value.position()).isEqualTo(0);
+ assertThat(value.limit()).isEqualTo(6);
+
+ tmp = new byte[4];
+ key.get(tmp);
+ assertThat(tmp).isEqualTo("key1".getBytes());
+ tmp = new byte[6];
+ value.get(tmp);
+ assertThat(tmp).isEqualTo("value1".getBytes());
+
+ iterator.next();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key2".getBytes());
+ assertThat(iterator.value()).isEqualTo("value2".getBytes());
+ iterator.next();
+ assertThat(iterator.isValid()).isFalse();
+ iterator.seekToLast();
+ iterator.prev();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key1".getBytes());
+ assertThat(iterator.value()).isEqualTo("value1".getBytes());
+ iterator.seekToFirst();
+ iterator.seekToLast();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key2".getBytes());
+ assertThat(iterator.value()).isEqualTo("value2".getBytes());
+ iterator.status();
+
+ key.clear();
+ key.put("key1".getBytes());
+ key.flip();
+ iterator.seek(key);
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.value()).isEqualTo("value1".getBytes());
+ assertThat(key.position()).isEqualTo(4);
+ assertThat(key.limit()).isEqualTo(4);
+
+ key.clear();
+ key.put("key2".getBytes());
+ key.flip();
+ iterator.seekForPrev(key);
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.value()).isEqualTo("value2".getBytes());
+ assertThat(key.position()).isEqualTo(4);
+ assertThat(key.limit()).isEqualTo(4);
+ }
+
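+      // seek() positions the iterator at the first key >= the target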
+ try (final RocksIterator iterator = db.newIterator()) {
+ iterator.seek("key0".getBytes());
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key1".getBytes());
+
+ iterator.seek("key1".getBytes());
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key1".getBytes());
+
+ iterator.seek("key1.5".getBytes());
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key2".getBytes());
+
+ iterator.seek("key2".getBytes());
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key2".getBytes());
+
+ iterator.seek("key3".getBytes());
+ assertThat(iterator.isValid()).isFalse();
+ }
+
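+      // seekForPrev() positions the iterator at the last key <= the target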
+ try (final RocksIterator iterator = db.newIterator()) {
+ iterator.seekForPrev("key0".getBytes());
+ assertThat(iterator.isValid()).isFalse();
+
+ iterator.seekForPrev("key1".getBytes());
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key1".getBytes());
+
+ iterator.seekForPrev("key1.5".getBytes());
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key1".getBytes());
+
+ iterator.seekForPrev("key2".getBytes());
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key2".getBytes());
+
+ iterator.seekForPrev("key3".getBytes());
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key2".getBytes());
+ }
+ }
+ }
+
+ @Test
+ public void rocksIteratorReleaseAfterCfClose() throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(options,
+ this.dbFolder.getRoot().getAbsolutePath())) {
+ db.put("key".getBytes(), "value".getBytes());
+
+ // Test case: release iterator after default CF close
+ try (final RocksIterator iterator = db.newIterator()) {
+      // In fact, calling close() on the default CF handle has no effect
+ db.getDefaultColumnFamily().close();
+
+ iterator.seekToFirst();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key".getBytes());
+ assertThat(iterator.value()).isEqualTo("value".getBytes());
+ }
+
+ // Test case: release iterator after custom CF close
+ ColumnFamilyDescriptor cfd1 = new ColumnFamilyDescriptor("cf1".getBytes());
+ ColumnFamilyHandle cfHandle1 = db.createColumnFamily(cfd1);
+ db.put(cfHandle1, "key1".getBytes(), "value1".getBytes());
+
+ try (final RocksIterator iterator = db.newIterator(cfHandle1)) {
+ cfHandle1.close();
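+        // the open iterator holds its own reference to the column family,
+        // so closing the handle does not invalidate it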
+
+ iterator.seekToFirst();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key1".getBytes());
+ assertThat(iterator.value()).isEqualTo("value1".getBytes());
+ }
+
+ // Test case: release iterator after custom CF drop & close
+ ColumnFamilyDescriptor cfd2 = new ColumnFamilyDescriptor("cf2".getBytes());
+ ColumnFamilyHandle cfHandle2 = db.createColumnFamily(cfd2);
+ db.put(cfHandle2, "key2".getBytes(), "value2".getBytes());
+
+ try (final RocksIterator iterator = db.newIterator(cfHandle2)) {
+ db.dropColumnFamily(cfHandle2);
+ cfHandle2.close();
+
+ iterator.seekToFirst();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key2".getBytes());
+ assertThat(iterator.value()).isEqualTo("value2".getBytes());
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
new file mode 100644
index 000000000..a03a0f0ae
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
@@ -0,0 +1,146 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class RocksMemEnvTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+ public void memEnvFillAndReopen() throws RocksDBException {
+
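+    // RocksMemEnv keeps all database files in memory,
+    // so "dir/db" is a purely virtual path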
+ final byte[][] keys = {
+ "aaa".getBytes(),
+ "bbb".getBytes(),
+ "ccc".getBytes()
+ };
+
+ final byte[][] values = {
+ "foo".getBytes(),
+ "bar".getBytes(),
+ "baz".getBytes()
+ };
+
+ try (final Env env = new RocksMemEnv(Env.getDefault());
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setEnv(env);
+ final FlushOptions flushOptions = new FlushOptions()
+ .setWaitForFlush(true);
+ ) {
+ try (final RocksDB db = RocksDB.open(options, "dir/db")) {
+ // write key/value pairs using MemEnv
+ for (int i = 0; i < keys.length; i++) {
+ db.put(keys[i], values[i]);
+ }
+
+ // read key/value pairs using MemEnv
+ for (int i = 0; i < keys.length; i++) {
+ assertThat(db.get(keys[i])).isEqualTo(values[i]);
+ }
+
+ // Check iterator access
+ try (final RocksIterator iterator = db.newIterator()) {
+ iterator.seekToFirst();
+ for (int i = 0; i < keys.length; i++) {
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo(keys[i]);
+ assertThat(iterator.value()).isEqualTo(values[i]);
+ iterator.next();
+ }
+ // reached end of database
+ assertThat(iterator.isValid()).isFalse();
+ }
+
+ // flush
+ db.flush(flushOptions);
+
+ // read key/value pairs after flush using MemEnv
+ for (int i = 0; i < keys.length; i++) {
+ assertThat(db.get(keys[i])).isEqualTo(values[i]);
+ }
+ }
+
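+      // disable creation so the reopen below must find the existing in-memory db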
+ options.setCreateIfMissing(false);
+
+      // After reopen, the values are still present in the mem env,
+      // as long as the env itself has not been freed.
+ try (final RocksDB db = RocksDB.open(options, "dir/db")) {
+ // read key/value pairs using MemEnv
+ for (int i = 0; i < keys.length; i++) {
+ assertThat(db.get(keys[i])).isEqualTo(values[i]);
+ }
+ }
+ }
+ }
+
+ @Test
+ public void multipleDatabaseInstances() throws RocksDBException {
+ // db - keys
+ final byte[][] keys = {
+ "aaa".getBytes(),
+ "bbb".getBytes(),
+ "ccc".getBytes()
+ };
+ // otherDb - keys
+ final byte[][] otherKeys = {
+ "111".getBytes(),
+ "222".getBytes(),
+ "333".getBytes()
+ };
+ // values
+ final byte[][] values = {
+ "foo".getBytes(),
+ "bar".getBytes(),
+ "baz".getBytes()
+ };
+
+ try (final Env env = new RocksMemEnv(Env.getDefault());
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setEnv(env);
+ final RocksDB db = RocksDB.open(options, "dir/db");
+ final RocksDB otherDb = RocksDB.open(options, "dir/otherDb")
+ ) {
+ // write key/value pairs using MemEnv
+ // to db and to otherDb.
+ for (int i = 0; i < keys.length; i++) {
+ db.put(keys[i], values[i]);
+ otherDb.put(otherKeys[i], values[i]);
+ }
+
+      // verify that each database only sees its own key/value pairs
+ for (int i = 0; i < keys.length; i++) {
+ // verify db
+ assertThat(db.get(otherKeys[i])).isNull();
+ assertThat(db.get(keys[i])).isEqualTo(values[i]);
+
+ // verify otherDb
+ assertThat(otherDb.get(keys[i])).isNull();
+ assertThat(otherDb.get(otherKeys[i])).isEqualTo(values[i]);
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void createIfMissingFalse() throws RocksDBException {
+ try (final Env env = new RocksMemEnv(Env.getDefault());
+ final Options options = new Options()
+ .setCreateIfMissing(false)
+ .setEnv(env);
+ final RocksDB db = RocksDB.open(options, "db/dir")) {
+ // shall throw an exception because db dir does not
+ // exist.
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java b/src/rocksdb/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java
new file mode 100644
index 000000000..6116f2f92
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java
@@ -0,0 +1,18 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.rules.ExternalResource;
+
+/**
+ * Resource to load the RocksDB JNI library.
+ */
+public class RocksNativeLibraryResource extends ExternalResource {
+ @Override
+ protected void before() {
+ RocksDB.loadLibrary();
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java
new file mode 100644
index 000000000..c65b01903
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java
@@ -0,0 +1,80 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class SliceTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+ public void slice() {
+ try (final Slice slice = new Slice("testSlice")) {
+ assertThat(slice.empty()).isFalse();
+ assertThat(slice.size()).isEqualTo(9);
+ assertThat(slice.data()).isEqualTo("testSlice".getBytes());
+ }
+
+ try (final Slice otherSlice = new Slice("otherSlice".getBytes())) {
+ assertThat(otherSlice.data()).isEqualTo("otherSlice".getBytes());
+ }
+
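+    // the (byte[], offset) constructor skips the first 'offset' bytes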
+ try (final Slice thirdSlice = new Slice("otherSlice".getBytes(), 5)) {
+ assertThat(thirdSlice.data()).isEqualTo("Slice".getBytes());
+ }
+ }
+
+ @Test
+ public void sliceClear() {
+ try (final Slice slice = new Slice("abc")) {
+ assertThat(slice.toString()).isEqualTo("abc");
+ slice.clear();
+ assertThat(slice.toString()).isEmpty();
+ slice.clear(); // make sure we don't double-free
+ }
+ }
+
+ @Test
+ public void sliceRemovePrefix() {
+ try (final Slice slice = new Slice("abc")) {
+ assertThat(slice.toString()).isEqualTo("abc");
+ slice.removePrefix(1);
+ assertThat(slice.toString()).isEqualTo("bc");
+ }
+ }
+
+ @Test
+ public void sliceEquals() {
+ try (final Slice slice = new Slice("abc");
+ final Slice slice2 = new Slice("abc")) {
+ assertThat(slice.equals(slice2)).isTrue();
+ assertThat(slice.hashCode() == slice2.hashCode()).isTrue();
+ }
+ }
+
+ @Test
+ public void sliceStartWith() {
+ try (final Slice slice = new Slice("matchpoint");
+ final Slice match = new Slice("mat");
+ final Slice noMatch = new Slice("nomatch")) {
+ assertThat(slice.startsWith(match)).isTrue();
+ assertThat(slice.startsWith(noMatch)).isFalse();
+ }
+ }
+
+ @Test
+ public void sliceToString() {
+ try (final Slice slice = new Slice("stringTest")) {
+ assertThat(slice.toString()).isEqualTo("stringTest");
+ assertThat(slice.toString(true)).isNotEqualTo("");
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java
new file mode 100644
index 000000000..11f0d560a
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java
@@ -0,0 +1,169 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class SnapshotTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void snapshots() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ db.put("key".getBytes(), "value".getBytes());
+ // Get new Snapshot of database
+ try (final Snapshot snapshot = db.getSnapshot()) {
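+        // only one put has been written, so the snapshot sequence number is 1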
+ assertThat(snapshot.getSequenceNumber()).isGreaterThan(0);
+ assertThat(snapshot.getSequenceNumber()).isEqualTo(1);
+ try (final ReadOptions readOptions = new ReadOptions()) {
+ // set snapshot in ReadOptions
+ readOptions.setSnapshot(snapshot);
+
+ // retrieve key value pair
+ assertThat(new String(db.get("key".getBytes()))).
+ isEqualTo("value");
+ // retrieve key value pair created before
+ // the snapshot was made
+ assertThat(new String(db.get(readOptions,
+ "key".getBytes()))).isEqualTo("value");
+ // add new key/value pair
+ db.put("newkey".getBytes(), "newvalue".getBytes());
+          // without a snapshot, the latest db entries
+          // are visible to reads
+ assertThat(new String(db.get("newkey".getBytes()))).
+ isEqualTo("newvalue");
+          // the snapshot was created before newkey was added
+ assertThat(db.get(readOptions, "newkey".getBytes())).
+ isNull();
+ // Retrieve snapshot from read options
+ try (final Snapshot sameSnapshot = readOptions.snapshot()) {
+ readOptions.setSnapshot(sameSnapshot);
+            // results must be the same, since the new Snapshot
+            // instance wraps the same native pointer
+ assertThat(new String(db.get(readOptions,
+ "key".getBytes()))).isEqualTo("value");
+ // update key value pair to newvalue
+ db.put("key".getBytes(), "newvalue".getBytes());
+            // a read using the previously created snapshot returns
+            // the previous version of the key value pair
+ assertThat(new String(db.get(readOptions,
+ "key".getBytes()))).isEqualTo("value");
+            // reading newkey through the snapshot must
+            // return null
+ assertThat(db.get(readOptions, "newkey".getBytes())).
+ isNull();
+            // setting the snapshot in ReadOptions to null means
+            // that no snapshot is used
+ readOptions.setSnapshot(null);
+ assertThat(new String(db.get(readOptions,
+ "newkey".getBytes()))).isEqualTo("newvalue");
+ // release Snapshot
+ db.releaseSnapshot(snapshot);
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void iteratorWithSnapshot() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ db.put("key".getBytes(), "value".getBytes());
+
+ // Get new Snapshot of database
+ // set snapshot in ReadOptions
+ try (final Snapshot snapshot = db.getSnapshot();
+ final ReadOptions readOptions =
+ new ReadOptions().setSnapshot(snapshot)) {
+ db.put("key2".getBytes(), "value2".getBytes());
+
+ // iterate over current state of db
+ try (final RocksIterator iterator = db.newIterator()) {
+ iterator.seekToFirst();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key".getBytes());
+ iterator.next();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key2".getBytes());
+ iterator.next();
+ assertThat(iterator.isValid()).isFalse();
+ }
+
+ // iterate using a snapshot
+ try (final RocksIterator snapshotIterator =
+ db.newIterator(readOptions)) {
+ snapshotIterator.seekToFirst();
+ assertThat(snapshotIterator.isValid()).isTrue();
+ assertThat(snapshotIterator.key()).isEqualTo("key".getBytes());
+ snapshotIterator.next();
+ assertThat(snapshotIterator.isValid()).isFalse();
+ }
+
+ // release Snapshot
+ db.releaseSnapshot(snapshot);
+ }
+ }
+ }
+
+ @Test
+ public void iteratorWithSnapshotOnColumnFamily() throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ db.put("key".getBytes(), "value".getBytes());
+
+ // Get new Snapshot of database
+ // set snapshot in ReadOptions
+ try (final Snapshot snapshot = db.getSnapshot();
+ final ReadOptions readOptions = new ReadOptions()
+ .setSnapshot(snapshot)) {
+ db.put("key2".getBytes(), "value2".getBytes());
+
+ // iterate over current state of column family
+ try (final RocksIterator iterator = db.newIterator(
+ db.getDefaultColumnFamily())) {
+ iterator.seekToFirst();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key".getBytes());
+ iterator.next();
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key2".getBytes());
+ iterator.next();
+ assertThat(iterator.isValid()).isFalse();
+ }
+
+ // iterate using a snapshot on default column family
+ try (final RocksIterator snapshotIterator = db.newIterator(
+ db.getDefaultColumnFamily(), readOptions)) {
+ snapshotIterator.seekToFirst();
+ assertThat(snapshotIterator.isValid()).isTrue();
+ assertThat(snapshotIterator.key()).isEqualTo("key".getBytes());
+ snapshotIterator.next();
+ assertThat(snapshotIterator.isValid()).isFalse();
+
+ // release Snapshot
+ db.releaseSnapshot(snapshot);
+ }
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SstFileManagerTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileManagerTest.java
new file mode 100644
index 000000000..2e136e820
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileManagerTest.java
@@ -0,0 +1,66 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import java.util.Collections;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class SstFileManagerTest {
+
+ @Test
+ public void maxAllowedSpaceUsage() throws RocksDBException {
+ try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) {
+ sstFileManager.setMaxAllowedSpaceUsage(1024 * 1024 * 64);
+ assertThat(sstFileManager.isMaxAllowedSpaceReached()).isFalse();
+ assertThat(sstFileManager.isMaxAllowedSpaceReachedIncludingCompactions()).isFalse();
+ }
+ }
+
+ @Test
+ public void compactionBufferSize() throws RocksDBException {
+ try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) {
+ sstFileManager.setCompactionBufferSize(1024 * 1024 * 10);
+ assertThat(sstFileManager.isMaxAllowedSpaceReachedIncludingCompactions()).isFalse();
+ }
+ }
+
+ @Test
+ public void totalSize() throws RocksDBException {
+ try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) {
+ assertThat(sstFileManager.getTotalSize()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void trackedFiles() throws RocksDBException {
+ try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) {
+ assertThat(sstFileManager.getTrackedFiles()).isEqualTo(Collections.emptyMap());
+ }
+ }
+
+ @Test
+ public void deleteRateBytesPerSecond() throws RocksDBException {
+ try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) {
+ assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(SstFileManager.RATE_BYTES_PER_SEC_DEFAULT);
+ final long ratePerSecond = 1024 * 1024 * 52;
+ sstFileManager.setDeleteRateBytesPerSecond(ratePerSecond);
+ assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(ratePerSecond);
+ }
+ }
+
+ @Test
+ public void maxTrashDBRatio() throws RocksDBException {
+ try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) {
+ assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(SstFileManager.MAX_TRASH_DB_RATION_DEFAULT);
+ final double trashRatio = 0.2;
+ sstFileManager.setMaxTrashDBRatio(trashRatio);
+ assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(trashRatio);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SstFileReaderTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileReaderTest.java
new file mode 100644
index 000000000..0b841f420
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileReaderTest.java
@@ -0,0 +1,155 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+public class SstFileReaderTest {
+ private static final String SST_FILE_NAME = "test.sst";
+
+ class KeyValueWithOp {
+ KeyValueWithOp(String key, String value, OpType opType) {
+ this.key = key;
+ this.value = value;
+ this.opType = opType;
+ }
+
+ String getKey() {
+ return key;
+ }
+
+ String getValue() {
+ return value;
+ }
+
+ OpType getOpType() {
+ return opType;
+ }
+
+ private String key;
+ private String value;
+ private OpType opType;
+ }
+
+ @Rule public TemporaryFolder parentFolder = new TemporaryFolder();
+
+ enum OpType { PUT, PUT_BYTES, MERGE, MERGE_BYTES, DELETE, DELETE_BYTES }
+
+ private File newSstFile(final List<KeyValueWithOp> keyValues)
+ throws IOException, RocksDBException {
+ final EnvOptions envOptions = new EnvOptions();
+ final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+ final Options options = new Options().setMergeOperator(stringAppendOperator);
+    final SstFileWriter sstFileWriter = new SstFileWriter(envOptions, options);
+
+ final File sstFile = parentFolder.newFile(SST_FILE_NAME);
+ try {
+ sstFileWriter.open(sstFile.getAbsolutePath());
+ for (KeyValueWithOp keyValue : keyValues) {
+ Slice keySlice = new Slice(keyValue.getKey());
+ Slice valueSlice = new Slice(keyValue.getValue());
+ byte[] keyBytes = keyValue.getKey().getBytes();
+ byte[] valueBytes = keyValue.getValue().getBytes();
+ switch (keyValue.getOpType()) {
+ case PUT:
+ sstFileWriter.put(keySlice, valueSlice);
+ break;
+ case PUT_BYTES:
+ sstFileWriter.put(keyBytes, valueBytes);
+ break;
+ case MERGE:
+ sstFileWriter.merge(keySlice, valueSlice);
+ break;
+ case MERGE_BYTES:
+ sstFileWriter.merge(keyBytes, valueBytes);
+ break;
+ case DELETE:
+ sstFileWriter.delete(keySlice);
+ break;
+ case DELETE_BYTES:
+ sstFileWriter.delete(keyBytes);
+ break;
+ default:
+ fail("Unsupported op type");
+ }
+ keySlice.close();
+ valueSlice.close();
+ }
+ sstFileWriter.finish();
+ } finally {
+ assertThat(sstFileWriter).isNotNull();
+ sstFileWriter.close();
+ options.close();
+ envOptions.close();
+ }
+ return sstFile;
+ }
+
+ @Test
+ public void readSstFile() throws RocksDBException, IOException {
+ final List<KeyValueWithOp> keyValues = new ArrayList<>();
+ keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT));
+
+ final File sstFile = newSstFile(keyValues);
+ try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+ final Options options =
+ new Options().setCreateIfMissing(true).setMergeOperator(stringAppendOperator);
+ final SstFileReader reader = new SstFileReader(options)) {
+ // Open the sst file and iterator
+ reader.open(sstFile.getAbsolutePath());
+ final ReadOptions readOptions = new ReadOptions();
+ final SstFileReaderIterator iterator = reader.newIterator(readOptions);
+
+ // Use the iterator to read sst file
+ iterator.seekToFirst();
+
+ // Verify Checksum
+ reader.verifyChecksum();
+
+ // Verify Table Properties
+      assertEquals(1, reader.getTableProperties().getNumEntries());
+
+ // Check key and value
+ assertThat(iterator.key()).isEqualTo("key1".getBytes());
+ assertThat(iterator.value()).isEqualTo("value1".getBytes());
+
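+      // seek(ByteBuffer) consumes the buffer: position advances to the limit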
+ ByteBuffer direct = ByteBuffer.allocateDirect(128);
+ direct.put("key1".getBytes()).flip();
+ iterator.seek(direct);
+ assertThat(direct.position()).isEqualTo(4);
+ assertThat(direct.limit()).isEqualTo(4);
+
+ assertThat(iterator.isValid()).isTrue();
+ assertThat(iterator.key()).isEqualTo("key1".getBytes());
+ assertThat(iterator.value()).isEqualTo("value1".getBytes());
+
+ direct.clear();
+ assertThat(iterator.key(direct)).isEqualTo("key1".getBytes().length);
+ byte[] dst = new byte["key1".getBytes().length];
+ direct.get(dst);
+ assertThat(new String(dst)).isEqualTo("key1");
+
+ direct.clear();
+ assertThat(iterator.value(direct)).isEqualTo("value1".getBytes().length);
+ dst = new byte["value1".getBytes().length];
+ direct.get(dst);
+ assertThat(new String(dst)).isEqualTo("value1");
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java
new file mode 100644
index 000000000..0a5506fc1
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java
@@ -0,0 +1,241 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.rocksdb.util.BytewiseComparator;
+
+public class SstFileWriterTest {
+ private static final String SST_FILE_NAME = "test.sst";
+ private static final String DB_DIRECTORY_NAME = "test_db";
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE
+ = new RocksNativeLibraryResource();
+
+ @Rule public TemporaryFolder parentFolder = new TemporaryFolder();
+
+ enum OpType { PUT, PUT_BYTES, PUT_DIRECT, MERGE, MERGE_BYTES, DELETE, DELETE_BYTES }
+
+ class KeyValueWithOp {
+ KeyValueWithOp(String key, String value, OpType opType) {
+ this.key = key;
+ this.value = value;
+ this.opType = opType;
+ }
+
+ String getKey() {
+ return key;
+ }
+
+ String getValue() {
+ return value;
+ }
+
+ OpType getOpType() {
+ return opType;
+ }
+
+ private String key;
+ private String value;
+ private OpType opType;
+  }
+
+ private File newSstFile(final List<KeyValueWithOp> keyValues,
+ boolean useJavaBytewiseComparator) throws IOException, RocksDBException {
+ final EnvOptions envOptions = new EnvOptions();
+ final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+ final Options options = new Options().setMergeOperator(stringAppendOperator);
+    ComparatorOptions comparatorOptions = null;
+    BytewiseComparator comparator = null;
+    if (useJavaBytewiseComparator) {
+      comparatorOptions = new ComparatorOptions().setUseDirectBuffer(false);
+      comparator = new BytewiseComparator(comparatorOptions);
+      options.setComparator(comparator);
+    }
+    final SstFileWriter sstFileWriter = new SstFileWriter(envOptions, options);
+
+ final File sstFile = parentFolder.newFile(SST_FILE_NAME);
+ try {
+ sstFileWriter.open(sstFile.getAbsolutePath());
+ assertThat(sstFileWriter.fileSize()).isEqualTo(0);
+ for (KeyValueWithOp keyValue : keyValues) {
+ Slice keySlice = new Slice(keyValue.getKey());
+ Slice valueSlice = new Slice(keyValue.getValue());
+ byte[] keyBytes = keyValue.getKey().getBytes();
+ byte[] valueBytes = keyValue.getValue().getBytes();
+ ByteBuffer keyDirect = ByteBuffer.allocateDirect(keyBytes.length);
+ keyDirect.put(keyBytes);
+ keyDirect.flip();
+ ByteBuffer valueDirect = ByteBuffer.allocateDirect(valueBytes.length);
+ valueDirect.put(valueBytes);
+ valueDirect.flip();
+ switch (keyValue.getOpType()) {
+ case PUT:
+ sstFileWriter.put(keySlice, valueSlice);
+ break;
+ case PUT_BYTES:
+ sstFileWriter.put(keyBytes, valueBytes);
+ break;
+ case PUT_DIRECT:
+ sstFileWriter.put(keyDirect, valueDirect);
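+            // the direct-buffer put consumes both buffers:
+            // position advances to the limit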
+ assertThat(keyDirect.position()).isEqualTo(keyBytes.length);
+ assertThat(keyDirect.limit()).isEqualTo(keyBytes.length);
+ assertThat(valueDirect.position()).isEqualTo(valueBytes.length);
+ assertThat(valueDirect.limit()).isEqualTo(valueBytes.length);
+ break;
+ case MERGE:
+ sstFileWriter.merge(keySlice, valueSlice);
+ break;
+ case MERGE_BYTES:
+ sstFileWriter.merge(keyBytes, valueBytes);
+ break;
+ case DELETE:
+ sstFileWriter.delete(keySlice);
+ break;
+ case DELETE_BYTES:
+ sstFileWriter.delete(keyBytes);
+ break;
+ default:
+ fail("Unsupported op type");
+ }
+ keySlice.close();
+ valueSlice.close();
+ }
+ sstFileWriter.finish();
+ assertThat(sstFileWriter.fileSize()).isGreaterThan(100);
+ } finally {
+ assertThat(sstFileWriter).isNotNull();
+ sstFileWriter.close();
+ options.close();
+ envOptions.close();
+ if (comparatorOptions != null) {
+ comparatorOptions.close();
+ }
+ if (comparator != null) {
+ comparator.close();
+ }
+ }
+ return sstFile;
+ }
+
+ @Test
+ public void generateSstFileWithJavaComparator()
+ throws RocksDBException, IOException {
+ final List<KeyValueWithOp> keyValues = new ArrayList<>();
+ keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT));
+ keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT));
+ keyValues.add(new KeyValueWithOp("key3", "value3", OpType.MERGE));
+ keyValues.add(new KeyValueWithOp("key4", "value4", OpType.MERGE));
+ keyValues.add(new KeyValueWithOp("key5", "", OpType.DELETE));
+
+ newSstFile(keyValues, true);
+ }
+
+ @Test
+ public void generateSstFileWithNativeComparator()
+ throws RocksDBException, IOException {
+ final List<KeyValueWithOp> keyValues = new ArrayList<>();
+ keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT));
+ keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT));
+ keyValues.add(new KeyValueWithOp("key3", "value3", OpType.MERGE));
+ keyValues.add(new KeyValueWithOp("key4", "value4", OpType.MERGE));
+ keyValues.add(new KeyValueWithOp("key5", "", OpType.DELETE));
+
+ newSstFile(keyValues, false);
+ }
+
+ @Test
+ public void ingestSstFile() throws RocksDBException, IOException {
+ final List<KeyValueWithOp> keyValues = new ArrayList<>();
+ keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT));
+ keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT_DIRECT));
+ keyValues.add(new KeyValueWithOp("key3", "value3", OpType.PUT_BYTES));
+ keyValues.add(new KeyValueWithOp("key4", "value4", OpType.MERGE));
+ keyValues.add(new KeyValueWithOp("key5", "value5", OpType.MERGE_BYTES));
+ keyValues.add(new KeyValueWithOp("key6", "", OpType.DELETE));
+ keyValues.add(new KeyValueWithOp("key7", "", OpType.DELETE));
+
+
+ final File sstFile = newSstFile(keyValues, false);
+ final File dbFolder = parentFolder.newFolder(DB_DIRECTORY_NAME);
+ try(final StringAppendOperator stringAppendOperator =
+ new StringAppendOperator();
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setMergeOperator(stringAppendOperator);
+ final RocksDB db = RocksDB.open(options, dbFolder.getAbsolutePath());
+ final IngestExternalFileOptions ingestExternalFileOptions =
+ new IngestExternalFileOptions()) {
+ db.ingestExternalFile(Arrays.asList(sstFile.getAbsolutePath()),
+ ingestExternalFileOptions);
+
+ assertThat(db.get("key1".getBytes())).isEqualTo("value1".getBytes());
+ assertThat(db.get("key2".getBytes())).isEqualTo("value2".getBytes());
+ assertThat(db.get("key3".getBytes())).isEqualTo("value3".getBytes());
+ assertThat(db.get("key4".getBytes())).isEqualTo("value4".getBytes());
+ assertThat(db.get("key5".getBytes())).isEqualTo("value5".getBytes());
+      assertThat(db.get("key6".getBytes())).isNull();
+      assertThat(db.get("key7".getBytes())).isNull();
+ }
+ }
+
+ @Test
+ public void ingestSstFile_cf() throws RocksDBException, IOException {
+ final List<KeyValueWithOp> keyValues = new ArrayList<>();
+ keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT));
+ keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT));
+ keyValues.add(new KeyValueWithOp("key3", "value3", OpType.MERGE));
+ keyValues.add(new KeyValueWithOp("key4", "", OpType.DELETE));
+
+ final File sstFile = newSstFile(keyValues, false);
+ final File dbFolder = parentFolder.newFolder(DB_DIRECTORY_NAME);
+ try(final StringAppendOperator stringAppendOperator =
+ new StringAppendOperator();
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true)
+ .setMergeOperator(stringAppendOperator);
+ final RocksDB db = RocksDB.open(options, dbFolder.getAbsolutePath());
+ final IngestExternalFileOptions ingestExternalFileOptions =
+ new IngestExternalFileOptions()) {
+
+ try(final ColumnFamilyOptions cf_opts = new ColumnFamilyOptions()
+ .setMergeOperator(stringAppendOperator);
+ final ColumnFamilyHandle cf_handle = db.createColumnFamily(
+ new ColumnFamilyDescriptor("new_cf".getBytes(), cf_opts))) {
+
+ db.ingestExternalFile(cf_handle,
+ Arrays.asList(sstFile.getAbsolutePath()),
+ ingestExternalFileOptions);
+
+ assertThat(db.get(cf_handle,
+ "key1".getBytes())).isEqualTo("value1".getBytes());
+ assertThat(db.get(cf_handle,
+ "key2".getBytes())).isEqualTo("value2".getBytes());
+ assertThat(db.get(cf_handle,
+ "key3".getBytes())).isEqualTo("value3".getBytes());
+        assertThat(db.get(cf_handle,
+            "key4".getBytes())).isNull();
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java
new file mode 100644
index 000000000..36721c80d
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java
@@ -0,0 +1,55 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Collections;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class StatisticsCollectorTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void statisticsCollector()
+ throws InterruptedException, RocksDBException {
+ try (final Statistics statistics = new Statistics();
+ final Options opt = new Options()
+ .setStatistics(statistics)
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ try(final Statistics stats = opt.statistics()) {
+
+ final StatsCallbackMock callback = new StatsCallbackMock();
+ final StatsCollectorInput statsInput =
+ new StatsCollectorInput(stats, callback);
+
+ final StatisticsCollector statsCollector = new StatisticsCollector(
+ Collections.singletonList(statsInput), 100);
+ statsCollector.start();
+
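+        // sleep long enough for the collector, polling every 100 ms,
+        // to invoke the callbacks several times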
+ Thread.sleep(1000);
+
+ assertThat(callback.tickerCallbackCount).isGreaterThan(0);
+ assertThat(callback.histCallbackCount).isGreaterThan(0);
+
+ statsCollector.shutDown(1000);
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java
new file mode 100644
index 000000000..de92102ec
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java
@@ -0,0 +1,168 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.nio.charset.StandardCharsets;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class StatisticsTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void statsLevel() throws RocksDBException {
+    try (final Statistics statistics = new Statistics()) {
+      statistics.setStatsLevel(StatsLevel.ALL);
+      assertThat(statistics.statsLevel()).isEqualTo(StatsLevel.ALL);
+    }
+ }
+
+ @Test
+ public void getTickerCount() throws RocksDBException {
+ try (final Statistics statistics = new Statistics();
+ final Options opt = new Options()
+ .setStatistics(statistics)
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
+ final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
+
+ db.put(key, value);
+ for(int i = 0; i < 10; i++) {
+ db.get(key);
+ }
+
+ assertThat(statistics.getTickerCount(TickerType.BYTES_READ)).isGreaterThan(0);
+ }
+ }
+
+ @Test
+ public void getAndResetTickerCount() throws RocksDBException {
+ try (final Statistics statistics = new Statistics();
+ final Options opt = new Options()
+ .setStatistics(statistics)
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
+ final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
+
+ db.put(key, value);
+ for(int i = 0; i < 10; i++) {
+ db.get(key);
+ }
+
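+      // getAndResetTickerCount returns the current value and resets the ticker,
+      // so a subsequent read reports a lower count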
+ final long read = statistics.getAndResetTickerCount(TickerType.BYTES_READ);
+ assertThat(read).isGreaterThan(0);
+
+ final long readAfterReset = statistics.getTickerCount(TickerType.BYTES_READ);
+ assertThat(readAfterReset).isLessThan(read);
+ }
+ }
+
+ @Test
+ public void getHistogramData() throws RocksDBException {
+ try (final Statistics statistics = new Statistics();
+ final Options opt = new Options()
+ .setStatistics(statistics)
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
+ final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
+
+ db.put(key, value);
+ for(int i = 0; i < 10; i++) {
+ db.get(key);
+ }
+
+ final HistogramData histogramData = statistics.getHistogramData(HistogramType.BYTES_PER_READ);
+ assertThat(histogramData).isNotNull();
+ assertThat(histogramData.getAverage()).isGreaterThan(0);
+ assertThat(histogramData.getMedian()).isGreaterThan(0);
+ assertThat(histogramData.getPercentile95()).isGreaterThan(0);
+ assertThat(histogramData.getPercentile99()).isGreaterThan(0);
+ assertThat(histogramData.getStandardDeviation()).isEqualTo(0.00);
+ assertThat(histogramData.getMax()).isGreaterThan(0);
+ assertThat(histogramData.getCount()).isGreaterThan(0);
+ assertThat(histogramData.getSum()).isGreaterThan(0);
+ assertThat(histogramData.getMin()).isGreaterThan(0);
+ }
+ }
+
+ @Test
+ public void getHistogramString() throws RocksDBException {
+ try (final Statistics statistics = new Statistics();
+ final Options opt = new Options()
+ .setStatistics(statistics)
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
+ final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
+
+ for(int i = 0; i < 10; i++) {
+ db.put(key, value);
+ }
+
+ assertThat(statistics.getHistogramString(HistogramType.BYTES_PER_WRITE)).isNotNull();
+ }
+ }
+
+ @Test
+ public void reset() throws RocksDBException {
+ try (final Statistics statistics = new Statistics();
+ final Options opt = new Options()
+ .setStatistics(statistics)
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
+ final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
+
+ db.put(key, value);
+ for(int i = 0; i < 10; i++) {
+ db.get(key);
+ }
+
+ final long read = statistics.getTickerCount(TickerType.BYTES_READ);
+ assertThat(read).isGreaterThan(0);
+
+ statistics.reset();
+
+ final long readAfterReset = statistics.getTickerCount(TickerType.BYTES_READ);
+ assertThat(readAfterReset).isLessThan(read);
+ }
+ }
+
+ @Test
+ public void ToString() throws RocksDBException {
+ try (final Statistics statistics = new Statistics();
+ final Options opt = new Options()
+ .setStatistics(statistics)
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+ assertThat(statistics.toString()).isNotNull();
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java b/src/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java
new file mode 100644
index 000000000..af8db0caa
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java
@@ -0,0 +1,20 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class StatsCallbackMock implements StatisticsCollectorCallback {
+ public int tickerCallbackCount = 0;
+ public int histCallbackCount = 0;
+
+ public void tickerCallback(TickerType tickerType, long tickerCount) {
+ tickerCallbackCount++;
+ }
+
+ public void histogramCallback(HistogramType histType,
+ HistogramData histData) {
+ histCallbackCount++;
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TableFilterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TableFilterTest.java
new file mode 100644
index 000000000..2bd3b1798
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/TableFilterTest.java
@@ -0,0 +1,106 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TableFilterTest {
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void readOptions() throws RocksDBException {
+ try (final DBOptions opt = new DBOptions().
+ setCreateIfMissing(true).
+ setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions()
+ ) {
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
+ );
+
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+ // open database
+ try (final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath(),
+ columnFamilyDescriptors,
+ columnFamilyHandles)) {
+
+ try (final CfNameCollectionTableFilter cfNameCollectingTableFilter =
+ new CfNameCollectionTableFilter();
+ final FlushOptions flushOptions =
+ new FlushOptions().setWaitForFlush(true);
+ final ReadOptions readOptions =
+ new ReadOptions().setTableFilter(cfNameCollectingTableFilter)) {
+
+ db.put(columnFamilyHandles.get(0),
+ "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ db.put(columnFamilyHandles.get(0),
+ "key2".getBytes(UTF_8), "value2".getBytes(UTF_8));
+ db.put(columnFamilyHandles.get(0),
+ "key3".getBytes(UTF_8), "value3".getBytes(UTF_8));
+ db.put(columnFamilyHandles.get(1),
+ "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ db.put(columnFamilyHandles.get(1),
+ "key2".getBytes(UTF_8), "value2".getBytes(UTF_8));
+ db.put(columnFamilyHandles.get(1),
+ "key3".getBytes(UTF_8), "value3".getBytes(UTF_8));
+
+ db.flush(flushOptions, columnFamilyHandles);
+
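+          // the table filter runs for each SST table the iterators read,
+          // one flushed table per column family here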
+ try (final RocksIterator iterator =
+ db.newIterator(columnFamilyHandles.get(0), readOptions)) {
+ iterator.seekToFirst();
+ while (iterator.isValid()) {
+ iterator.key();
+ iterator.value();
+ iterator.next();
+ }
+ }
+
+ try (final RocksIterator iterator =
+ db.newIterator(columnFamilyHandles.get(1), readOptions)) {
+ iterator.seekToFirst();
+ while (iterator.isValid()) {
+ iterator.key();
+ iterator.value();
+ iterator.next();
+ }
+ }
+
+ assertThat(cfNameCollectingTableFilter.cfNames.size()).isEqualTo(2);
+ assertThat(cfNameCollectingTableFilter.cfNames.get(0))
+ .isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY);
+ assertThat(cfNameCollectingTableFilter.cfNames.get(1))
+ .isEqualTo("new_cf".getBytes(UTF_8));
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ private static class CfNameCollectionTableFilter extends AbstractTableFilter {
+ private final List<byte[]> cfNames = new ArrayList<>();
+
+ @Override
+ public boolean filter(final TableProperties tableProperties) {
+ cfNames.add(tableProperties.getColumnFamilyName());
+ return true;
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TimedEnvTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TimedEnvTest.java
new file mode 100644
index 000000000..c958f96b2
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/TimedEnvTest.java
@@ -0,0 +1,43 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+public class TimedEnvTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void construct() throws RocksDBException {
+ try (final Env env = new TimedEnv(Env.getDefault())) {
+ // no-op
+ }
+ }
+
+ @Test
+ public void construct_integration() throws RocksDBException {
+ try (final Env env = new TimedEnv(Env.getDefault());
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setEnv(env);
+ ) {
+ try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getPath())) {
+ db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java
new file mode 100644
index 000000000..7eaa6b16c
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java
@@ -0,0 +1,64 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import java.util.Random;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TransactionDBOptionsTest {
+
+ private static final Random rand = PlatformRandomHelper.
+ getPlatformSpecificRandomFactory();
+
+ @Test
+ public void maxNumLocks() {
+ try (final TransactionDBOptions opt = new TransactionDBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxNumLocks(longValue);
+ assertThat(opt.getMaxNumLocks()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void maxNumStripes() {
+ try (final TransactionDBOptions opt = new TransactionDBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setNumStripes(longValue);
+ assertThat(opt.getNumStripes()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void transactionLockTimeout() {
+ try (final TransactionDBOptions opt = new TransactionDBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setTransactionLockTimeout(longValue);
+ assertThat(opt.getTransactionLockTimeout()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void defaultLockTimeout() {
+ try (final TransactionDBOptions opt = new TransactionDBOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setDefaultLockTimeout(longValue);
+ assertThat(opt.getDefaultLockTimeout()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void writePolicy() {
+ try (final TransactionDBOptions opt = new TransactionDBOptions()) {
+ final TxnDBWritePolicy writePolicy = TxnDBWritePolicy.WRITE_UNPREPARED; // non-default
+ opt.setWritePolicy(writePolicy);
+ assertThat(opt.getWritePolicy()).isEqualTo(writePolicy);
+ }
+ }
+
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBTest.java
new file mode 100644
index 000000000..b0ea813ff
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBTest.java
@@ -0,0 +1,178 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.*;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+public class TransactionDBTest {
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void open() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+ dbFolder.getRoot().getAbsolutePath())) {
+ assertThat(tdb).isNotNull();
+ }
+ }
+
+ @Test
+ public void open_columnFamilies() throws RocksDBException {
+ try(final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions myCfOpts = new ColumnFamilyOptions()) {
+
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("myCf".getBytes(), myCfOpts));
+
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+ try (final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final TransactionDB tdb = TransactionDB.open(dbOptions, txnDbOptions,
+ dbFolder.getRoot().getAbsolutePath(),
+ columnFamilyDescriptors, columnFamilyHandles)) {
+ try {
+ assertThat(tdb).isNotNull();
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void beginTransaction() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+ dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions writeOptions = new WriteOptions()) {
+
+ try(final Transaction txn = tdb.beginTransaction(writeOptions)) {
+ assertThat(txn).isNotNull();
+ }
+ }
+ }
+
+ @Test
+ public void beginTransaction_transactionOptions() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+ dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions writeOptions = new WriteOptions();
+ final TransactionOptions txnOptions = new TransactionOptions()) {
+
+ try(final Transaction txn = tdb.beginTransaction(writeOptions,
+ txnOptions)) {
+ assertThat(txn).isNotNull();
+ }
+ }
+ }
+
+ @Test
+ public void beginTransaction_withOld() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+ dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions writeOptions = new WriteOptions()) {
+
+ try(final Transaction txn = tdb.beginTransaction(writeOptions)) {
+ final Transaction txnReused = tdb.beginTransaction(writeOptions, txn);
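+        // passing the old transaction recycles it: the same object is returned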
+ assertThat(txnReused).isSameAs(txn);
+ }
+ }
+ }
+
+ @Test
+ public void beginTransaction_withOld_transactionOptions()
+ throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+ dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions writeOptions = new WriteOptions();
+ final TransactionOptions txnOptions = new TransactionOptions()) {
+
+ try(final Transaction txn = tdb.beginTransaction(writeOptions)) {
+ final Transaction txnReused = tdb.beginTransaction(writeOptions,
+ txnOptions, txn);
+ assertThat(txnReused).isSameAs(txn);
+ }
+ }
+ }
+
+ @Test
+ public void lockStatusData() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+ dbFolder.getRoot().getAbsolutePath());
+ final WriteOptions writeOptions = new WriteOptions();
+ final ReadOptions readOptions = new ReadOptions()) {
+
+ try (final Transaction txn = tdb.beginTransaction(writeOptions)) {
+
+        final byte[] key = "key".getBytes(UTF_8);
+        final byte[] value = "value".getBytes(UTF_8);
+
+ txn.put(key, value);
+ assertThat(txn.getForUpdate(readOptions, key, true)).isEqualTo(value);
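+        // getForUpdate with exclusive=true takes an exclusive lock on the key,
+        // which is reflected in the lock status data below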
+
+ final Map<Long, TransactionDB.KeyLockInfo> lockStatus =
+ tdb.getLockStatusData();
+
+ assertThat(lockStatus.size()).isEqualTo(1);
+ final Set<Map.Entry<Long, TransactionDB.KeyLockInfo>> entrySet = lockStatus.entrySet();
+ final Map.Entry<Long, TransactionDB.KeyLockInfo> entry = entrySet.iterator().next();
+ final long columnFamilyId = entry.getKey();
+ assertThat(columnFamilyId).isEqualTo(0);
+ final TransactionDB.KeyLockInfo keyLockInfo = entry.getValue();
+ assertThat(keyLockInfo.getKey()).isEqualTo(new String(key, UTF_8));
+ assertThat(keyLockInfo.getTransactionIDs().length).isEqualTo(1);
+ assertThat(keyLockInfo.getTransactionIDs()[0]).isEqualTo(txn.getId());
+ assertThat(keyLockInfo.isExclusive()).isTrue();
+ }
+ }
+ }
+
+ @Test
+ public void deadlockInfoBuffer() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ // TODO(AR) can we cause a deadlock so that we can test the output here?
+ assertThat(tdb.getDeadlockInfoBuffer()).isEmpty();
+ }
+ }
+
+ @Test
+ public void setDeadlockInfoBufferSize() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final TransactionDB tdb = TransactionDB.open(options, txnDbOptions,
+ dbFolder.getRoot().getAbsolutePath())) {
+ tdb.setDeadlockInfoBufferSize(123);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java
new file mode 100644
index 000000000..3c4dff7bb
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java
@@ -0,0 +1,139 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TransactionLogIteratorTest {
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void transactionLogIterator() throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath());
+ final TransactionLogIterator transactionLogIterator =
+ db.getUpdatesSince(0)) {
+ //no-op
+ }
+ }
+
+ @Test
+ public void getBatch() throws RocksDBException {
+ final int numberOfPuts = 5;
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setWalTtlSeconds(1000)
+ .setWalSizeLimitMB(10);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ for (int i = 0; i < numberOfPuts; i++) {
+ db.put(String.valueOf(i).getBytes(),
+ String.valueOf(i).getBytes());
+ }
+ db.flush(new FlushOptions().setWaitForFlush(true));
+
+ // the latest sequence number is 5 because 5 puts
+ // were written beforehand
+ assertThat(db.getLatestSequenceNumber()).
+ isEqualTo(numberOfPuts);
+
+ // insert 5 writes into a cf
+ try (final ColumnFamilyHandle cfHandle = db.createColumnFamily(
+ new ColumnFamilyDescriptor("new_cf".getBytes()))) {
+ for (int i = 0; i < numberOfPuts; i++) {
+ db.put(cfHandle, String.valueOf(i).getBytes(),
+ String.valueOf(i).getBytes());
+ }
+ // the latest sequence number is 10 because
+ // (5 + 5) puts were written beforehand
+ assertThat(db.getLatestSequenceNumber()).
+ isEqualTo(numberOfPuts + numberOfPuts);
+
+ // Get updates since the beginning
+ try (final TransactionLogIterator transactionLogIterator =
+ db.getUpdatesSince(0)) {
+ assertThat(transactionLogIterator.isValid()).isTrue();
+ transactionLogIterator.status();
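+ // status() throws a RocksDBException if the iterator has hit an error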
+
+ // The first sequence number is 1
+ final TransactionLogIterator.BatchResult batchResult =
+ transactionLogIterator.getBatch();
+ assertThat(batchResult.sequenceNumber()).isEqualTo(1);
+ }
+ }
+ }
+ }
+
+ @Test
+ public void transactionLogIteratorStallAtLastRecord()
+ throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setWalTtlSeconds(1000)
+ .setWalSizeLimitMB(10);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ db.put("key1".getBytes(), "value1".getBytes());
+ // Get updates since the beginning
+ try (final TransactionLogIterator transactionLogIterator =
+ db.getUpdatesSince(0)) {
+ transactionLogIterator.status();
+ assertThat(transactionLogIterator.isValid()).isTrue();
+ transactionLogIterator.next();
+ assertThat(transactionLogIterator.isValid()).isFalse();
+ transactionLogIterator.status();
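+ // the iterator stalls once it has passed the last WAL record; a fresh
+ // write should make the following next() call valid again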
+ db.put("key2".getBytes(), "value2".getBytes());
+ transactionLogIterator.next();
+ transactionLogIterator.status();
+ assertThat(transactionLogIterator.isValid()).isTrue();
+ }
+ }
+ }
+
+ @Test
+ public void transactionLogIteratorCheckAfterRestart()
+ throws RocksDBException {
+ final int numberOfKeys = 2;
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setWalTtlSeconds(1000)
+ .setWalSizeLimitMB(10)) {
+
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ db.put("key1".getBytes(), "value1".getBytes());
+ db.put("key2".getBytes(), "value2".getBytes());
+ db.flush(new FlushOptions().setWaitForFlush(true));
+
+ }
+
+ // reopen
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ assertThat(db.getLatestSequenceNumber()).isEqualTo(numberOfKeys);
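+ // the WAL TTL settings keep the log files alive across the reopen, so
+ // both writes should still be iterable from sequence 0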
+
+ try (final TransactionLogIterator transactionLogIterator =
+ db.getUpdatesSince(0)) {
+ for (int i = 0; i < numberOfKeys; i++) {
+ transactionLogIterator.status();
+ assertThat(transactionLogIterator.isValid()).isTrue();
+ transactionLogIterator.next();
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TransactionOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionOptionsTest.java
new file mode 100644
index 000000000..add0439e0
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionOptionsTest.java
@@ -0,0 +1,72 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import java.util.Random;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TransactionOptionsTest {
+
+ private static final Random rand = PlatformRandomHelper.
+ getPlatformSpecificRandomFactory();
+
+ @Test
+ public void snapshot() {
+ try (final TransactionOptions opt = new TransactionOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setSetSnapshot(boolValue);
+ assertThat(opt.isSetSnapshot()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void deadlockDetect() {
+ try (final TransactionOptions opt = new TransactionOptions()) {
+ final boolean boolValue = rand.nextBoolean();
+ opt.setDeadlockDetect(boolValue);
+ assertThat(opt.isDeadlockDetect()).isEqualTo(boolValue);
+ }
+ }
+
+ @Test
+ public void lockTimeout() {
+ try (final TransactionOptions opt = new TransactionOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setLockTimeout(longValue);
+ assertThat(opt.getLockTimeout()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void expiration() {
+ try (final TransactionOptions opt = new TransactionOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setExpiration(longValue);
+ assertThat(opt.getExpiration()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void deadlockDetectDepth() {
+ try (final TransactionOptions opt = new TransactionOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setDeadlockDetectDepth(longValue);
+ assertThat(opt.getDeadlockDetectDepth()).isEqualTo(longValue);
+ }
+ }
+
+ @Test
+ public void maxWriteBatchSize() {
+ try (final TransactionOptions opt = new TransactionOptions()) {
+ final long longValue = rand.nextLong();
+ opt.setMaxWriteBatchSize(longValue);
+ assertThat(opt.getMaxWriteBatchSize()).isEqualTo(longValue);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TransactionTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionTest.java
new file mode 100644
index 000000000..57a05c9e3
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionTest.java
@@ -0,0 +1,308 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+
+public class TransactionTest extends AbstractTransactionTest {
+
+ @Test
+ public void getForUpdate_cf_conflict() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ final byte v12[] = "value12".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(testCf, k1, v1);
+ assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1);
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ try(final Transaction txn3 = dbContainer.beginTransaction()) {
+ assertThat(txn3.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1);
+
+ // NOTE: txn2 updates k1, during txn3
+ try {
+ txn2.put(testCf, k1, v12); // should cause an exception!
+ } catch(final RocksDBException e) {
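+ // txn3 still holds the exclusive lock, so the put should fail with
+ // TimedOut once the default lock timeout expires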
+ assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut);
+ return;
+ }
+ }
+ }
+
+ fail("Expected an exception for put after getForUpdate from conflicting" +
+ "transactions");
+ }
+ }
+
+ @Test
+ public void getForUpdate_conflict() throws RocksDBException {
+ final byte k1[] = "key1".getBytes(UTF_8);
+ final byte v1[] = "value1".getBytes(UTF_8);
+ final byte v12[] = "value12".getBytes(UTF_8);
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(k1, v1);
+ assertThat(txn.getForUpdate(readOptions, k1, true)).isEqualTo(v1);
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ try(final Transaction txn3 = dbContainer.beginTransaction()) {
+ assertThat(txn3.getForUpdate(readOptions, k1, true)).isEqualTo(v1);
+
+ // NOTE: txn2 updates k1, during txn3
+ try {
+ txn2.put(k1, v12); // should cause an exception!
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut);
+ return;
+ }
+ }
+ }
+
+ fail("Expected an exception for put after getForUpdate from conflicting" +
+ "transactions");
+ }
+ }
+
+ @Test
+ public void multiGetForUpdate_cf_conflict() throws RocksDBException {
+ final byte keys[][] = new byte[][] {
+ "key1".getBytes(UTF_8),
+ "key2".getBytes(UTF_8)};
+ final byte values[][] = new byte[][] {
+ "value1".getBytes(UTF_8),
+ "value2".getBytes(UTF_8)};
+ final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+ final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+ final List<ColumnFamilyHandle> cfList = Arrays.asList(testCf, testCf);
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(testCf, keys[0], values[0]);
+ txn.put(testCf, keys[1], values[1]);
+ assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values);
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ try(final Transaction txn3 = dbContainer.beginTransaction()) {
+ assertThat(txn3.multiGetForUpdate(readOptions, cfList, keys))
+ .isEqualTo(values);
+
+ // NOTE: txn2 updates k1, during txn3
+ try {
+ txn2.put(testCf, keys[0], otherValue); // should cause an exception!
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut);
+ return;
+ }
+ }
+ }
+
+ fail("Expected an exception for put after getForUpdate from conflicting" +
+ "transactions");
+ }
+ }
+
+ @Test
+ public void multiGetForUpdate_conflict() throws RocksDBException {
+ final byte keys[][] = new byte[][] {
+ "key1".getBytes(UTF_8),
+ "key2".getBytes(UTF_8)};
+ final byte values[][] = new byte[][] {
+ "value1".getBytes(UTF_8),
+ "value2".getBytes(UTF_8)};
+ final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+ try(final DBContainer dbContainer = startDb();
+ final ReadOptions readOptions = new ReadOptions()) {
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ txn.put(keys[0], values[0]);
+ txn.put(keys[1], values[1]);
+ assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values);
+ txn.commit();
+ }
+
+ try(final Transaction txn2 = dbContainer.beginTransaction()) {
+ try(final Transaction txn3 = dbContainer.beginTransaction()) {
+ assertThat(txn3.multiGetForUpdate(readOptions, keys))
+ .isEqualTo(values);
+
+ // NOTE: txn2 updates k1, during txn3
+ try {
+ txn2.put(keys[0], otherValue); // should cause an exception!
+ } catch(final RocksDBException e) {
+ assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut);
+ return;
+ }
+ }
+ }
+
+ fail("Expected an exception for put after getForUpdate from conflicting" +
+ "transactions");
+ }
+ }
+
+ @Test
+ public void name() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.getName()).isEmpty();
+ final String name = "my-transaction-" + rand.nextLong();
+ txn.setName(name);
+ assertThat(txn.getName()).isEqualTo(name);
+ }
+ }
+
+ @Test
+ public void ID() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.getID()).isGreaterThan(0);
+ }
+ }
+
+ @Test
+ public void deadlockDetect() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.isDeadlockDetect()).isFalse();
+ }
+ }
+
+ @Test
+ public void waitingTxns() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.getWaitingTxns().getTransactionIds().length).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void state() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb()) {
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.getState())
+ .isSameAs(Transaction.TransactionState.STARTED);
+ txn.commit();
+ assertThat(txn.getState())
+ .isSameAs(Transaction.TransactionState.COMMITED);
+ }
+
+ try(final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.getState())
+ .isSameAs(Transaction.TransactionState.STARTED);
+ txn.rollback();
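+ // note: unlike commit(), rollback() leaves the state as STARTED
+ // rather than moving it to a distinct rolled-back state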
+ assertThat(txn.getState())
+ .isSameAs(Transaction.TransactionState.STARTED);
+ }
+ }
+ }
+
+ @Test
+ public void Id() throws RocksDBException {
+ try(final DBContainer dbContainer = startDb();
+ final Transaction txn = dbContainer.beginTransaction()) {
+ assertThat(txn.getId()).isNotNull();
+ }
+ }
+
+ @Override
+ public TransactionDBContainer startDb() throws RocksDBException {
+ final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+ final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor(TXN_TEST_COLUMN_FAMILY,
+ columnFamilyOptions));
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+ final TransactionDB txnDb;
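+ // if open() throws, the options above are not yet owned by a container
+ // and must be closed by hand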
+ try {
+ txnDb = TransactionDB.open(options, txnDbOptions,
+ dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors,
+ columnFamilyHandles);
+ } catch(final RocksDBException e) {
+ columnFamilyOptions.close();
+ txnDbOptions.close();
+ options.close();
+ throw e;
+ }
+
+ final WriteOptions writeOptions = new WriteOptions();
+ final TransactionOptions txnOptions = new TransactionOptions();
+
+ return new TransactionDBContainer(txnOptions, writeOptions,
+ columnFamilyHandles, txnDb, txnDbOptions, columnFamilyOptions, options);
+ }
+
+ private static class TransactionDBContainer
+ extends DBContainer {
+ private final TransactionOptions txnOptions;
+ private final TransactionDB txnDb;
+ private final TransactionDBOptions txnDbOptions;
+
+ public TransactionDBContainer(
+ final TransactionOptions txnOptions, final WriteOptions writeOptions,
+ final List<ColumnFamilyHandle> columnFamilyHandles,
+ final TransactionDB txnDb, final TransactionDBOptions txnDbOptions,
+ final ColumnFamilyOptions columnFamilyOptions,
+ final DBOptions options) {
+ super(writeOptions, columnFamilyHandles, columnFamilyOptions,
+ options);
+ this.txnOptions = txnOptions;
+ this.txnDb = txnDb;
+ this.txnDbOptions = txnDbOptions;
+ }
+
+ @Override
+ public Transaction beginTransaction() {
+ return txnDb.beginTransaction(writeOptions, txnOptions);
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions) {
+ return txnDb.beginTransaction(writeOptions, txnOptions);
+ }
+
+ @Override
+ public void close() {
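+ // close in dependency order: per-transaction options first, then the
+ // column family handles, the database itself, and finally its options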
+ txnOptions.close();
+ writeOptions.close();
+ for(final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
+ columnFamilyHandle.close();
+ }
+ txnDb.close();
+ txnDbOptions.close();
+ options.close();
+ }
+ }
+
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java
new file mode 100644
index 000000000..ffa15e768
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java
@@ -0,0 +1,112 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TtlDBTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void ttlDBOpen() throws RocksDBException, InterruptedException {
+ try (final Options options = new Options().setCreateIfMissing(true).setMaxCompactionBytes(0);
+ final TtlDB ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+ ttlDB.put("key".getBytes(), "value".getBytes());
+ assertThat(ttlDB.get("key".getBytes())).
+ isEqualTo("value".getBytes());
+ assertThat(ttlDB.get("key".getBytes())).isNotNull();
+ }
+ }
+
+ @Test
+ public void ttlDBOpenWithTtl() throws RocksDBException, InterruptedException {
+ try (final Options options = new Options().setCreateIfMissing(true).setMaxCompactionBytes(0);
+ final TtlDB ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath(), 1, false)) {
+ ttlDB.put("key".getBytes(), "value".getBytes());
+ assertThat(ttlDB.get("key".getBytes())).
+ isEqualTo("value".getBytes());
+ TimeUnit.SECONDS.sleep(2);
+ ttlDB.compactRange();
+ assertThat(ttlDB.get("key".getBytes())).isNull();
+ }
+ }
+
+ @Test
+ public void ttlDbOpenWithColumnFamilies() throws RocksDBException,
+ InterruptedException {
+ final List<ColumnFamilyDescriptor> cfNames = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes())
+ );
+ final List<Integer> ttlValues = Arrays.asList(0, 1);
+
+ final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+ try (final DBOptions dbOptions = new DBOptions()
+ .setCreateMissingColumnFamilies(true)
+ .setCreateIfMissing(true);
+ final TtlDB ttlDB = TtlDB.open(dbOptions,
+ dbFolder.getRoot().getAbsolutePath(), cfNames,
+ columnFamilyHandleList, ttlValues, false)) {
+ try {
+ ttlDB.put("key".getBytes(), "value".getBytes());
+ assertThat(ttlDB.get("key".getBytes())).
+ isEqualTo("value".getBytes());
+ ttlDB.put(columnFamilyHandleList.get(1), "key".getBytes(),
+ "value".getBytes());
+ assertThat(ttlDB.get(columnFamilyHandleList.get(1),
+ "key".getBytes())).isEqualTo("value".getBytes());
+ TimeUnit.SECONDS.sleep(2);
+
+ ttlDB.compactRange();
+ ttlDB.compactRange(columnFamilyHandleList.get(1));
+
+ assertThat(ttlDB.get("key".getBytes())).isNotNull();
+ assertThat(ttlDB.get(columnFamilyHandleList.get(1),
+ "key".getBytes())).isNull();
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle :
+ columnFamilyHandleList) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void createTtlColumnFamily() throws RocksDBException,
+ InterruptedException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final TtlDB ttlDB = TtlDB.open(options,
+ dbFolder.getRoot().getAbsolutePath());
+ final ColumnFamilyHandle columnFamilyHandle =
+ ttlDB.createColumnFamilyWithTtl(
+ new ColumnFamilyDescriptor("new_cf".getBytes()), 1)) {
+ ttlDB.put(columnFamilyHandle, "key".getBytes(),
+ "value".getBytes());
+ assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())).
+ isEqualTo("value".getBytes());
+ TimeUnit.SECONDS.sleep(2);
+ ttlDB.compactRange(columnFamilyHandle);
+ assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())).isNull();
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/Types.java b/src/rocksdb/java/src/test/java/org/rocksdb/Types.java
new file mode 100644
index 000000000..c3c1de833
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/Types.java
@@ -0,0 +1,43 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Simple type conversion methods
+ * for use in tests
+ */
+public class Types {
+
+ /**
+ * Convert the first 4 bytes of a byte array to an int (little-endian)
+ *
+ * @param data The byte array
+ *
+ * @return An integer
+ */
+ public static int byteToInt(final byte data[]) {
+ return (data[0] & 0xff) |
+ ((data[1] & 0xff) << 8) |
+ ((data[2] & 0xff) << 16) |
+ ((data[3] & 0xff) << 24);
+ }
+
+ /**
+ * Convert an int to 4 bytes (little-endian)
+ *
+ * @param v The int
+ *
+ * @return A byte array containing 4 bytes
+ */
+ public static byte[] intToByte(final int v) {
+ return new byte[] {
+ (byte)((v >>> 0) & 0xff),
+ (byte)((v >>> 8) & 0xff),
+ (byte)((v >>> 16) & 0xff),
+ (byte)((v >>> 24) & 0xff)
+ };
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
new file mode 100644
index 000000000..2a0133f6b
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
@@ -0,0 +1,22 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+
+public class WALRecoveryModeTest {
+
+ @Test
+ public void getWALRecoveryMode() {
+ for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
+ assertThat(WALRecoveryMode.getWALRecoveryMode(walRecoveryMode.getValue()))
+ .isEqualTo(walRecoveryMode);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WalFilterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WalFilterTest.java
new file mode 100644
index 000000000..adeb959d1
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WalFilterTest.java
@@ -0,0 +1,165 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.rocksdb.util.ByteUtil.bytes;
+import static org.rocksdb.util.TestUtil.*;
+
+public class WalFilterTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void walFilter() throws RocksDBException {
+ // Create 3 batches with two keys each
+ final byte[][][] batchKeys = {
+ new byte[][] {
+ bytes("key1"),
+ bytes("key2")
+ },
+ new byte[][] {
+ bytes("key3"),
+ bytes("key4")
+ },
+ new byte[][] {
+ bytes("key5"),
+ bytes("key6")
+ }
+ };
+
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor(bytes("pikachu"))
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+ // Test with all WAL processing options
+ for (final WalProcessingOption option : WalProcessingOption.values()) {
+ try (final Options options = optionsForLogIterTest();
+ final DBOptions dbOptions = new DBOptions(options)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(dbOptions,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, cfHandles)) {
+ try (final WriteOptions writeOptions = new WriteOptions()) {
+ // Write given keys in given batches
+ for (int i = 0; i < batchKeys.length; i++) {
+ final WriteBatch batch = new WriteBatch();
+ for (int j = 0; j < batchKeys[i].length; j++) {
+ batch.put(cfHandles.get(0), batchKeys[i][j], dummyString(1024));
+ }
+ db.write(writeOptions, batch);
+ }
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ cfHandles.clear();
+ }
+ }
+
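+ // the writes above were never flushed, so they live only in the WAL;
+ // reopening with a WalFilter set should replay each record through it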
+ // Create a test filter that would apply wal_processing_option at the first
+ // record
+ final int applyOptionForRecordIndex = 1;
+ try (final TestableWalFilter walFilter =
+ new TestableWalFilter(option, applyOptionForRecordIndex)) {
+
+ try (final Options options = optionsForLogIterTest();
+ final DBOptions dbOptions = new DBOptions(options)
+ .setWalFilter(walFilter)) {
+
+ try (final RocksDB db = RocksDB.open(dbOptions,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, cfHandles)) {
+
+ try {
+ assertThat(walFilter.logNumbers).isNotEmpty();
+ assertThat(walFilter.logFileNames).isNotEmpty();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ cfHandles.clear();
+ }
+ } catch (final RocksDBException e) {
+ if (option != WalProcessingOption.CORRUPTED_RECORD) {
+ // exception is expected when CORRUPTED_RECORD!
+ throw e;
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ private static class TestableWalFilter extends AbstractWalFilter {
+ private final WalProcessingOption walProcessingOption;
+ private final int applyOptionForRecordIndex;
+ Map<Integer, Long> cfLognumber;
+ Map<String, Integer> cfNameId;
+ final List<Long> logNumbers = new ArrayList<>();
+ final List<String> logFileNames = new ArrayList<>();
+ private int currentRecordIndex = 0;
+
+ public TestableWalFilter(final WalProcessingOption walProcessingOption,
+ final int applyOptionForRecordIndex) {
+ super();
+ this.walProcessingOption = walProcessingOption;
+ this.applyOptionForRecordIndex = applyOptionForRecordIndex;
+ }
+
+ @Override
+ public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
+ final Map<String, Integer> cfNameId) {
+ this.cfLognumber = cfLognumber;
+ this.cfNameId = cfNameId;
+ }
+
+ @Override
+ public LogRecordFoundResult logRecordFound(
+ final long logNumber, final String logFileName, final WriteBatch batch,
+ final WriteBatch newBatch) {
+
+ logNumbers.add(logNumber);
+ logFileNames.add(logFileName);
+
+ final WalProcessingOption optionToReturn;
+ if (currentRecordIndex == applyOptionForRecordIndex) {
+ optionToReturn = walProcessingOption;
+ }
+ else {
+ optionToReturn = WalProcessingOption.CONTINUE_PROCESSING;
+ }
+
+ currentRecordIndex++;
+
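+ // the second argument (batchChanged) is false because newBatch was
+ // left untouched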
+ return new LogRecordFoundResult(optionToReturn, false);
+ }
+
+ @Override
+ public String name() {
+ return "testable-wal-filter";
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
new file mode 100644
index 000000000..2826b128f
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
@@ -0,0 +1,76 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.rocksdb.util.CapturingWriteBatchHandler;
+import org.rocksdb.util.CapturingWriteBatchHandler.Event;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.rocksdb.util.CapturingWriteBatchHandler.Action.*;
+
+
+public class WriteBatchHandlerTest {
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Test
+ public void writeBatchHandler() throws RocksDBException {
+ // setup test data
+ final List<Event> testEvents = Arrays.asList(
+ new Event(DELETE, "k0".getBytes(), null),
+ new Event(PUT, "k1".getBytes(), "v1".getBytes()),
+ new Event(PUT, "k2".getBytes(), "v2".getBytes()),
+ new Event(PUT, "k3".getBytes(), "v3".getBytes()),
+ new Event(LOG, null, "log1".getBytes()),
+ new Event(MERGE, "k2".getBytes(), "v22".getBytes()),
+ new Event(DELETE, "k3".getBytes(), null)
+ );
+
+ // load test data to the write batch
+ try (final WriteBatch batch = new WriteBatch()) {
+ for (final Event testEvent : testEvents) {
+ switch (testEvent.action) {
+
+ case PUT:
+ batch.put(testEvent.key, testEvent.value);
+ break;
+
+ case MERGE:
+ batch.merge(testEvent.key, testEvent.value);
+ break;
+
+ case DELETE:
+ batch.delete(testEvent.key);
+ break;
+
+ case LOG:
+ batch.putLogData(testEvent.value);
+ break;
+ }
+ }
+
+ // attempt to read test data back from the WriteBatch by iterating
+ // with a handler
+ try (final CapturingWriteBatchHandler handler =
+ new CapturingWriteBatchHandler()) {
+ batch.iterate(handler);
+
+ // compare the results to the test data
+ final List<Event> actualEvents =
+ handler.getEvents();
+ assertThat(testEvents.size()).isEqualTo(actualEvents.size());
+
+ assertThat(testEvents).isEqualTo(actualEvents);
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java
new file mode 100644
index 000000000..f915c7dcb
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java
@@ -0,0 +1,528 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+package org.rocksdb;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.rocksdb.util.CapturingWriteBatchHandler.Action.DELETE;
+import static org.rocksdb.util.CapturingWriteBatchHandler.Action.DELETE_RANGE;
+import static org.rocksdb.util.CapturingWriteBatchHandler.Action.LOG;
+import static org.rocksdb.util.CapturingWriteBatchHandler.Action.MERGE;
+import static org.rocksdb.util.CapturingWriteBatchHandler.Action.PUT;
+import static org.rocksdb.util.CapturingWriteBatchHandler.Action.SINGLE_DELETE;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.ByteBuffer;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.rocksdb.util.CapturingWriteBatchHandler;
+import org.rocksdb.util.CapturingWriteBatchHandler.Event;
+import org.rocksdb.util.WriteBatchGetter;
+
+/**
+ * This class mimics the db/write_batch_test.cc
+ * in the c++ rocksdb library.
+ */
+public class WriteBatchTest {
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void emptyWriteBatch() {
+ try (final WriteBatch batch = new WriteBatch()) {
+ assertThat(batch.count()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void multipleBatchOperations()
+ throws RocksDBException {
+
+ final byte[] foo = "foo".getBytes(UTF_8);
+ final byte[] bar = "bar".getBytes(UTF_8);
+ final byte[] box = "box".getBytes(UTF_8);
+ final byte[] baz = "baz".getBytes(UTF_8);
+ final byte[] boo = "boo".getBytes(UTF_8);
+ final byte[] hoo = "hoo".getBytes(UTF_8);
+ final byte[] hello = "hello".getBytes(UTF_8);
+
+ try (final WriteBatch batch = new WriteBatch()) {
+ batch.put(foo, bar);
+ batch.delete(box);
+ batch.put(baz, boo);
+ batch.merge(baz, hoo);
+ batch.singleDelete(foo);
+ batch.deleteRange(baz, foo);
+ batch.putLogData(hello);
+
+ try(final CapturingWriteBatchHandler handler =
+ new CapturingWriteBatchHandler()) {
+ batch.iterate(handler);
+
+ assertThat(handler.getEvents().size()).isEqualTo(7);
+
+ assertThat(handler.getEvents().get(0)).isEqualTo(new Event(PUT, foo, bar));
+ assertThat(handler.getEvents().get(1)).isEqualTo(new Event(DELETE, box, null));
+ assertThat(handler.getEvents().get(2)).isEqualTo(new Event(PUT, baz, boo));
+ assertThat(handler.getEvents().get(3)).isEqualTo(new Event(MERGE, baz, hoo));
+ assertThat(handler.getEvents().get(4)).isEqualTo(new Event(SINGLE_DELETE, foo, null));
+ assertThat(handler.getEvents().get(5)).isEqualTo(new Event(DELETE_RANGE, baz, foo));
+ assertThat(handler.getEvents().get(6)).isEqualTo(new Event(LOG, null, hello));
+ }
+ }
+ }
+
+ @Test
+ public void multipleBatchOperationsDirect()
+ throws UnsupportedEncodingException, RocksDBException {
+ try (WriteBatch batch = new WriteBatch()) {
+ ByteBuffer key = ByteBuffer.allocateDirect(16);
+ ByteBuffer value = ByteBuffer.allocateDirect(16);
+ key.put("foo".getBytes("US-ASCII")).flip();
+ value.put("bar".getBytes("US-ASCII")).flip();
+ batch.put(key, value);
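+ // the direct-buffer put should consume both buffers, leaving
+ // position == limit == 3 ("foo" and "bar" are 3 bytes each)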
+ assertThat(key.position()).isEqualTo(3);
+ assertThat(key.limit()).isEqualTo(3);
+ assertThat(value.position()).isEqualTo(3);
+ assertThat(value.limit()).isEqualTo(3);
+
+ key.clear();
+ key.put("box".getBytes("US-ASCII")).flip();
+ batch.remove(key);
+ assertThat(key.position()).isEqualTo(3);
+ assertThat(key.limit()).isEqualTo(3);
+
+ batch.put("baz".getBytes("US-ASCII"), "boo".getBytes("US-ASCII"));
+
+ WriteBatchTestInternalHelper.setSequence(batch, 100);
+ assertThat(WriteBatchTestInternalHelper.sequence(batch)).isNotNull().isEqualTo(100);
+ assertThat(batch.count()).isEqualTo(3);
+ assertThat(new String(getContents(batch), "US-ASCII"))
+ .isEqualTo("Put(baz, boo)@102"
+ + "Delete(box)@101"
+ + "Put(foo, bar)@100");
+ }
+ }
+
+ @Test
+ public void testAppendOperation()
+ throws RocksDBException {
+ try (final WriteBatch b1 = new WriteBatch();
+ final WriteBatch b2 = new WriteBatch()) {
+ WriteBatchTestInternalHelper.setSequence(b1, 200);
+ WriteBatchTestInternalHelper.setSequence(b2, 300);
+ WriteBatchTestInternalHelper.append(b1, b2);
+ assertThat(getContents(b1).length).isEqualTo(0);
+ assertThat(b1.count()).isEqualTo(0);
+ b2.put("a".getBytes(UTF_8), "va".getBytes(UTF_8));
+ WriteBatchTestInternalHelper.append(b1, b2);
+ assertThat("Put(a, va)@200".equals(new String(getContents(b1),
+ UTF_8)));
+ assertThat(b1.count()).isEqualTo(1);
+ b2.clear();
+ b2.put("b".getBytes(UTF_8), "vb".getBytes(UTF_8));
+ WriteBatchTestInternalHelper.append(b1, b2);
+ assertThat(("Put(a, va)@200" +
+ "Put(b, vb)@201")
+ .equals(new String(getContents(b1), UTF_8)));
+ assertThat(b1.count()).isEqualTo(2);
+ b2.delete("foo".getBytes(UTF_8));
+ WriteBatchTestInternalHelper.append(b1, b2);
+ assertThat(("Put(a, va)@200" +
+ "Put(b, vb)@202" +
+ "Put(b, vb)@201" +
+ "Delete(foo)@203")
+ .equals(new String(getContents(b1), UTF_8)));
+ assertThat(b1.count()).isEqualTo(4);
+ }
+ }
+
+ @Test
+ public void blobOperation()
+ throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ batch.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8));
+ batch.put("k2".getBytes(UTF_8), "v2".getBytes(UTF_8));
+ batch.put("k3".getBytes(UTF_8), "v3".getBytes(UTF_8));
+ batch.putLogData("blob1".getBytes(UTF_8));
+ batch.delete("k2".getBytes(UTF_8));
+ batch.putLogData("blob2".getBytes(UTF_8));
+ batch.merge("foo".getBytes(UTF_8), "bar".getBytes(UTF_8));
+ assertThat(batch.count()).isEqualTo(5);
+ assertThat(("Merge(foo, bar)@4" +
+ "Put(k1, v1)@0" +
+ "Delete(k2)@3" +
+ "Put(k2, v2)@1" +
+ "Put(k3, v3)@2")
+ .equals(new String(getContents(batch), UTF_8)));
+ }
+ }
+
+ @Test
+ public void savePoints()
+ throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ batch.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8));
+ batch.put("k2".getBytes(UTF_8), "v2".getBytes(UTF_8));
+ batch.put("k3".getBytes(UTF_8), "v3".getBytes(UTF_8));
+
+ assertThat(getFromWriteBatch(batch, "k1")).isEqualTo("v1");
+ assertThat(getFromWriteBatch(batch, "k2")).isEqualTo("v2");
+ assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3");
+
+ batch.setSavePoint();
+
+ batch.delete("k2".getBytes(UTF_8));
+ batch.put("k3".getBytes(UTF_8), "v3-2".getBytes(UTF_8));
+
+ assertThat(getFromWriteBatch(batch, "k2")).isNull();
+ assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-2");
+
+
+ batch.setSavePoint();
+
+ batch.put("k3".getBytes(UTF_8), "v3-3".getBytes(UTF_8));
+ batch.put("k4".getBytes(UTF_8), "v4".getBytes(UTF_8));
+
+ assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-3");
+ assertThat(getFromWriteBatch(batch, "k4")).isEqualTo("v4");
+
+
+ batch.rollbackToSavePoint();
+
+ assertThat(getFromWriteBatch(batch, "k2")).isNull();
+ assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-2");
+ assertThat(getFromWriteBatch(batch, "k4")).isNull();
+
+
+ batch.rollbackToSavePoint();
+
+ assertThat(getFromWriteBatch(batch, "k1")).isEqualTo("v1");
+ assertThat(getFromWriteBatch(batch, "k2")).isEqualTo("v2");
+ assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3");
+ assertThat(getFromWriteBatch(batch, "k4")).isNull();
+ }
+ }
+
+ @Test
+ public void deleteRange() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final WriteBatch batch = new WriteBatch();
+ final WriteOptions wOpt = new WriteOptions()) {
+ db.put("key1".getBytes(), "value".getBytes());
+ db.put("key2".getBytes(), "12345678".getBytes());
+ db.put("key3".getBytes(), "abcdefg".getBytes());
+ db.put("key4".getBytes(), "xyz".getBytes());
+ assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
+ assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes());
+ assertThat(db.get("key3".getBytes())).isEqualTo("abcdefg".getBytes());
+ assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
+
+ batch.deleteRange("key2".getBytes(), "key4".getBytes());
+ db.write(wOpt, batch);
+
+ assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
+ assertThat(db.get("key2".getBytes())).isNull();
+ assertThat(db.get("key3".getBytes())).isNull();
+ assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
+ }
+ }
+
+ @Test
+ public void restorePoints() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+
+ batch.put("k1".getBytes(), "v1".getBytes());
+ batch.put("k2".getBytes(), "v2".getBytes());
+
+ batch.setSavePoint();
+
+ batch.put("k1".getBytes(), "123456789".getBytes());
+ batch.delete("k2".getBytes());
+
+ batch.rollbackToSavePoint();
+
+ try(final CapturingWriteBatchHandler handler = new CapturingWriteBatchHandler()) {
+ batch.iterate(handler);
+
+ assertThat(handler.getEvents().size()).isEqualTo(2);
+ assertThat(handler.getEvents().get(0)).isEqualTo(new Event(PUT, "k1".getBytes(), "v1".getBytes()));
+ assertThat(handler.getEvents().get(1)).isEqualTo(new Event(PUT, "k2".getBytes(), "v2".getBytes()));
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void restorePoints_withoutSavePoints() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ batch.rollbackToSavePoint();
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void restorePoints_withoutSavePoints_nested() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+
+ batch.setSavePoint();
+ batch.rollbackToSavePoint();
+
+ // without previous corresponding setSavePoint
+ batch.rollbackToSavePoint();
+ }
+ }
+
+ @Test
+ public void popSavePoint() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+
+ batch.put("k1".getBytes(), "v1".getBytes());
+ batch.put("k2".getBytes(), "v2".getBytes());
+
+ batch.setSavePoint();
+
+ batch.put("k1".getBytes(), "123456789".getBytes());
+ batch.delete("k2".getBytes());
+
+ batch.setSavePoint();
+
+ batch.popSavePoint();
+
+ batch.rollbackToSavePoint();
+
+ try(final CapturingWriteBatchHandler handler = new CapturingWriteBatchHandler()) {
+ batch.iterate(handler);
+
+ assertThat(handler.getEvents().size()).isEqualTo(2);
+ assertThat(handler.getEvents().get(0)).isEqualTo(new Event(PUT, "k1".getBytes(), "v1".getBytes()));
+ assertThat(handler.getEvents().get(1)).isEqualTo(new Event(PUT, "k2".getBytes(), "v2".getBytes()));
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void popSavePoint_withoutSavePoints() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ batch.popSavePoint();
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void popSavePoint_withoutSavePoints_nested() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+
+ batch.setSavePoint();
+ batch.popSavePoint();
+
+ // without previous corresponding setSavePoint
+ batch.popSavePoint();
+ }
+ }
+
+ @Test
+ public void maxBytes() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ batch.setMaxBytes(19);
+
+ batch.put("k1".getBytes(), "v1".getBytes());
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void maxBytes_over() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ batch.setMaxBytes(1);
+
+ batch.put("k1".getBytes(), "v1".getBytes());
+ }
+ }
+
+ @Test
+ public void data() throws RocksDBException {
+ try (final WriteBatch batch1 = new WriteBatch()) {
+ batch1.delete("k0".getBytes());
+ batch1.put("k1".getBytes(), "v1".getBytes());
+ batch1.put("k2".getBytes(), "v2".getBytes());
+ batch1.put("k3".getBytes(), "v3".getBytes());
+ batch1.putLogData("log1".getBytes());
+ batch1.merge("k2".getBytes(), "v22".getBytes());
+ batch1.delete("k3".getBytes());
+
+ final byte[] serialized = batch1.data();
+
+ try(final WriteBatch batch2 = new WriteBatch(serialized)) {
+ assertThat(batch2.count()).isEqualTo(batch1.count());
+
+ try(final CapturingWriteBatchHandler handler1 = new CapturingWriteBatchHandler()) {
+ batch1.iterate(handler1);
+
+ try (final CapturingWriteBatchHandler handler2 = new CapturingWriteBatchHandler()) {
+ batch2.iterate(handler2);
+
+ assertThat(handler1.getEvents().equals(handler2.getEvents())).isTrue();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void dataSize() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ batch.put("k1".getBytes(), "v1".getBytes());
+
+ assertThat(batch.getDataSize()).isEqualTo(19);
+ }
+ }
+
+ @Test
+ public void hasPut() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ assertThat(batch.hasPut()).isFalse();
+
+ batch.put("k1".getBytes(), "v1".getBytes());
+
+ assertThat(batch.hasPut()).isTrue();
+ }
+ }
+
+ @Test
+ public void hasDelete() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ assertThat(batch.hasDelete()).isFalse();
+
+ batch.delete("k1".getBytes());
+
+ assertThat(batch.hasDelete()).isTrue();
+ }
+ }
+
+ @Test
+ public void hasSingleDelete() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ assertThat(batch.hasSingleDelete()).isFalse();
+
+ batch.singleDelete("k1".getBytes());
+
+ assertThat(batch.hasSingleDelete()).isTrue();
+ }
+ }
+
+ @Test
+ public void hasDeleteRange() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ assertThat(batch.hasDeleteRange()).isFalse();
+
+ batch.deleteRange("k1".getBytes(), "k2".getBytes());
+
+ assertThat(batch.hasDeleteRange()).isTrue();
+ }
+ }
+
+ @Test
+ public void hasBeginPrepareRange() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ assertThat(batch.hasBeginPrepare()).isFalse();
+ }
+ }
+
+ @Test
+ public void hasEndPrepareRange() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ assertThat(batch.hasEndPrepare()).isFalse();
+ }
+ }
+
+ @Test
+ public void hasCommit() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ assertThat(batch.hasCommit()).isFalse();
+ }
+ }
+
+ @Test
+ public void hasRollback() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ assertThat(batch.hasRollback()).isFalse();
+ }
+ }
+
+ @Test
+ public void walTerminationPoint() throws RocksDBException {
+ try (final WriteBatch batch = new WriteBatch()) {
+ WriteBatch.SavePoint walTerminationPoint = batch.getWalTerminationPoint();
+ assertThat(walTerminationPoint.isCleared()).isTrue();
+
+ batch.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8));
+
+ batch.markWalTerminationPoint();
+
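+ // the save point should now cover the single 19 byte put; a content
+ // flags value of 2 presumably corresponds to the batch's HAS_PUT bit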
+ walTerminationPoint = batch.getWalTerminationPoint();
+ assertThat(walTerminationPoint.getSize()).isEqualTo(19);
+ assertThat(walTerminationPoint.getCount()).isEqualTo(1);
+ assertThat(walTerminationPoint.getContentFlags()).isEqualTo(2);
+ }
+ }
+
+ @Test
+ public void getWriteBatch() {
+ try (final WriteBatch batch = new WriteBatch()) {
+ assertThat(batch.getWriteBatch()).isEqualTo(batch);
+ }
+ }
+
+ static byte[] getContents(final WriteBatch wb) {
+ return getContents(wb.nativeHandle_);
+ }
+
+ static String getFromWriteBatch(final WriteBatch wb, final String key)
+ throws RocksDBException {
+ final WriteBatchGetter getter =
+ new WriteBatchGetter(key.getBytes(UTF_8));
+ wb.iterate(getter);
+ if(getter.getValue() != null) {
+ return new String(getter.getValue(), UTF_8);
+ } else {
+ return null;
+ }
+ }
+
+ private static native byte[] getContents(final long writeBatchHandle);
+}
+
+/**
+ * Package-private class which provides java api to access
+ * c++ WriteBatchInternal.
+ */
+class WriteBatchTestInternalHelper {
+ static void setSequence(final WriteBatch wb, final long sn) {
+ setSequence(wb.nativeHandle_, sn);
+ }
+
+ static long sequence(final WriteBatch wb) {
+ return sequence(wb.nativeHandle_);
+ }
+
+ static void append(final WriteBatch wb1, final WriteBatch wb2) {
+ append(wb1.nativeHandle_, wb2.nativeHandle_);
+ }
+
+ private static native void setSequence(final long writeBatchHandle,
+ final long sn);
+
+ private static native long sequence(final long writeBatchHandle);
+
+ private static native void append(final long writeBatchHandle1,
+ final long writeBatchHandle2);
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
new file mode 100644
index 000000000..c5090dbce
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
@@ -0,0 +1,104 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+import java.util.concurrent.*;
+
+@RunWith(Parameterized.class)
+public class WriteBatchThreadedTest {
+
+ @Parameters(name = "WriteBatchThreadedTest(threadCount={0})")
+ public static Iterable<Integer> data() {
+ return Arrays.asList(new Integer[]{1, 10, 50, 100});
+ }
+
+ @Parameter
+ public int threadCount;
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ RocksDB db;
+
+ @Before
+ public void setUp() throws Exception {
+ RocksDB.loadLibrary();
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setIncreaseParallelism(32);
+ db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
+ assert (db != null);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (db != null) {
+ db.close();
+ }
+ }
+
+ @Test
+ public void threadedWrites() throws InterruptedException, ExecutionException {
+ final List<Callable<Void>> callables = new ArrayList<>();
+ for (int i = 0; i < 100; i++) {
+ final int offset = i * 100;
+ callables.add(new Callable<Void>() {
+ @Override
+ public Void call() throws RocksDBException {
+ try (final WriteBatch wb = new WriteBatch();
+ final WriteOptions w_opt = new WriteOptions()) {
+ for (int i = offset; i < offset + 100; i++) {
+ wb.put(ByteBuffer.allocate(4).putInt(i).array(), "parallel rocks test".getBytes());
+ }
+ db.write(w_opt, wb);
+ }
+ return null;
+ }
+ });
+ }
+
+ //submit the callables
+ final ExecutorService executorService =
+ Executors.newFixedThreadPool(threadCount);
+ try {
+ final ExecutorCompletionService<Void> completionService =
+ new ExecutorCompletionService<>(executorService);
+ final Set<Future<Void>> futures = new HashSet<>();
+ for (final Callable<Void> callable : callables) {
+ futures.add(completionService.submit(callable));
+ }
+
+ while (futures.size() > 0) {
+ final Future<Void> future = completionService.take();
+ futures.remove(future);
+
+ try {
+ future.get();
+ } catch (final ExecutionException e) {
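+ // one failed write fails the whole test; cancel the outstanding
+ // writers before rethrowing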
+ for (final Future<Void> f : futures) {
+ f.cancel(true);
+ }
+
+ throw e;
+ }
+ }
+ } finally {
+ executorService.shutdown();
+ executorService.awaitTermination(10, TimeUnit.SECONDS);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
new file mode 100644
index 000000000..01eb652f1
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
@@ -0,0 +1,566 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+package org.rocksdb;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+public class WriteBatchWithIndexTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void readYourOwnWrites() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ final byte[] k1 = "key1".getBytes();
+ final byte[] v1 = "value1".getBytes();
+ final byte[] k2 = "key2".getBytes();
+ final byte[] v2 = "value2".getBytes();
+
+ db.put(k1, v1);
+ db.put(k2, v2);
+
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
+ final RocksIterator base = db.newIterator();
+ final RocksIterator it = wbwi.newIteratorWithBase(base)) {
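+ // the iterator overlays the batch's pending writes on top of the base
+ // iterator from the database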
+ it.seek(k1);
+ assertThat(it.isValid()).isTrue();
+ assertThat(it.key()).isEqualTo(k1);
+ assertThat(it.value()).isEqualTo(v1);
+
+ it.seek(k2);
+ assertThat(it.isValid()).isTrue();
+ assertThat(it.key()).isEqualTo(k2);
+ assertThat(it.value()).isEqualTo(v2);
+
+ //put data to the write batch and make sure we can read it.
+ final byte[] k3 = "key3".getBytes();
+ final byte[] v3 = "value3".getBytes();
+ wbwi.put(k3, v3);
+ it.seek(k3);
+ assertThat(it.isValid()).isTrue();
+ assertThat(it.key()).isEqualTo(k3);
+ assertThat(it.value()).isEqualTo(v3);
+
+ //update k2 in the write batch and check the value
+ final byte[] v2Other = "otherValue2".getBytes();
+ wbwi.put(k2, v2Other);
+ it.seek(k2);
+ assertThat(it.isValid()).isTrue();
+ assertThat(it.key()).isEqualTo(k2);
+ assertThat(it.value()).isEqualTo(v2Other);
+
+ //delete k1 and make sure we can read back the write
+ wbwi.delete(k1);
+ it.seek(k1);
+ assertThat(it.key()).isNotEqualTo(k1);
+
+ //reinsert k1 and make sure we see the new value
+ final byte[] v1Other = "otherValue1".getBytes();
+ wbwi.put(k1, v1Other);
+ it.seek(k1);
+ assertThat(it.isValid()).isTrue();
+ assertThat(it.key()).isEqualTo(k1);
+ assertThat(it.value()).isEqualTo(v1Other);
+
+ //single remove k3 and make sure we can read back the write
+ wbwi.singleDelete(k3);
+ it.seek(k3);
+ assertThat(it.isValid()).isFalse();
+
+ //reinsert k3 and make sure we see the new value
+ final byte[] v3Other = "otherValue3".getBytes();
+ wbwi.put(k3, v3Other);
+ it.seek(k3);
+ assertThat(it.isValid()).isTrue();
+ assertThat(it.key()).isEqualTo(k3);
+ assertThat(it.value()).isEqualTo(v3Other);
+ }
+ }
+ }
+
+ @Test
+ public void writeBatchWithIndex() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ final byte[] k1 = "key1".getBytes();
+ final byte[] v1 = "value1".getBytes();
+ final byte[] k2 = "key2".getBytes();
+ final byte[] v2 = "value2".getBytes();
+
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex();
+ final WriteOptions wOpt = new WriteOptions()) {
+ wbwi.put(k1, v1);
+ wbwi.put(k2, v2);
+
+ db.write(wOpt, wbwi);
+ }
+
+ assertThat(db.get(k1)).isEqualTo(v1);
+ assertThat(db.get(k2)).isEqualTo(v2);
+ }
+ }
+
+ @Test
+ public void write_writeBatchWithIndexDirect() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+ ByteBuffer k1 = ByteBuffer.allocateDirect(16);
+ ByteBuffer v1 = ByteBuffer.allocateDirect(16);
+ ByteBuffer k2 = ByteBuffer.allocateDirect(16);
+ ByteBuffer v2 = ByteBuffer.allocateDirect(16);
+ k1.put("key1".getBytes()).flip();
+ v1.put("value1".getBytes()).flip();
+ k2.put("key2".getBytes()).flip();
+ v2.put("value2".getBytes()).flip();
+
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
+ wbwi.put(k1, v1);
+ assertThat(k1.position()).isEqualTo(4);
+ assertThat(k1.limit()).isEqualTo(4);
+ assertThat(v1.position()).isEqualTo(6);
+ assertThat(v1.limit()).isEqualTo(6);
+
+ wbwi.put(k2, v2);
+
+ db.write(new WriteOptions(), wbwi);
+ }
+
+ assertThat(db.get("key1".getBytes())).isEqualTo("value1".getBytes());
+ assertThat(db.get("key2".getBytes())).isEqualTo("value2".getBytes());
+ }
+ }
+
+ @Test
+ public void iterator() throws RocksDBException {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) {
+
+ final String k1 = "key1";
+ final String v1 = "value1";
+ final String k2 = "key2";
+ final String v2 = "value2";
+ final String k3 = "key3";
+ final String v3 = "value3";
+ final String k4 = "key4";
+ final String k5 = "key5";
+ final String k6 = "key6";
+ final String k7 = "key7";
+ final String v8 = "value8";
+ final byte[] k1b = k1.getBytes(UTF_8);
+ final byte[] v1b = v1.getBytes(UTF_8);
+ final byte[] k2b = k2.getBytes(UTF_8);
+ final byte[] v2b = v2.getBytes(UTF_8);
+ final byte[] k3b = k3.getBytes(UTF_8);
+ final byte[] v3b = v3.getBytes(UTF_8);
+ final byte[] k4b = k4.getBytes(UTF_8);
+ final byte[] k5b = k5.getBytes(UTF_8);
+ final byte[] k6b = k6.getBytes(UTF_8);
+ final byte[] k7b = k7.getBytes(UTF_8);
+ final byte[] v8b = v8.getBytes(UTF_8);
+
+ // add put records
+ wbwi.put(k1b, v1b);
+ wbwi.put(k2b, v2b);
+ wbwi.put(k3b, v3b);
+
+ // add a deletion record
+ wbwi.delete(k4b);
+
+ // add a single deletion record
+ wbwi.singleDelete(k5b);
+
+ // add a log record
+ wbwi.putLogData(v8b);
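+ // (log data is not indexed, so no LOG entry appears among the expected
+ // write entries or in the iteration below)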
+
+ final WBWIRocksIterator.WriteEntry[] expected = {
+ new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
+ new DirectSlice(k1), new DirectSlice(v1)),
+ new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
+ new DirectSlice(k2), new DirectSlice(v2)),
+ new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
+ new DirectSlice(k3), new DirectSlice(v3)),
+ new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.DELETE,
+ new DirectSlice(k4), DirectSlice.NONE),
+ new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.SINGLE_DELETE,
+ new DirectSlice(k5), DirectSlice.NONE),
+ };
+
+ try (final WBWIRocksIterator it = wbwi.newIterator()) {
+ // direct access - seek to key offsets
+ final int[] testOffsets = {2, 0, 3, 4, 1};
+
+ for (int i = 0; i < testOffsets.length; i++) {
+ final int testOffset = testOffsets[i];
+ final byte[] key = toArray(expected[testOffset].getKey().data());
+
+ it.seek(key);
+ assertThat(it.isValid()).isTrue();
+
+ final WBWIRocksIterator.WriteEntry entry = it.entry();
+ assertThat(entry).isEqualTo(expected[testOffset]);
+
+ // Direct buffer seek
+ expected[testOffset].getKey().data().mark();
+ ByteBuffer db = expected[testOffset].getKey().data();
+ it.seek(db);
+ assertThat(db.position()).isEqualTo(key.length);
+ assertThat(it.isValid()).isTrue();
+ }
+
+ // forward iterative access
+ int i = 0;
+ for (it.seekToFirst(); it.isValid(); it.next()) {
+ assertThat(it.entry()).isEqualTo(expected[i++]);
+ }
+
+ // reverse iterative access
+ i = expected.length - 1;
+ for (it.seekToLast(); it.isValid(); it.prev()) {
+ assertThat(it.entry()).isEqualTo(expected[i--]);
+ }
+ }
+ }
+ }
+
+ @Test
+ public void zeroByteTests() throws RocksDBException {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) {
+ final byte[] zeroByteValue = new byte[]{0, 0};
+ // add a key/value pair whose bytes are all zero
+ wbwi.put(zeroByteValue, zeroByteValue);
+
+ final ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length);
+ buffer.put(zeroByteValue);
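+ // editorial note (an assumption about the native side): the explicit
+ // length passed to DirectSlice below matters because the data contains
+ // embedded zero bytes, which could otherwise be mistaken for a
+ // NUL terminator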
+
+ final WBWIRocksIterator.WriteEntry expected =
+ new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
+ new DirectSlice(buffer, zeroByteValue.length),
+ new DirectSlice(buffer, zeroByteValue.length));
+
+ try (final WBWIRocksIterator it = wbwi.newIterator()) {
+ it.seekToFirst();
+ final WBWIRocksIterator.WriteEntry actual = it.entry();
+ assertThat(actual).isEqualTo(expected);
+ assertThat(it.entry().hashCode()).isEqualTo(expected.hashCode());
+ }
+ }
+ }
+
+ @Test
+ public void savePoints() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
+ final ReadOptions readOptions = new ReadOptions()) {
+ wbwi.put("k1".getBytes(), "v1".getBytes());
+ wbwi.put("k2".getBytes(), "v2".getBytes());
+ wbwi.put("k3".getBytes(), "v3".getBytes());
+
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k1"))
+ .isEqualTo("v1");
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2"))
+ .isEqualTo("v2");
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3"))
+ .isEqualTo("v3");
+
+
+ wbwi.setSavePoint();
+
+ wbwi.delete("k2".getBytes());
+ wbwi.put("k3".getBytes(), "v3-2".getBytes());
+
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2"))
+ .isNull();
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3"))
+ .isEqualTo("v3-2");
+
+
+ wbwi.setSavePoint();
+
+ wbwi.put("k3".getBytes(), "v3-3".getBytes());
+ wbwi.put("k4".getBytes(), "v4".getBytes());
+
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3"))
+ .isEqualTo("v3-3");
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4"))
+ .isEqualTo("v4");
+
+
+ wbwi.rollbackToSavePoint();
+
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2"))
+ .isNull();
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3"))
+ .isEqualTo("v3-2");
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4"))
+ .isNull();
+
+
+ wbwi.rollbackToSavePoint();
+
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k1"))
+ .isEqualTo("v1");
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2"))
+ .isEqualTo("v2");
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3"))
+ .isEqualTo("v3");
+ assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4"))
+ .isNull();
+ }
+ }
+ }
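+
+ // A compact editorial sketch of the stack-like save point semantics the
+ // test above exercises: each setSavePoint() pushes a marker, and each
+ // rollbackToSavePoint() pops one, undoing the writes made since it:
+ //
+ // {k1,k2,k3} -push-> {k1,k3'} -push-> {k1,k3'',k4}
+ // {k1,k3'',k4} -rollback-> {k1,k3'} -rollback-> {k1,k2,k3}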
+
+ @Test
+ public void restorePoints() throws RocksDBException {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
+
+ wbwi.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8));
+ wbwi.put("k2".getBytes(UTF_8), "v2".getBytes(UTF_8));
+
+ wbwi.setSavePoint();
+
+ wbwi.put("k1".getBytes(UTF_8), "123456789".getBytes(UTF_8));
+ wbwi.delete("k2".getBytes(UTF_8));
+
+ wbwi.rollbackToSavePoint();
+
+ try (final DBOptions options = new DBOptions()) {
+ assertThat(wbwi.getFromBatch(options, "k1".getBytes(UTF_8))).isEqualTo("v1".getBytes(UTF_8));
+ assertThat(wbwi.getFromBatch(options, "k2".getBytes(UTF_8))).isEqualTo("v2".getBytes(UTF_8));
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void restorePoints_withoutSavePoints() throws RocksDBException {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
+ wbwi.rollbackToSavePoint();
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void restorePoints_withoutSavePoints_nested() throws RocksDBException {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
+
+ wbwi.setSavePoint();
+ wbwi.rollbackToSavePoint();
+
+ // without previous corresponding setSavePoint
+ wbwi.rollbackToSavePoint();
+ }
+ }
+
+ @Test
+ public void popSavePoint() throws RocksDBException {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
+
+ wbwi.put("k1".getBytes(), "v1".getBytes());
+ wbwi.put("k2".getBytes(), "v2".getBytes());
+
+ wbwi.setSavePoint();
+
+ wbwi.put("k1".getBytes(), "123456789".getBytes());
+ wbwi.delete("k2".getBytes());
+
+ wbwi.setSavePoint();
+
+ wbwi.popSavePoint();
+
+ wbwi.rollbackToSavePoint();
+
+ try (final DBOptions options = new DBOptions()) {
+ assertThat(wbwi.getFromBatch(options, "k1".getBytes(UTF_8))).isEqualTo("v1".getBytes(UTF_8));
+ assertThat(wbwi.getFromBatch(options, "k2".getBytes(UTF_8))).isEqualTo("v2".getBytes(UTF_8));
+ }
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void popSavePoint_withoutSavePoints() throws RocksDBException {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
+ wbwi.popSavePoint();
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void popSavePoint_withoutSavePoints_nested() throws RocksDBException {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
+
+ wbwi.setSavePoint();
+ wbwi.popSavePoint();
+
+ // without previous corresponding setSavePoint
+ wbwi.popSavePoint();
+ }
+ }
+
+ @Test
+ public void maxBytes() throws RocksDBException {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
+ wbwi.setMaxBytes(19);
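+ // editorial note, assuming the usual WriteBatch wire format: 19 bytes
+ // is just enough for the 12-byte batch header (8-byte sequence +
+ // 4-byte count) plus one put record -- a 1-byte type tag and the
+ // length-prefixed "k1" and "v1" (3 bytes each)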
+
+ wbwi.put("k1".getBytes(), "v1".getBytes());
+ }
+ }
+
+ @Test(expected = RocksDBException.class)
+ public void maxBytes_over() throws RocksDBException {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
+ wbwi.setMaxBytes(1);
+
+ wbwi.put("k1".getBytes(), "v1".getBytes());
+ }
+ }
+
+ @Test
+ public void getWriteBatch() {
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) {
+
+ final WriteBatch wb = wbwi.getWriteBatch();
+ assertThat(wb).isNotNull();
+ assertThat(wb.isOwningHandle()).isFalse();
+ }
+ }
+
+ private static String getFromWriteBatchWithIndex(final RocksDB db,
+ final ReadOptions readOptions, final WriteBatchWithIndex wbwi,
+ final String skey) {
+ final byte[] key = skey.getBytes();
+ try (final RocksIterator baseIterator = db.newIterator(readOptions);
+ final RocksIterator iterator = wbwi.newIteratorWithBase(baseIterator)) {
+ iterator.seek(key);
+
+ // Arrays.equals(key, iterator.key()) ensures an exact key match in
+ // RocksDB, rather than the nearest match that seek() alone would find
+ return iterator.isValid() &&
+ Arrays.equals(key, iterator.key()) ?
+ new String(iterator.value()) : null;
+ }
+ }
+
+ @Test
+ public void getFromBatch() throws RocksDBException {
+ final byte[] k1 = "k1".getBytes();
+ final byte[] k2 = "k2".getBytes();
+ final byte[] k3 = "k3".getBytes();
+ final byte[] k4 = "k4".getBytes();
+
+ final byte[] v1 = "v1".getBytes();
+ final byte[] v2 = "v2".getBytes();
+ final byte[] v3 = "v3".getBytes();
+
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
+ final DBOptions dbOptions = new DBOptions()) {
+ wbwi.put(k1, v1);
+ wbwi.put(k2, v2);
+ wbwi.put(k3, v3);
+
+ assertThat(wbwi.getFromBatch(dbOptions, k1)).isEqualTo(v1);
+ assertThat(wbwi.getFromBatch(dbOptions, k2)).isEqualTo(v2);
+ assertThat(wbwi.getFromBatch(dbOptions, k3)).isEqualTo(v3);
+ assertThat(wbwi.getFromBatch(dbOptions, k4)).isNull();
+
+ wbwi.delete(k2);
+
+ assertThat(wbwi.getFromBatch(dbOptions, k2)).isNull();
+ }
+ }
+
+ @Test
+ public void getFromBatchAndDB() throws RocksDBException {
+ final byte[] k1 = "k1".getBytes();
+ final byte[] k2 = "k2".getBytes();
+ final byte[] k3 = "k3".getBytes();
+ final byte[] k4 = "k4".getBytes();
+
+ final byte[] v1 = "v1".getBytes();
+ final byte[] v2 = "v2".getBytes();
+ final byte[] v3 = "v3".getBytes();
+ final byte[] v4 = "v4".getBytes();
+
+ try (final Options options = new Options().setCreateIfMissing(true);
+ final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ db.put(k1, v1);
+ db.put(k2, v2);
+ db.put(k4, v4);
+
+ try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
+ final DBOptions dbOptions = new DBOptions();
+ final ReadOptions readOptions = new ReadOptions()) {
+
+ assertThat(wbwi.getFromBatch(dbOptions, k1)).isNull();
+ assertThat(wbwi.getFromBatch(dbOptions, k2)).isNull();
+ assertThat(wbwi.getFromBatch(dbOptions, k4)).isNull();
+
+ wbwi.put(k3, v3);
+
+ assertThat(wbwi.getFromBatch(dbOptions, k3)).isEqualTo(v3);
+
+ assertThat(wbwi.getFromBatchAndDB(db, readOptions, k1)).isEqualTo(v1);
+ assertThat(wbwi.getFromBatchAndDB(db, readOptions, k2)).isEqualTo(v2);
+ assertThat(wbwi.getFromBatchAndDB(db, readOptions, k3)).isEqualTo(v3);
+ assertThat(wbwi.getFromBatchAndDB(db, readOptions, k4)).isEqualTo(v4);
+
+ wbwi.delete(k4);
+
+ assertThat(wbwi.getFromBatchAndDB(db, readOptions, k4)).isNull();
+ }
+ }
+ }
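+
+ // Editorial note: the lookups above behave like an overlay -- the batch
+ // shadows the database, roughly
+ // value(k) = batch.contains(k) ? batch[k] : db.get(k)
+ // with a delete entry in the batch hiding any value stored in the db.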
+
+ private byte[] toArray(final ByteBuffer buf) {
+ final byte[] ary = new byte[buf.remaining()];
+ buf.get(ary);
+ return ary;
+ }
+
+ @Test
+ public void deleteRange() throws RocksDBException {
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final WriteBatch batch = new WriteBatch();
+ final WriteOptions wOpt = new WriteOptions()) {
+ db.put("key1".getBytes(), "value".getBytes());
+ db.put("key2".getBytes(), "12345678".getBytes());
+ db.put("key3".getBytes(), "abcdefg".getBytes());
+ db.put("key4".getBytes(), "xyz".getBytes());
+ assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
+ assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes());
+ assertThat(db.get("key3".getBytes())).isEqualTo("abcdefg".getBytes());
+ assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
+
+ batch.deleteRange("key2".getBytes(), "key4".getBytes());
+ db.write(wOpt, batch);
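+ // deleteRange covers the half-open interval [beginKey, endKey): key2
+ // and key3 fall inside it, while key4 (the exclusive end) survives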
+
+ assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
+ assertThat(db.get("key2".getBytes())).isNull();
+ assertThat(db.get("key3".getBytes())).isNull();
+ assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes());
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java
new file mode 100644
index 000000000..1d5f3cc8d
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java
@@ -0,0 +1,69 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import java.util.Random;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class WriteOptionsTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ public static final Random rand =
+ PlatformRandomHelper.getPlatformSpecificRandomFactory();
+
+ @Test
+ public void writeOptions() {
+ try (final WriteOptions writeOptions = new WriteOptions()) {
+
+ writeOptions.setSync(true);
+ assertThat(writeOptions.sync()).isTrue();
+ writeOptions.setSync(false);
+ assertThat(writeOptions.sync()).isFalse();
+
+ writeOptions.setDisableWAL(true);
+ assertThat(writeOptions.disableWAL()).isTrue();
+ writeOptions.setDisableWAL(false);
+ assertThat(writeOptions.disableWAL()).isFalse();
+
+ writeOptions.setIgnoreMissingColumnFamilies(true);
+ assertThat(writeOptions.ignoreMissingColumnFamilies()).isTrue();
+ writeOptions.setIgnoreMissingColumnFamilies(false);
+ assertThat(writeOptions.ignoreMissingColumnFamilies()).isFalse();
+
+ writeOptions.setNoSlowdown(true);
+ assertThat(writeOptions.noSlowdown()).isTrue();
+ writeOptions.setNoSlowdown(false);
+ assertThat(writeOptions.noSlowdown()).isFalse();
+
+ writeOptions.setLowPri(true);
+ assertThat(writeOptions.lowPri()).isTrue();
+ writeOptions.setLowPri(false);
+ assertThat(writeOptions.lowPri()).isFalse();
+ }
+ }
+
+ @Test
+ public void copyConstructor() {
+ try (final WriteOptions origOpts = new WriteOptions()) {
+ origOpts.setDisableWAL(rand.nextBoolean());
+ origOpts.setIgnoreMissingColumnFamilies(rand.nextBoolean());
+ origOpts.setSync(rand.nextBoolean());
+ try (final WriteOptions copyOpts = new WriteOptions(origOpts)) {
+ assertThat(origOpts.disableWAL()).isEqualTo(copyOpts.disableWAL());
+ assertThat(origOpts.ignoreMissingColumnFamilies()).isEqualTo(
+ copyOpts.ignoreMissingColumnFamilies());
+ assertThat(origOpts.sync()).isEqualTo(copyOpts.sync());
+ }
+ }
+ }
+
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java b/src/rocksdb/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java
new file mode 100644
index 000000000..c4e4f25a0
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java
@@ -0,0 +1,21 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb.test;
+
+import org.rocksdb.AbstractCompactionFilter;
+import org.rocksdb.AbstractCompactionFilterFactory;
+import org.rocksdb.RemoveEmptyValueCompactionFilter;
+
+/**
+ * Simple CompactionFilterFactory class used in tests. Generates RemoveEmptyValueCompactionFilters.
+ */
+public class RemoveEmptyValueCompactionFilterFactory extends AbstractCompactionFilterFactory<RemoveEmptyValueCompactionFilter> {
+ @Override
+ public RemoveEmptyValueCompactionFilter createCompactionFilter(final AbstractCompactionFilter.Context context) {
+ return new RemoveEmptyValueCompactionFilter();
+ }
+
+ @Override
+ public String name() {
+ return "RemoveEmptyValueCompactionFilterFactory";
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java b/src/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java
new file mode 100644
index 000000000..42d3148ef
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java
@@ -0,0 +1,174 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb.test;
+
+import org.junit.internal.JUnitSystem;
+import org.junit.internal.RealSystem;
+import org.junit.internal.TextListener;
+import org.junit.runner.Description;
+import org.junit.runner.JUnitCore;
+import org.junit.runner.Result;
+import org.junit.runner.notification.Failure;
+import org.rocksdb.RocksDB;
+
+import java.io.PrintStream;
+import java.text.DecimalFormat;
+import java.text.NumberFormat;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.rocksdb.test.RocksJunitRunner.RocksJunitListener.Status.*;
+
+/**
+ * Custom JUnit runner which also prints the test classes
+ * and executed methods to the command prompt.
+ */
+public class RocksJunitRunner {
+
+ /**
+ * Listener which overrides default functionality
+ * to print class and method to system out.
+ */
+ static class RocksJunitListener extends TextListener {
+
+ private static final NumberFormat secsFormat =
+ new DecimalFormat("###,###.###");
+
+ private final PrintStream writer;
+
+ private String currentClassName = null;
+ private String currentMethodName = null;
+ private Status currentStatus = null;
+ private long currentTestsStartTime;
+ private int currentTestsCount = 0;
+ private int currentTestsIgnoredCount = 0;
+ private int currentTestsFailureCount = 0;
+ private int currentTestsErrorCount = 0;
+
+ enum Status {
+ IGNORED,
+ FAILURE,
+ ERROR,
+ OK
+ }
+
+ /**
+ * RocksJunitListener constructor
+ *
+ * @param system JUnitSystem
+ */
+ public RocksJunitListener(final JUnitSystem system) {
+ this(system.out());
+ }
+
+ public RocksJunitListener(final PrintStream writer) {
+ super(writer);
+ this.writer = writer;
+ }
+
+ @Override
+ public void testRunStarted(final Description description) {
+ writer.format("Starting RocksJava Tests...%n");
+
+ }
+
+ @Override
+ public void testStarted(final Description description) {
+ if(currentClassName == null
+ || !currentClassName.equals(description.getClassName())) {
+ if(currentClassName != null) {
+ printTestsSummary();
+ } else {
+ currentTestsStartTime = System.currentTimeMillis();
+ }
+ writer.format("%nRunning: %s%n", description.getClassName());
+ currentClassName = description.getClassName();
+ }
+ currentMethodName = description.getMethodName();
+ currentStatus = OK;
+ currentTestsCount++;
+ }
+
+ private void printTestsSummary() {
+ // print summary of last test set
+ writer.format("Tests run: %d, Failures: %d, Errors: %d, Ignored: %d, Time elapsed: %s sec%n",
+ currentTestsCount,
+ currentTestsFailureCount,
+ currentTestsErrorCount,
+ currentTestsIgnoredCount,
+ formatSecs(System.currentTimeMillis() - currentTestsStartTime));
+
+ // reset counters
+ currentTestsCount = 0;
+ currentTestsFailureCount = 0;
+ currentTestsErrorCount = 0;
+ currentTestsIgnoredCount = 0;
+ currentTestsStartTime = System.currentTimeMillis();
+ }
+
+ private static String formatSecs(final double milliseconds) {
+ final double seconds = milliseconds / 1000;
+ return secsFormat.format(seconds);
+ }
+
+ @Override
+ public void testFailure(final Failure failure) {
+ if (failure.getException() instanceof AssertionError) {
+ currentStatus = FAILURE;
+ currentTestsFailureCount++;
+ } else {
+ currentStatus = ERROR;
+ currentTestsErrorCount++;
+ }
+ }
+
+ @Override
+ public void testIgnored(final Description description) {
+ currentStatus = IGNORED;
+ currentTestsIgnoredCount++;
+ }
+
+ @Override
+ public void testFinished(final Description description) {
+ if(currentStatus == OK) {
+ writer.format("\t%s OK%n",currentMethodName);
+ } else {
+ writer.format(" [%s] %s%n", currentStatus.name(), currentMethodName);
+ }
+ }
+
+ @Override
+ public void testRunFinished(final Result result) {
+ printTestsSummary();
+ super.testRunFinished(result);
+ }
+ }
+
+ /**
+ * Main method to execute tests
+ *
+ * @param args Test classes as String names
+ */
+ public static void main(final String[] args) {
+ final JUnitCore runner = new JUnitCore();
+ final JUnitSystem system = new RealSystem();
+ runner.addListener(new RocksJunitListener(system));
+ try {
+ final List<Class<?>> classes = new ArrayList<>();
+ for (final String arg : args) {
+ classes.add(Class.forName(arg));
+ }
+ final Class<?>[] clazzes = classes.toArray(new Class<?>[0]);
+ final Result result = runner.run(clazzes);
+ if(!result.wasSuccessful()) {
+ System.exit(-1);
+ }
+ } catch (final ClassNotFoundException e) {
+ e.printStackTrace();
+ System.exit(-2);
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java
new file mode 100644
index 000000000..fb7239c92
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java
@@ -0,0 +1,267 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+import org.rocksdb.*;
+
+import java.nio.ByteBuffer;
+import java.nio.file.FileSystems;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * Similar to {@link IntComparatorTest}, but uses {@link BytewiseComparator},
+ * whose bytewise ordering coincides with the numeric ordering of the
+ * big-endian encoded positive integer keys used here.
+ */
+@RunWith(Parameterized.class)
+public class BytewiseComparatorIntTest {
+
+ // test with 500 random positive integer keys
+ private static final int TOTAL_KEYS = 500;
+ private static final byte[][] keys = new byte[TOTAL_KEYS][4];
+
+ @BeforeClass
+ public static void prepareKeys() {
+ final ByteBuffer buf = ByteBuffer.allocate(4);
+ final Random random = new Random();
+ for (int i = 0; i < TOTAL_KEYS; i++) {
+ final int ri = random.nextInt() & Integer.MAX_VALUE; // clearing the sign bit ensures a non-negative integer
+ buf.putInt(ri);
+ buf.flip();
+ final byte[] key = buf.array();
+
+ // does key already exist (avoid duplicates)
+ if (keyExists(key, i)) {
+ i--; // loop round and generate a different key
+ } else {
+ System.arraycopy(key, 0, keys[i], 0, 4);
+ }
+ }
+ }
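+
+ // editorial note: ByteBuffer.putInt writes big-endian, and the sign bit
+ // is cleared above, so unsigned byte-by-byte comparison of the four key
+ // bytes agrees with numeric order, e.g.
+ // 1 -> 00 00 00 01, 256 -> 00 00 01 00, Integer.MAX_VALUE -> 7f ff ff ff
+ // (a negative key's first byte would be >= 0x80 and would sort last)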
+
+ private static boolean keyExists(final byte[] key, final int limit) {
+ for (int j = 0; j < limit; j++) {
+ if (Arrays.equals(key, keys[j])) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Parameters(name = "{0}")
+ public static Iterable<Object[]> parameters() {
+ return Arrays.asList(new Object[][] {
+ { "non-direct_reused64_mutex", false, 64, ReusedSynchronisationType.MUTEX },
+ { "direct_reused64_mutex", true, 64, ReusedSynchronisationType.MUTEX },
+ { "non-direct_reused64_adaptive-mutex", false, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX },
+ { "direct_reused64_adaptive-mutex", true, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX },
+ { "non-direct_reused64_thread-local", false, 64, ReusedSynchronisationType.THREAD_LOCAL },
+ { "direct_reused64_thread-local", true, 64, ReusedSynchronisationType.THREAD_LOCAL },
+ { "non-direct_noreuse", false, -1, null },
+ { "direct_noreuse", true, -1, null }
+ });
+ }
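+
+ // (a maxReusedBufferSize of -1 disables comparator buffer reuse, hence
+ // the "noreuse" parameter names and the null synchronisation type)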
+
+ @Parameter(0)
+ public String name;
+
+ @Parameter(1)
+ public boolean useDirectBuffer;
+
+ @Parameter(2)
+ public int maxReusedBufferSize;
+
+ @Parameter(3)
+ public ReusedSynchronisationType reusedSynchronisationType;
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+
+ @Test
+ public void javaComparatorDefaultCf() throws RocksDBException {
+ try (final ComparatorOptions options = new ComparatorOptions()
+ .setUseDirectBuffer(useDirectBuffer)
+ .setMaxReusedBufferSize(maxReusedBufferSize)
+ // if reusedSynchronisationType == null we assume maxReusedBufferSize <= 0,
+ // so ADAPTIVE_MUTEX is set merely as a placeholder; it won't be used
+ .setReusedSynchronisationType(reusedSynchronisationType == null
+ ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType);
+ final BytewiseComparator comparator = new BytewiseComparator(options)) {
+
+ // test the round-tripability of keys written and read with the Comparator
+ testRoundtrip(FileSystems.getDefault().getPath(
+ dbFolder.getRoot().getAbsolutePath()), comparator);
+ }
+ }
+
+ @Test
+ public void javaComparatorNamedCf() throws RocksDBException {
+ try (final ComparatorOptions options = new ComparatorOptions()
+ .setUseDirectBuffer(useDirectBuffer)
+ .setMaxReusedBufferSize(maxReusedBufferSize)
+ // if reusedSynchronisationType == null we assume maxReusedBufferSize <= 0,
+ // so ADAPTIVE_MUTEX is set merely as a placeholder; it won't be used
+ .setReusedSynchronisationType(reusedSynchronisationType == null
+ ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType);
+ final BytewiseComparator comparator = new BytewiseComparator(options)) {
+
+ // test the round-tripability of keys written and read with the Comparator
+ testRoundtripCf(FileSystems.getDefault().getPath(
+ dbFolder.getRoot().getAbsolutePath()), comparator);
+ }
+ }
+
+ /**
+ * Test which stores random keys into the database
+ * using the supplied comparator, and then checks
+ * that the keys are read back in ascending order.
+ *
+ * @param db_path A path where we can store database
+ * files temporarily
+ *
+ * @param comparator the comparator that defines the key order
+ *
+ * @throws RocksDBException if a database error happens.
+ */
+ private void testRoundtrip(final Path db_path,
+ final AbstractComparator comparator) throws RocksDBException {
+ try (final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(comparator)) {
+
+ // store TOTAL_KEYS into the db
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString())) {
+ for (int i = 0; i < TOTAL_KEYS; i++) {
+ db.put(keys[i], "value".getBytes(UTF_8));
+ }
+ }
+
+ // re-open db and read from start to end
+ // integer keys should be in ascending
+ // order as defined by the comparator
+ final ByteBuffer key = ByteBuffer.allocate(4);
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString());
+ final RocksIterator it = db.newIterator()) {
+ int lastKey = Integer.MIN_VALUE;
+ int count = 0;
+ for (it.seekToFirst(); it.isValid(); it.next()) {
+ key.put(it.key());
+ key.flip();
+ final int thisKey = key.getInt();
+ key.clear();
+ assertThat(thisKey).isGreaterThan(lastKey);
+ lastKey = thisKey;
+ count++;
+ }
+ assertThat(count).isEqualTo(TOTAL_KEYS);
+ }
+ }
+ }
+
+ /**
+ * Test which stores random keys into a column family
+ * in the database using the supplied comparator,
+ * and then checks that the keys are read back
+ * in ascending order.
+ *
+ * @param db_path A path where we can store database
+ * files temporarily
+ *
+ * @param comparator the comparator that defines the key order
+ *
+ * @throws RocksDBException if a database error happens.
+ */
+ private void testRoundtripCf(final Path db_path,
+ final AbstractComparator comparator) throws RocksDBException {
+
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(),
+ new ColumnFamilyOptions()
+ .setComparator(comparator))
+ );
+
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+ try (final DBOptions opt = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true)) {
+
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
+ cfDescriptors, cfHandles)) {
+ try {
+ assertThat(cfDescriptors.size()).isEqualTo(2);
+ assertThat(cfHandles.size()).isEqualTo(2);
+
+ for (int i = 0; i < TOTAL_KEYS; i++) {
+ db.put(cfHandles.get(1), keys[i], "value".getBytes(UTF_8));
+ }
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ cfHandles.clear();
+ }
+ }
+
+ // re-open db and read from start to end
+ // integer keys should be in ascending
+ // order as defined by the comparator
+ final ByteBuffer key = ByteBuffer.allocate(4);
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
+ cfDescriptors, cfHandles);
+ final RocksIterator it = db.newIterator(cfHandles.get(1))) {
+ try {
+ assertThat(cfDescriptors.size()).isEqualTo(2);
+ assertThat(cfHandles.size()).isEqualTo(2);
+
+ int lastKey = Integer.MIN_VALUE;
+ int count = 0;
+ for (it.seekToFirst(); it.isValid(); it.next()) {
+ key.put(it.key());
+ key.flip();
+ final int thisKey = key.getInt();
+ key.clear();
+ assertThat(thisKey).isGreaterThan(lastKey);
+ lastKey = thisKey;
+ count++;
+ }
+
+ assertThat(count).isEqualTo(TOTAL_KEYS);
+
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ cfHandles.clear();
+ for (final ColumnFamilyDescriptor cfDescriptor : cfDescriptors) {
+ cfDescriptor.getOptions().close();
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java
new file mode 100644
index 000000000..2e2ddc543
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java
@@ -0,0 +1,519 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.rocksdb.*;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.*;
+import java.util.*;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.*;
+import static org.rocksdb.util.ByteUtil.bytes;
+
+/**
+ * A direct port of various C++ tests
+ * from db/comparator_db_test.cc,
+ * together with some code to adapt them to RocksJava
+ */
+public class BytewiseComparatorTest {
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ private final List<String> source_strings = Arrays.asList("b", "d", "f", "h", "j", "l");
+ private final List<String> interleaving_strings = Arrays.asList(
+ "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m");
+
+ /**
+ * Open the database using the C++ BytewiseComparatorImpl
+ * and test the results against our Java BytewiseComparator
+ */
+ @Test
+ public void java_vs_cpp_bytewiseComparator()
+ throws IOException, RocksDBException {
+ for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
+ final Path dbDir =
+ FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+ try(final RocksDB db = openDatabase(dbDir,
+ BuiltinComparator.BYTEWISE_COMPARATOR)) {
+
+ final Random rnd = new Random(rand_seed);
+ try(final ComparatorOptions copt2 = new ComparatorOptions()
+ .setUseDirectBuffer(false);
+ final AbstractComparator comparator2 = new BytewiseComparator(copt2)) {
+ final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+ doRandomIterationTest(
+ db,
+ jComparator,
+ rnd,
+ 8, 100, 3
+ );
+ }
+ }
+ }
+ }
+
+ /**
+ * Open the database using the Java BytewiseComparator
+ * and test the results against another Java BytewiseComparator
+ */
+ @Test
+ public void java_vs_java_bytewiseComparator()
+ throws IOException, RocksDBException {
+ for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
+ final Path dbDir =
+ FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+ try(final ComparatorOptions copt = new ComparatorOptions()
+ .setUseDirectBuffer(false);
+ final AbstractComparator comparator = new BytewiseComparator(copt);
+ final RocksDB db = openDatabase(dbDir, comparator)) {
+
+ final Random rnd = new Random(rand_seed);
+ try(final ComparatorOptions copt2 = new ComparatorOptions()
+ .setUseDirectBuffer(false);
+ final AbstractComparator comparator2 = new BytewiseComparator(copt2)) {
+ final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+ doRandomIterationTest(
+ db,
+ jComparator,
+ rnd,
+ 8, 100, 3
+ );
+ }
+ }
+ }
+ }
+
+ /**
+ * Open the database using the C++ BytewiseComparatorImpl
+ * and test the results against our Java DirectBytewiseComparator
+ */
+ @Test
+ public void java_vs_cpp_directBytewiseComparator()
+ throws IOException, RocksDBException {
+ for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
+ final Path dbDir =
+ FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+ try(final RocksDB db = openDatabase(dbDir,
+ BuiltinComparator.BYTEWISE_COMPARATOR)) {
+
+ final Random rnd = new Random(rand_seed);
+ try(final ComparatorOptions copt2 = new ComparatorOptions()
+ .setUseDirectBuffer(true);
+ final AbstractComparator comparator2 = new BytewiseComparator(copt2)) {
+ final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+ doRandomIterationTest(
+ db,
+ jComparator,
+ rnd,
+ 8, 100, 3
+ );
+ }
+ }
+ }
+ }
+
+ /**
+ * Open the database using the Java DirectBytewiseComparator
+ * and test the results against another Java DirectBytewiseComparator
+ */
+ @Test
+ public void java_vs_java_directBytewiseComparator()
+ throws IOException, RocksDBException {
+ for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
+ final Path dbDir =
+ FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+ try (final ComparatorOptions copt = new ComparatorOptions()
+ .setUseDirectBuffer(true);
+ final AbstractComparator comparator = new BytewiseComparator(copt);
+ final RocksDB db = openDatabase(dbDir, comparator)) {
+
+ final Random rnd = new Random(rand_seed);
+ try(final ComparatorOptions copt2 = new ComparatorOptions()
+ .setUseDirectBuffer(true);
+ final AbstractComparator comparator2 = new BytewiseComparator(copt2)) {
+ final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+ doRandomIterationTest(
+ db,
+ jComparator,
+ rnd,
+ 8, 100, 3
+ );
+ }
+ }
+ }
+ }
+
+ /**
+ * Open the database using the C++ ReverseBytewiseComparatorImpl
+ * and test the results against our Java ReverseBytewiseComparator
+ */
+ @Test
+ public void java_vs_cpp_reverseBytewiseComparator()
+ throws IOException, RocksDBException {
+ for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
+ final Path dbDir =
+ FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+ try(final RocksDB db = openDatabase(dbDir,
+ BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR)) {
+
+ final Random rnd = new Random(rand_seed);
+ try(final ComparatorOptions copt2 = new ComparatorOptions()
+ .setUseDirectBuffer(false);
+ final AbstractComparator comparator2 = new ReverseBytewiseComparator(copt2)) {
+ final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+ doRandomIterationTest(
+ db,
+ jComparator,
+ rnd,
+ 8, 100, 3
+ );
+ }
+ }
+ }
+ }
+
+ /**
+ * Open the database using the Java ReverseBytewiseComparator
+ * and test the results against another Java ReverseBytewiseComparator
+ */
+ @Test
+ public void java_vs_java_reverseBytewiseComparator()
+ throws IOException, RocksDBException {
+ for(int rand_seed = 301; rand_seed < 306; rand_seed++) {
+ final Path dbDir =
+ FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+ try (final ComparatorOptions copt = new ComparatorOptions()
+ .setUseDirectBuffer(false);
+ final AbstractComparator comparator = new ReverseBytewiseComparator(copt);
+ final RocksDB db = openDatabase(dbDir, comparator)) {
+
+ final Random rnd = new Random(rand_seed);
+ try(final ComparatorOptions copt2 = new ComparatorOptions()
+ .setUseDirectBuffer(false);
+ final AbstractComparator comparator2 = new ReverseBytewiseComparator(copt2)) {
+ final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+ doRandomIterationTest(
+ db,
+ jComparator,
+ rnd,
+ 8, 100, 3
+ );
+ }
+ }
+ }
+ }
+
+ private void doRandomIterationTest(
+ final RocksDB db, final java.util.Comparator<String> javaComparator,
+ final Random rnd,
+ final int num_writes, final int num_iter_ops,
+ final int num_trigger_flush) throws RocksDBException {
+
+ final TreeMap<String, String> map = new TreeMap<>(javaComparator);
+
+ try (final FlushOptions flushOptions = new FlushOptions();
+ final WriteOptions writeOptions = new WriteOptions()) {
+ for (int i = 0; i < num_writes; i++) {
+ if (num_trigger_flush > 0 && i != 0 && i % num_trigger_flush == 0) {
+ db.flush(flushOptions);
+ }
+
+ final int type = rnd.nextInt(2);
+ final int index = rnd.nextInt(source_strings.size());
+ final String key = source_strings.get(index);
+ switch (type) {
+ case 0:
+ // put
+ map.put(key, key);
+ db.put(writeOptions, bytes(key), bytes(key));
+ break;
+ case 1:
+ // delete
+ if (map.containsKey(key)) {
+ map.remove(key);
+ }
+ db.delete(writeOptions, bytes(key));
+ break;
+
+ default:
+ fail("Should not be able to generate random outside range 1..2");
+ }
+ }
+ }
+
+ try (final ReadOptions readOptions = new ReadOptions();
+ final RocksIterator iter = db.newIterator(readOptions)) {
+ final KVIter<String, String> result_iter = new KVIter<>(map);
+
+ boolean is_valid = false;
+ for (int i = 0; i < num_iter_ops; i++) {
+ // Random walk and make sure iter and result_iter return the
+ // same key and value
+ final int type = rnd.nextInt(7);
+ iter.status();
+ switch (type) {
+ case 0:
+ // Seek to First
+ iter.seekToFirst();
+ result_iter.seekToFirst();
+ break;
+ case 1:
+ // Seek to last
+ iter.seekToLast();
+ result_iter.seekToLast();
+ break;
+ case 2: {
+ // Seek to random (existing or non-existing) key
+ final int key_idx = rnd.nextInt(interleaving_strings.size());
+ final String key = interleaving_strings.get(key_idx);
+ iter.seek(bytes(key));
+ result_iter.seek(bytes(key));
+ break;
+ }
+ case 3: {
+ // SeekForPrev to random (existing or non-existing) key
+ final int key_idx = rnd.nextInt(interleaving_strings.size());
+ final String key = interleaving_strings.get(key_idx);
+ iter.seekForPrev(bytes(key));
+ result_iter.seekForPrev(bytes(key));
+ break;
+ }
+ case 4:
+ // Next
+ if (is_valid) {
+ iter.next();
+ result_iter.next();
+ } else {
+ continue;
+ }
+ break;
+ case 5:
+ // Prev
+ if (is_valid) {
+ iter.prev();
+ result_iter.prev();
+ } else {
+ continue;
+ }
+ break;
+ default: {
+ assert (type == 6);
+ final int key_idx = rnd.nextInt(source_strings.size());
+ final String key = source_strings.get(key_idx);
+ final byte[] result = db.get(readOptions, bytes(key));
+ if (!map.containsKey(key)) {
+ assertNull(result);
+ } else {
+ assertArrayEquals(bytes(map.get(key)), result);
+ }
+ break;
+ }
+ }
+
+ assertEquals(result_iter.isValid(), iter.isValid());
+
+ is_valid = iter.isValid();
+
+ if (is_valid) {
+ assertArrayEquals(bytes(result_iter.key()), iter.key());
+
+ // note that calling value() on an invalid iterator from the Java API
+ // results in a SIGSEGV
+ assertArrayEquals(bytes(result_iter.value()), iter.value());
+ }
+ }
+ }
+ }
+
+ /**
+ * Open the database using a C++ Comparator
+ */
+ private RocksDB openDatabase(
+ final Path dbDir, final BuiltinComparator cppComparator)
+ throws IOException, RocksDBException {
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(cppComparator);
+ return RocksDB.open(options, dbDir.toAbsolutePath().toString());
+ }
+
+ /**
+ * Open the database using a Java Comparator
+ */
+ private RocksDB openDatabase(
+ final Path dbDir,
+ final AbstractComparator javaComparator)
+ throws IOException, RocksDBException {
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(javaComparator);
+ return RocksDB.open(options, dbDir.toAbsolutePath().toString());
+ }
+
+ private java.util.Comparator<String> toJavaComparator(
+ final AbstractComparator rocksComparator) {
+ return new java.util.Comparator<String>() {
+ @Override
+ public int compare(final String s1, final String s2) {
+ final ByteBuffer bufS1;
+ final ByteBuffer bufS2;
+ if (rocksComparator.usingDirectBuffers()) {
+ bufS1 = ByteBuffer.allocateDirect(s1.length());
+ bufS2 = ByteBuffer.allocateDirect(s2.length());
+ } else {
+ bufS1 = ByteBuffer.allocate(s1.length());
+ bufS2 = ByteBuffer.allocate(s2.length());
+ }
+ bufS1.put(bytes(s1));
+ bufS1.flip();
+ bufS2.put(bytes(s2));
+ bufS2.flip();
+ return rocksComparator.compare(bufS1, bufS2);
+ }
+ };
+ }
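+
+ // Editorial note: this adapter allocates fresh buffers on every call,
+ // which is fine in a test; the buffer-reuse settings on
+ // ComparatorOptions exist to avoid exactly this cost on hot paths.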
+
+ private static class KVIter<K, V> implements RocksIteratorInterface {
+
+ private final List<Map.Entry<K, V>> entries;
+ private final java.util.Comparator<? super K> comparator;
+ private int offset = -1;
+
+ private int lastPrefixMatchIdx = -1;
+ private int lastPrefixMatch = 0;
+
+ public KVIter(final TreeMap<K, V> map) {
+ this.entries = new ArrayList<>();
+ entries.addAll(map.entrySet());
+ this.comparator = map.comparator();
+ }
+
+
+ @Override
+ public boolean isValid() {
+ return offset > -1 && offset < entries.size();
+ }
+
+ @Override
+ public void seekToFirst() {
+ offset = 0;
+ }
+
+ @Override
+ public void seekToLast() {
+ offset = entries.size() - 1;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void seek(final byte[] target) {
+ for(offset = 0; offset < entries.size(); offset++) {
+ if(comparator.compare(entries.get(offset).getKey(),
+ (K)new String(target, UTF_8)) >= 0) {
+ return;
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void seekForPrev(final byte[] target) {
+ for(offset = entries.size()-1; offset >= 0; offset--) {
+ if(comparator.compare(entries.get(offset).getKey(),
+ (K)new String(target, UTF_8)) <= 0) {
+ return;
+ }
+ }
+ }
+
+ /**
+ * Is `a` a prefix of `b`?
+ *
+ * @return The length of the matching prefix, or 0 if `a` is not a prefix
+ */
+ private int isPrefix(final byte[] a, final byte[] b) {
+ if(b.length >= a.length) {
+ for(int i = 0; i < a.length; i++) {
+ if(a[i] != b[i]) {
+ return i;
+ }
+ }
+ return a.length;
+ } else {
+ return 0;
+ }
+ }
+
+ @Override
+ public void next() {
+ if(offset < entries.size()) {
+ offset++;
+ }
+ }
+
+ @Override
+ public void prev() {
+ if(offset >= 0) {
+ offset--;
+ }
+ }
+
+ @Override
+ public void status() throws RocksDBException {
+ if(offset < 0 || offset >= entries.size()) {
+ throw new RocksDBException("Index out of bounds. Size is: " +
+ entries.size() + ", offset is: " + offset);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public K key() {
+ if(!isValid()) {
+ if(entries.isEmpty()) {
+ return (K)"";
+ } else if(offset == -1){
+ return entries.get(0).getKey();
+ } else if(offset == entries.size()) {
+ return entries.get(offset - 1).getKey();
+ } else {
+ return (K)"";
+ }
+ } else {
+ return entries.get(offset).getKey();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public V value() {
+ if(!isValid()) {
+ return (V)"";
+ } else {
+ return entries.get(offset).getValue();
+ }
+ }
+
+ @Override
+ public void seek(final ByteBuffer target) {
+ throw new IllegalAccessError("Not implemented");
+ }
+
+ @Override
+ public void seekForPrev(final ByteBuffer target) {
+ throw new IllegalAccessError("Not implemented");
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java
new file mode 100644
index 000000000..890819471
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java
@@ -0,0 +1,172 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb.util;
+
+import org.rocksdb.RocksDBException;
+import org.rocksdb.WriteBatch;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * A simple WriteBatch Handler which adds a record
+ * of each event that it receives to a list
+ */
+public class CapturingWriteBatchHandler extends WriteBatch.Handler {
+
+ private final List<Event> events = new ArrayList<>();
+
+ /**
+ * Returns a copy of the current events list
+ *
+ * @return a list of the events which have happened up to now
+ */
+ public List<Event> getEvents() {
+ return new ArrayList<>(events);
+ }
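+
+ // Typical usage, as an editorial sketch (the handler is driven by
+ // WriteBatch#iterate, which replays each record against these callbacks):
+ //
+ // try (final WriteBatch wb = new WriteBatch();
+ // final CapturingWriteBatchHandler handler =
+ // new CapturingWriteBatchHandler()) {
+ // wb.put("k".getBytes(), "v".getBytes());
+ // wb.iterate(handler);
+ // // handler.getEvents() now holds a single PUT event
+ // }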
+
+ @Override
+ public void put(final int columnFamilyId, final byte[] key,
+ final byte[] value) {
+ events.add(new Event(Action.PUT, columnFamilyId, key, value));
+ }
+
+ @Override
+ public void put(final byte[] key, final byte[] value) {
+ events.add(new Event(Action.PUT, key, value));
+ }
+
+ @Override
+ public void merge(final int columnFamilyId, final byte[] key,
+ final byte[] value) {
+ events.add(new Event(Action.MERGE, columnFamilyId, key, value));
+ }
+
+ @Override
+ public void merge(final byte[] key, final byte[] value) {
+ events.add(new Event(Action.MERGE, key, value));
+ }
+
+ @Override
+ public void delete(final int columnFamilyId, final byte[] key) {
+ events.add(new Event(Action.DELETE, columnFamilyId, key, (byte[])null));
+ }
+
+ @Override
+ public void delete(final byte[] key) {
+ events.add(new Event(Action.DELETE, key, (byte[])null));
+ }
+
+ @Override
+ public void singleDelete(final int columnFamilyId, final byte[] key) {
+ events.add(new Event(Action.SINGLE_DELETE,
+ columnFamilyId, key, (byte[])null));
+ }
+
+ @Override
+ public void singleDelete(final byte[] key) {
+ events.add(new Event(Action.SINGLE_DELETE, key, (byte[])null));
+ }
+
+ @Override
+ public void deleteRange(final int columnFamilyId, final byte[] beginKey,
+ final byte[] endKey) {
+ events.add(new Event(Action.DELETE_RANGE, columnFamilyId, beginKey,
+ endKey));
+ }
+
+ @Override
+ public void deleteRange(final byte[] beginKey, final byte[] endKey) {
+ events.add(new Event(Action.DELETE_RANGE, beginKey, endKey));
+ }
+
+ @Override
+ public void logData(final byte[] blob) {
+ events.add(new Event(Action.LOG, (byte[])null, blob));
+ }
+
+ @Override
+ public void putBlobIndex(final int columnFamilyId, final byte[] key,
+ final byte[] value) {
+ events.add(new Event(Action.PUT_BLOB_INDEX, columnFamilyId, key, value));
+ }
+
+ @Override
+ public void markBeginPrepare() throws RocksDBException {
+ events.add(new Event(Action.MARK_BEGIN_PREPARE, (byte[])null,
+ (byte[])null));
+ }
+
+ @Override
+ public void markEndPrepare(final byte[] xid) throws RocksDBException {
+ events.add(new Event(Action.MARK_END_PREPARE, (byte[])null,
+ (byte[])null));
+ }
+
+ @Override
+ public void markNoop(final boolean emptyBatch) throws RocksDBException {
+ events.add(new Event(Action.MARK_NOOP, (byte[])null, (byte[])null));
+ }
+
+ @Override
+ public void markRollback(final byte[] xid) throws RocksDBException {
+ events.add(new Event(Action.MARK_ROLLBACK, (byte[])null, (byte[])null));
+ }
+
+ @Override
+ public void markCommit(final byte[] xid) throws RocksDBException {
+ events.add(new Event(Action.MARK_COMMIT, (byte[])null, (byte[])null));
+ }
+
+ public static class Event {
+ public final Action action;
+ public final int columnFamilyId;
+ public final byte[] key;
+ public final byte[] value;
+
+ public Event(final Action action, final byte[] key, final byte[] value) {
+ this(action, 0, key, value);
+ }
+
+ public Event(final Action action, final int columnFamilyId, final byte[] key,
+ final byte[] value) {
+ this.action = action;
+ this.columnFamilyId = columnFamilyId;
+ this.key = key;
+ this.value = value;
+ }
+
+ @Override
+ public boolean equals(final Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ final Event event = (Event) o;
+ return columnFamilyId == event.columnFamilyId &&
+ action == event.action &&
+ ((key == null && event.key == null)
+ || Arrays.equals(key, event.key)) &&
+ ((value == null && event.value == null)
+ || Arrays.equals(value, event.value));
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(action, columnFamilyId,
+ Arrays.hashCode(key), Arrays.hashCode(value));
+ }
+ }
+
+ /**
+ * Enumeration of Write Batch
+ * event actions
+ */
+ public enum Action {
+ PUT, MERGE, DELETE, SINGLE_DELETE, DELETE_RANGE, LOG, PUT_BLOB_INDEX,
+ MARK_BEGIN_PREPARE, MARK_END_PREPARE, MARK_NOOP, MARK_COMMIT,
+ MARK_ROLLBACK
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
new file mode 100644
index 000000000..8b57066bd
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
@@ -0,0 +1,259 @@
+// Copyright (c) 2014, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb.util;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class EnvironmentTest {
+ private static final String ARCH_FIELD_NAME = "ARCH";
+ private static final String OS_FIELD_NAME = "OS";
+ private static final String MUSL_LIBC_FIELD_NAME = "MUSL_LIBC";
+
+ private static String INITIAL_OS;
+ private static String INITIAL_ARCH;
+ private static boolean INITIAL_MUSL_LIBC;
+
+ @BeforeClass
+ public static void saveState() {
+ INITIAL_ARCH = getEnvironmentClassField(ARCH_FIELD_NAME);
+ INITIAL_OS = getEnvironmentClassField(OS_FIELD_NAME);
+ INITIAL_MUSL_LIBC = getEnvironmentClassField(MUSL_LIBC_FIELD_NAME);
+ }
+
+ @Test
+ public void mac32() {
+ setEnvironmentClassFields("mac", "32");
+ assertThat(Environment.isWindows()).isFalse();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".jnilib");
+ assertThat(Environment.getJniLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni-osx.jnilib");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni.dylib");
+ }
+
+ @Test
+ public void mac64() {
+ setEnvironmentClassFields("mac", "64");
+ assertThat(Environment.isWindows()).isFalse();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".jnilib");
+ assertThat(Environment.getJniLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni-osx.jnilib");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni.dylib");
+ }
+
+ @Test
+ public void nix32() {
+ // Linux
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false);
+ setEnvironmentClassFields("Linux", "32");
+ assertThat(Environment.isWindows()).isFalse();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".so");
+ assertThat(Environment.getJniLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni-linux32.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni.so");
+ // Linux musl-libc (Alpine)
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, true);
+ assertThat(Environment.isWindows()).isFalse();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".so");
+ assertThat(Environment.getJniLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni-linux32-musl.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni.so");
+ // UNIX
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false);
+ setEnvironmentClassFields("Unix", "32");
+ assertThat(Environment.isWindows()).isFalse();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".so");
+ assertThat(Environment.getJniLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni-linux32.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni.so");
+ }
+
+ @Test(expected = UnsupportedOperationException.class)
+ public void aix32() {
+ // AIX
+ setEnvironmentClassFields("aix", "32");
+ assertThat(Environment.isWindows()).isFalse();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".so");
+ Environment.getJniLibraryFileName("rocksdb");
+ }
+
+ @Test
+ public void nix64() {
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false);
+ setEnvironmentClassFields("Linux", "x64");
+ assertThat(Environment.isWindows()).isFalse();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".so");
+ assertThat(Environment.getJniLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni-linux64.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni.so");
+ // Linux musl-libc (Alpine)
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, true);
+ assertThat(Environment.isWindows()).isFalse();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".so");
+ assertThat(Environment.getJniLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni-linux64-musl.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni.so");
+ // UNIX
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false);
+ setEnvironmentClassFields("Unix", "x64");
+ assertThat(Environment.isWindows()).isFalse();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".so");
+ assertThat(Environment.getJniLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni-linux64.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni.so");
+ // AIX
+ setEnvironmentClassFields("aix", "x64");
+ assertThat(Environment.isWindows()).isFalse();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".so");
+ assertThat(Environment.getJniLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni-aix64.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni.so");
+ }
+
+ @Test
+ public void detectWindows(){
+ setEnvironmentClassFields("win", "x64");
+ assertThat(Environment.isWindows()).isTrue();
+ }
+
+ @Test
+ public void win64() {
+ setEnvironmentClassFields("win", "x64");
+ assertThat(Environment.isWindows()).isTrue();
+ assertThat(Environment.getJniLibraryExtension()).
+ isEqualTo(".dll");
+ assertThat(Environment.getJniLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni-win64.dll");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).
+ isEqualTo("librocksdbjni.dll");
+ }
+
+ @Test
+ public void ppc64le() {
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false);
+ setEnvironmentClassFields("Linux", "ppc64le");
+ assertThat(Environment.isUnix()).isTrue();
+ assertThat(Environment.isPowerPC()).isTrue();
+ assertThat(Environment.is64Bit()).isTrue();
+ assertThat(Environment.getJniLibraryExtension()).isEqualTo(".so");
+ assertThat(Environment.getSharedLibraryName("rocksdb")).isEqualTo("rocksdbjni");
+ assertThat(Environment.getJniLibraryName("rocksdb")).isEqualTo("rocksdbjni-linux-ppc64le");
+ assertThat(Environment.getJniLibraryFileName("rocksdb"))
+ .isEqualTo("librocksdbjni-linux-ppc64le.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.so");
+ // Linux musl-libc (Alpine)
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, true);
+ setEnvironmentClassFields("Linux", "ppc64le");
+ assertThat(Environment.isUnix()).isTrue();
+ assertThat(Environment.isPowerPC()).isTrue();
+ assertThat(Environment.is64Bit()).isTrue();
+ assertThat(Environment.getJniLibraryExtension()).isEqualTo(".so");
+ assertThat(Environment.getSharedLibraryName("rocksdb")).isEqualTo("rocksdbjni");
+ assertThat(Environment.getJniLibraryName("rocksdb")).isEqualTo("rocksdbjni-linux-ppc64le-musl");
+ assertThat(Environment.getJniLibraryFileName("rocksdb"))
+ .isEqualTo("librocksdbjni-linux-ppc64le-musl.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.so");
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false);
+ }
+
+ @Test
+ public void aarch64() {
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false);
+ setEnvironmentClassFields("Linux", "aarch64");
+ assertThat(Environment.isUnix()).isTrue();
+ assertThat(Environment.isAarch64()).isTrue();
+ assertThat(Environment.is64Bit()).isTrue();
+ assertThat(Environment.getJniLibraryExtension()).isEqualTo(".so");
+ assertThat(Environment.getSharedLibraryName("rocksdb")).isEqualTo("rocksdbjni");
+ assertThat(Environment.getJniLibraryName("rocksdb")).isEqualTo("rocksdbjni-linux-aarch64");
+ assertThat(Environment.getJniLibraryFileName("rocksdb"))
+ .isEqualTo("librocksdbjni-linux-aarch64.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.so");
+ // Linux musl-libc (Alpine)
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, true);
+ setEnvironmentClassFields("Linux", "aarch64");
+ assertThat(Environment.isUnix()).isTrue();
+ assertThat(Environment.isAarch64()).isTrue();
+ assertThat(Environment.is64Bit()).isTrue();
+ assertThat(Environment.getJniLibraryExtension()).isEqualTo(".so");
+ assertThat(Environment.getSharedLibraryName("rocksdb")).isEqualTo("rocksdbjni");
+ assertThat(Environment.getJniLibraryName("rocksdb")).isEqualTo("rocksdbjni-linux-aarch64-musl");
+ assertThat(Environment.getJniLibraryFileName("rocksdb"))
+ .isEqualTo("librocksdbjni-linux-aarch64-musl.so");
+ assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.so");
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false);
+ }
+
+ private void setEnvironmentClassFields(String osName,
+ String osArch) {
+ setEnvironmentClassField(OS_FIELD_NAME, osName);
+ setEnvironmentClassField(ARCH_FIELD_NAME, osArch);
+ }
+
+ @AfterClass
+ public static void restoreState() {
+ setEnvironmentClassField(OS_FIELD_NAME, INITIAL_OS);
+ setEnvironmentClassField(ARCH_FIELD_NAME, INITIAL_ARCH);
+ setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, INITIAL_MUSL_LIBC);
+ }
+
+ @SuppressWarnings("unchecked")
+ private static <T> T getEnvironmentClassField(String fieldName) {
+ final Field field;
+ try {
+ field = Environment.class.getDeclaredField(fieldName);
+ field.setAccessible(true);
+ /* Fails in JDK 13; and not needed unless fields are final
+ final Field modifiersField = Field.class.getDeclaredField("modifiers");
+ modifiersField.setAccessible(true);
+ modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
+ */
+ return (T)field.get(null);
+ } catch (final NoSuchFieldException | IllegalAccessException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private static void setEnvironmentClassField(String fieldName, Object value) {
+ final Field field;
+ try {
+ field = Environment.class.getDeclaredField(fieldName);
+ field.setAccessible(true);
+ /* Fails in JDK 13; and not needed unless fields are final
+ final Field modifiersField = Field.class.getDeclaredField("modifiers");
+ modifiersField.setAccessible(true);
+ modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
+ */
+ field.set(null, value);
+ } catch (final NoSuchFieldException | IllegalAccessException e) {
+ throw new RuntimeException(e);
+ }
+ }
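+
+ /*
+ * Note on the commented-out "modifiers" hack above: newer JDKs (JDK 13
+ * in the comment's observation) filter java.lang.reflect.Field's own
+ * internal fields from reflection, so getDeclaredField("modifiers")
+ * throws NoSuchFieldException there. The hack is only required for
+ * static final fields, which these helpers do not need to modify.
+ */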
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/IntComparatorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/IntComparatorTest.java
new file mode 100644
index 000000000..dd3288513
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/IntComparatorTest.java
@@ -0,0 +1,266 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+import org.rocksdb.*;
+
+import java.nio.ByteBuffer;
+import java.nio.file.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * Tests for {@link IntComparator}, and more generally
+ * for the underlying rocksdb::ComparatorJniCallback implementation.
+ */
+@RunWith(Parameterized.class)
+public class IntComparatorTest {
+
+ // test with 500 random integer keys
+ private static final int TOTAL_KEYS = 500;
+ private static final byte[][] keys = new byte[TOTAL_KEYS][4];
+
+ @BeforeClass
+ public static void prepareKeys() {
+ final ByteBuffer buf = ByteBuffer.allocate(4);
+ final Random random = new Random();
+ for (int i = 0; i < TOTAL_KEYS; i++) {
+ final int ri = random.nextInt();
+ buf.putInt(ri);
+ buf.flip();
+ final byte[] key = buf.array();
+
+ // does key already exist (avoid duplicates)
+ if (keyExists(key, i)) {
+ i--; // loop round and generate a different key
+ } else {
+ System.arraycopy(key, 0, keys[i], 0, 4);
+ }
+ }
+ }
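+
+ /*
+ * A minimal sketch of the int -> byte[] encoding used by prepareKeys()
+ * above: putInt(...) writes four big-endian bytes, flip() rewinds the
+ * buffer for reading, and array() exposes the backing array, which is
+ * copied so the key is independent of the buffer. Hypothetical helper
+ * for illustration only; not called by the test.
+ */
+ private static byte[] intToKeyBytes(final int value) {
+ final ByteBuffer buf = ByteBuffer.allocate(4);
+ buf.putInt(value); // big-endian by default
+ buf.flip(); // position = 0, limit = 4
+ return Arrays.copyOf(buf.array(), 4); // detach from the buffer
+ }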
+
+ private static boolean keyExists(final byte[] key, final int limit) {
+ for (int j = 0; j < limit; j++) {
+ if (Arrays.equals(key, keys[j])) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Parameters(name = "{0}")
+ public static Iterable<Object[]> parameters() {
+ return Arrays.asList(new Object[][] {
+ { "non-direct_reused64_mutex", false, 64, ReusedSynchronisationType.MUTEX },
+ { "direct_reused64_mutex", true, 64, ReusedSynchronisationType.MUTEX },
+ { "non-direct_reused64_adaptive-mutex", false, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX },
+ { "direct_reused64_adaptive-mutex", true, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX },
+ { "non-direct_reused64_thread-local", false, 64, ReusedSynchronisationType.THREAD_LOCAL },
+ { "direct_reused64_thread-local", true, 64, ReusedSynchronisationType.THREAD_LOCAL },
+ { "non-direct_noreuse", false, -1, null },
+ { "direct_noreuse", true, -1, null }
+ });
+ }
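+
+ /*
+ * The grid above crosses direct vs. heap key buffers with each
+ * ReusedSynchronisationType for the reused 64-byte buffer path, plus a
+ * no-reuse path (maxReusedBufferSize <= 0), so each buffer-handling
+ * mode of the native ComparatorJniCallback is exercised.
+ */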
+
+ @Parameter(0)
+ public String name;
+
+ @Parameter(1)
+ public boolean useDirectBuffer;
+
+ @Parameter(2)
+ public int maxReusedBufferSize;
+
+ @Parameter(3)
+ public ReusedSynchronisationType reusedSynchronisationType;
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void javaComparatorDefaultCf() throws RocksDBException {
+ try (final ComparatorOptions options = new ComparatorOptions()
+ .setUseDirectBuffer(useDirectBuffer)
+ .setMaxReusedBufferSize(maxReusedBufferSize)
+ // if reusedSynchronisationType == null we assume maxReusedBufferSize <= 0,
+ // so ADAPTIVE_MUTEX is set only as a placeholder and won't be used
+ .setReusedSynchronisationType(reusedSynchronisationType == null
+ ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType);
+ final IntComparator comparator = new IntComparator(options)) {
+
+ // test the round-tripability of keys written and read with the Comparator
+ testRoundtrip(FileSystems.getDefault().getPath(
+ dbFolder.getRoot().getAbsolutePath()), comparator);
+ }
+ }
+
+ @Test
+ public void javaComparatorNamedCf() throws RocksDBException {
+ try (final ComparatorOptions options = new ComparatorOptions()
+ .setUseDirectBuffer(useDirectBuffer)
+ .setMaxReusedBufferSize(maxReusedBufferSize)
+ // if reusedSynchronisationType == null we assume maxReusedBufferSize <= 0,
+ // so ADAPTIVE_MUTEX is set only as a placeholder and won't be used
+ .setReusedSynchronisationType(reusedSynchronisationType == null
+ ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType);
+ final IntComparator comparator = new IntComparator(options)) {
+
+ // test the round-tripability of keys written and read with the Comparator
+ testRoundtripCf(FileSystems.getDefault().getPath(
+ dbFolder.getRoot().getAbsolutePath()), comparator);
+ }
+ }
+
+ /**
+ * Test which stores random keys into the database
+ * using an {@link IntComparator};
+ * it then checks that these keys are read back in
+ * ascending order.
+ *
+ * @param db_path A path where we can store database
+ * files temporarily
+ *
+ * @param comparator the comparator
+ *
+ * @throws RocksDBException if a database error happens.
+ */
+ private void testRoundtrip(final Path db_path,
+ final AbstractComparator comparator) throws RocksDBException {
+ try (final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(comparator)) {
+
+ // store TOTAL_KEYS into the db
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString())) {
+ for (int i = 0; i < TOTAL_KEYS; i++) {
+ db.put(keys[i], "value".getBytes(UTF_8));
+ }
+ }
+
+ // re-open db and read from start to end
+ // integer keys should be in ascending
+ // order as defined by IntComparator
+ final ByteBuffer key = ByteBuffer.allocate(4);
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString());
+ final RocksIterator it = db.newIterator()) {
+ int lastKey = Integer.MIN_VALUE;
+ int count = 0;
+ for (it.seekToFirst(); it.isValid(); it.next()) {
+ key.put(it.key());
+ key.flip();
+ final int thisKey = key.getInt();
+ key.clear();
+ assertThat(thisKey).isGreaterThan(lastKey);
+ lastKey = thisKey;
+ count++;
+ }
+ assertThat(count).isEqualTo(TOTAL_KEYS);
+ }
+ }
+ }
+
+ /**
+ * Test which stores random keys into a column family
+ * in the database
+ * using an {@link IntComparator};
+ * it then checks that these keys are read back in
+ * ascending order.
+ *
+ * @param db_path A path where we can store database
+ * files temporarily
+ *
+ * @param comparator the comparator
+ *
+ * @throws RocksDBException if a database error happens.
+ */
+ private void testRoundtripCf(final Path db_path,
+ final AbstractComparator comparator) throws RocksDBException {
+
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(),
+ new ColumnFamilyOptions()
+ .setComparator(comparator))
+ );
+
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+ try (final DBOptions opt = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true)) {
+
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
+ cfDescriptors, cfHandles)) {
+ try {
+ assertThat(cfDescriptors.size()).isEqualTo(2);
+ assertThat(cfHandles.size()).isEqualTo(2);
+
+ for (int i = 0; i < TOTAL_KEYS; i++) {
+ db.put(cfHandles.get(1), keys[i], "value".getBytes(UTF_8));
+ }
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ cfHandles.clear();
+ }
+ }
+
+ // re-open db and read from start to end
+ // integer keys should be in ascending
+ // order as defined by IntComparator
+ final ByteBuffer key = ByteBuffer.allocate(4);
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
+ cfDescriptors, cfHandles);
+ final RocksIterator it = db.newIterator(cfHandles.get(1))) {
+ try {
+ assertThat(cfDescriptors.size()).isEqualTo(2);
+ assertThat(cfHandles.size()).isEqualTo(2);
+
+ int lastKey = Integer.MIN_VALUE;
+ int count = 0;
+ for (it.seekToFirst(); it.isValid(); it.next()) {
+ key.put(it.key());
+ key.flip();
+ final int thisKey = key.getInt();
+ key.clear();
+ assertThat(thisKey).isGreaterThan(lastKey);
+ lastKey = thisKey;
+ count++;
+ }
+
+ assertThat(count).isEqualTo(TOTAL_KEYS);
+
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ cfHandles.clear();
+ for (final ColumnFamilyDescriptor cfDescriptor : cfDescriptors) {
+ cfDescriptor.getOptions().close();
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java
new file mode 100644
index 000000000..6ffa2785f
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java
@@ -0,0 +1,174 @@
+package org.rocksdb.util;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+import org.rocksdb.*;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.*;
+import java.util.Arrays;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+@RunWith(Parameterized.class)
+public class JNIComparatorTest {
+
+ @Parameters(name = "{0}")
+ public static Iterable<Object[]> parameters() {
+ return Arrays.asList(new Object[][] {
+ { "bytewise_non-direct", BuiltinComparator.BYTEWISE_COMPARATOR, false },
+ { "bytewise_direct", BuiltinComparator.BYTEWISE_COMPARATOR, true },
+ { "reverse-bytewise_non-direct", BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR, false },
+ { "reverse-bytewise_direct", BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR, true },
+ });
+ }
+
+ @Parameter(0)
+ public String name;
+
+ @Parameter(1)
+ public BuiltinComparator builtinComparator;
+
+ @Parameter(2)
+ public boolean useDirectBuffer;
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ private static final int MIN = Short.MIN_VALUE - 1;
+ private static final int MAX = Short.MAX_VALUE + 1;
+
+ @Test
+ public void java_comparator_equals_cpp_comparator() throws RocksDBException, IOException {
+ final int[] javaKeys;
+ try (final ComparatorOptions comparatorOptions = new ComparatorOptions()
+ .setUseDirectBuffer(useDirectBuffer); // honour the parameterised direct/non-direct buffer mode
+ final AbstractComparator comparator =
+ builtinComparator == BuiltinComparator.BYTEWISE_COMPARATOR
+ ? new BytewiseComparator(comparatorOptions)
+ : new ReverseBytewiseComparator(comparatorOptions)) {
+ final Path javaDbDir =
+ FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+ storeWithJavaComparator(javaDbDir, comparator);
+ javaKeys = readAllWithJavaComparator(javaDbDir, comparator);
+ }
+
+ final Path cppDbDir =
+ FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+ storeWithCppComparator(cppDbDir, builtinComparator);
+ final int[] cppKeys =
+ readAllWithCppComparator(cppDbDir, builtinComparator);
+
+ assertThat(javaKeys).isEqualTo(cppKeys);
+ }
+
+ private void storeWithJavaComparator(final Path dir,
+ final AbstractComparator comparator) throws RocksDBException {
+ final ByteBuffer buf = ByteBuffer.allocate(4);
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(comparator);
+ final RocksDB db =
+ RocksDB.open(options, dir.toAbsolutePath().toString())) {
+ for (int i = MIN; i < MAX; i++) {
+ buf.putInt(i);
+ buf.flip();
+
+ db.put(buf.array(), buf.array());
+
+ buf.clear();
+ }
+ }
+ }
+
+ private void storeWithCppComparator(final Path dir,
+ final BuiltinComparator builtinComparator) throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(builtinComparator);
+ final RocksDB db =
+ RocksDB.open(options, dir.toAbsolutePath().toString())) {
+
+ final ByteBuffer buf = ByteBuffer.allocate(4);
+ for (int i = MIN; i < MAX; i++) {
+ buf.putInt(i);
+ buf.flip();
+
+ db.put(buf.array(), buf.array());
+
+ buf.clear();
+ }
+ }
+ }
+
+ private int[] readAllWithJavaComparator(final Path dir,
+ final AbstractComparator comparator) throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(comparator);
+ final RocksDB db =
+ RocksDB.open(options, dir.toAbsolutePath().toString())) {
+
+ try (final RocksIterator it = db.newIterator()) {
+ it.seekToFirst();
+
+ final ByteBuffer buf = ByteBuffer.allocate(4);
+ final int[] keys = new int[MAX - MIN];
+ int idx = 0;
+ while (it.isValid()) {
+ buf.put(it.key());
+ buf.flip();
+
+ final int thisKey = buf.getInt();
+ keys[idx++] = thisKey;
+
+ buf.clear();
+
+ it.next();
+ }
+
+ return keys;
+ }
+ }
+ }
+
+ private int[] readAllWithCppComparator(final Path dir,
+ final BuiltinComparator comparator) throws RocksDBException {
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(comparator);
+ final RocksDB db =
+ RocksDB.open(options, dir.toAbsolutePath().toString())) {
+
+ try (final RocksIterator it = db.newIterator()) {
+ it.seekToFirst();
+
+ final ByteBuffer buf = ByteBuffer.allocate(4);
+ final int[] keys = new int[MAX - MIN];
+ int idx = 0;
+ while (it.isValid()) {
+ buf.put(it.key());
+ buf.flip();
+
+ final int thisKey = buf.getInt();
+ keys[idx++] = thisKey;
+
+ buf.clear();
+
+ it.next();
+ }
+
+ return keys;
+ }
+ }
+ }
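+
+ /*
+ * The two readAll* methods above differ only in how the comparator is
+ * attached to the Options. A sketch of the shared iteration, extracted
+ * here as a hypothetical helper for clarity; not called by the test.
+ */
+ private static int[] drainKeys(final RocksIterator it) {
+ final ByteBuffer buf = ByteBuffer.allocate(4);
+ final int[] keys = new int[MAX - MIN];
+ int idx = 0;
+ for (it.seekToFirst(); it.isValid(); it.next()) {
+ buf.put(it.key());
+ buf.flip();
+ keys[idx++] = buf.getInt(); // decode the big-endian key
+ buf.clear();
+ }
+ return keys;
+ }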
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java
new file mode 100644
index 000000000..ca08d9de1
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java
@@ -0,0 +1,270 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+import org.rocksdb.*;
+
+import java.nio.ByteBuffer;
+import java.nio.file.FileSystems;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * Similar to {@link IntComparatorTest}, but uses
+ * {@link ReverseBytewiseComparator}, which yields the correct reverse
+ * (descending) ordering of positive integers: big-endian encodings of
+ * non-negative ints sort bytewise in ascending numeric order, so
+ * reversing the bytewise order reverses the numeric order; the sign bit
+ * would break this for negative values, hence keys are kept positive.
+ */
+@RunWith(Parameterized.class)
+public class ReverseBytewiseComparatorIntTest {
+
+ // test with 500 random positive integer keys
+ private static final int TOTAL_KEYS = 500;
+ private static final byte[][] keys = new byte[TOTAL_KEYS][4];
+
+ @BeforeClass
+ public static void prepareKeys() {
+ final ByteBuffer buf = ByteBuffer.allocate(4);
+ final Random random = new Random();
+ for (int i = 0; i < TOTAL_KEYS; i++) {
+ final int ri = random.nextInt() & Integer.MAX_VALUE; // masking the sign bit keeps the key non-negative
+ buf.putInt(ri);
+ buf.flip();
+ final byte[] key = buf.array();
+
+ // does key already exist (avoid duplicates)
+ if (keyExists(key, i)) {
+ i--; // loop round and generate a different key
+ } else {
+ System.arraycopy(key, 0, keys[i], 0, 4);
+ }
+ }
+ }
+
+ private static boolean keyExists(final byte[] key, final int limit) {
+ for (int j = 0; j < limit; j++) {
+ if (Arrays.equals(key, keys[j])) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Parameters(name = "{0}")
+ public static Iterable<Object[]> parameters() {
+ return Arrays.asList(new Object[][] {
+ { "non-direct_reused64_mutex", false, 64, ReusedSynchronisationType.MUTEX },
+ { "direct_reused64_adaptive-mutex", true, 64, ReusedSynchronisationType.MUTEX },
+ { "non-direct_reused64_adaptive-mutex", false, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX },
+ { "direct_reused64_adaptive-mutex", true, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX },
+ { "non-direct_reused64_adaptive-mutex", false, 64, ReusedSynchronisationType.THREAD_LOCAL },
+ { "direct_reused64_adaptive-mutex", true, 64, ReusedSynchronisationType.THREAD_LOCAL },
+ { "non-direct_noreuse", false, -1, null },
+ { "direct_noreuse", true, -1, null }
+ });
+ }
+
+ @Parameter(0)
+ public String name;
+
+ @Parameter(1)
+ public boolean useDirectBuffer;
+
+ @Parameter(2)
+ public int maxReusedBufferSize;
+
+ @Parameter(3)
+ public ReusedSynchronisationType reusedSynchronisationType;
+
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void javaComparatorDefaultCf() throws RocksDBException {
+ try (final ComparatorOptions options = new ComparatorOptions()
+ .setUseDirectBuffer(useDirectBuffer)
+ .setMaxReusedBufferSize(maxReusedBufferSize)
+ // if reusedSynchronisationType == null we assume maxReusedBufferSize <= 0,
+ // so ADAPTIVE_MUTEX is set only as a placeholder and won't be used
+ .setReusedSynchronisationType(reusedSynchronisationType == null
+ ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType);
+ final ReverseBytewiseComparator comparator =
+ new ReverseBytewiseComparator(options)) {
+
+ // test the round-tripability of keys written and read with the Comparator
+ testRoundtrip(FileSystems.getDefault().getPath(
+ dbFolder.getRoot().getAbsolutePath()), comparator);
+ }
+ }
+
+ @Test
+ public void javaComparatorNamedCf() throws RocksDBException {
+ try (final ComparatorOptions options = new ComparatorOptions()
+ .setUseDirectBuffer(useDirectBuffer)
+ .setMaxReusedBufferSize(maxReusedBufferSize)
+ // if reusedSynchronisationType == null we assume maxReusedBufferSize <= 0,
+ // so ADAPTIVE_MUTEX is set only as a placeholder and won't be used
+ .setReusedSynchronisationType(reusedSynchronisationType == null
+ ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType);
+ final ReverseBytewiseComparator comparator
+ = new ReverseBytewiseComparator(options)) {
+
+ // test the round-tripability of keys written and read with the Comparator
+ testRoundtripCf(FileSystems.getDefault().getPath(
+ dbFolder.getRoot().getAbsolutePath()), comparator);
+ }
+ }
+
+ /**
+ * Test which stores random keys into the database
+ * using a {@link ReverseBytewiseComparator};
+ * it then checks that these keys are read back in
+ * descending order.
+ *
+ * @param db_path A path where we can store database
+ * files temporarily
+ *
+ * @param comparator the comparator
+ *
+ * @throws RocksDBException if a database error happens.
+ */
+ private void testRoundtrip(final Path db_path,
+ final AbstractComparator comparator) throws RocksDBException {
+ try (final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setComparator(comparator)) {
+
+ // store TOTAL_KEYS into the db
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString())) {
+ for (int i = 0; i < TOTAL_KEYS; i++) {
+ db.put(keys[i], "value".getBytes(UTF_8));
+ }
+ }
+
+ // re-open db and read from start to end
+ // integer keys should be in descending
+ // order
+ final ByteBuffer key = ByteBuffer.allocate(4);
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString());
+ final RocksIterator it = db.newIterator()) {
+ int lastKey = Integer.MAX_VALUE;
+ int count = 0;
+ for (it.seekToFirst(); it.isValid(); it.next()) {
+ key.put(it.key());
+ key.flip();
+ final int thisKey = key.getInt();
+ key.clear();
+ assertThat(thisKey).isLessThan(lastKey);
+ lastKey = thisKey;
+ count++;
+ }
+ assertThat(count).isEqualTo(TOTAL_KEYS);
+ }
+ }
+ }
+
+ /**
+ * Test which stores random keys into a column family
+ * in the database
+ * using a {@link ReverseBytewiseComparator};
+ * it then checks that these keys are read back in
+ * descending order.
+ *
+ * @param db_path A path where we can store database
+ * files temporarily
+ *
+ * @param comparator the comparator
+ *
+ * @throws RocksDBException if a database error happens.
+ */
+ private void testRoundtripCf(final Path db_path,
+ final AbstractComparator comparator) throws RocksDBException {
+
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(),
+ new ColumnFamilyOptions()
+ .setComparator(comparator))
+ );
+
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+ try (final DBOptions opt = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true)) {
+
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
+ cfDescriptors, cfHandles)) {
+ try {
+ assertThat(cfDescriptors.size()).isEqualTo(2);
+ assertThat(cfHandles.size()).isEqualTo(2);
+
+ for (int i = 0; i < TOTAL_KEYS; i++) {
+ db.put(cfHandles.get(1), keys[i], "value".getBytes(UTF_8));
+ }
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ cfHandles.clear();
+ }
+ }
+
+ // re-open db and read from start to end
+ // integer keys should be in descending
+ // order
+ final ByteBuffer key = ByteBuffer.allocate(4);
+ try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
+ cfDescriptors, cfHandles);
+ final RocksIterator it = db.newIterator(cfHandles.get(1))) {
+ try {
+ assertThat(cfDescriptors.size()).isEqualTo(2);
+ assertThat(cfHandles.size()).isEqualTo(2);
+
+ int lastKey = Integer.MAX_VALUE;
+ int count = 0;
+ for (it.seekToFirst(); it.isValid(); it.next()) {
+ key.put(it.key());
+ key.flip();
+ final int thisKey = key.getInt();
+ key.clear();
+ assertThat(thisKey).isLessThan(lastKey);
+ lastKey = thisKey;
+ count++;
+ }
+
+ assertThat(count).isEqualTo(TOTAL_KEYS);
+
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ cfHandles.clear();
+ for (final ColumnFamilyDescriptor cfDescriptor : cfDescriptors) {
+ cfDescriptor.getOptions().close();
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java
new file mode 100644
index 000000000..990aa5f47
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java
@@ -0,0 +1,27 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb.util;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class SizeUnitTest {
+
+ public static final long COMPUTATION_UNIT = 1024L;
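+ // worked example: SizeUnit.GB = 1024^3 = 1,073,741,824 bytes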
+
+ @Test
+ public void sizeUnit() {
+ assertThat(SizeUnit.KB).isEqualTo(COMPUTATION_UNIT);
+ assertThat(SizeUnit.MB).isEqualTo(
+ SizeUnit.KB * COMPUTATION_UNIT);
+ assertThat(SizeUnit.GB).isEqualTo(
+ SizeUnit.MB * COMPUTATION_UNIT);
+ assertThat(SizeUnit.TB).isEqualTo(
+ SizeUnit.GB * COMPUTATION_UNIT);
+ assertThat(SizeUnit.PB).isEqualTo(
+ SizeUnit.TB * COMPUTATION_UNIT);
+ }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/TestUtil.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/TestUtil.java
new file mode 100644
index 000000000..57347b084
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/TestUtil.java
@@ -0,0 +1,61 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.rocksdb.CompactionPriority;
+import org.rocksdb.Options;
+import org.rocksdb.WALRecoveryMode;
+
+import java.util.Random;
+
+/**
+ * General test utilities.
+ */
+public class TestUtil {
+
+ /**
+ * Get the options for log iteration tests.
+ *
+ * @return the options
+ */
+ public static Options optionsForLogIterTest() {
+ return defaultOptions()
+ .setCreateIfMissing(true)
+ .setWalTtlSeconds(1000);
+ }
+
+ /**
+ * Get the default options.
+ *
+ * @return the options
+ */
+ public static Options defaultOptions() {
+ return new Options()
+ .setWriteBufferSize(4090 * 4096)
+ .setTargetFileSizeBase(2 * 1024 * 1024)
+ .setMaxBytesForLevelBase(10 * 1024 * 1024)
+ .setMaxOpenFiles(5000)
+ .setWalRecoveryMode(WALRecoveryMode.TolerateCorruptedTailRecords)
+ .setCompactionPriority(CompactionPriority.ByCompensatedSize);
+ }
+
+ private static final Random random = new Random();
+
+ /**
+ * Generate a random string of bytes.
+ *
+ * @param len the length of the string to generate.
+ *
+ * @return the random string of bytes
+ */
+ public static byte[] dummyString(final int len) {
+ final byte[] str = new byte[len];
+ random.nextBytes(str);
+ return str;
+ }
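+
+ /*
+ * Usage sketch (illustrative only; "path" is a placeholder): the helpers
+ * above are typically combined along these lines:
+ *
+ * try (final Options opt = TestUtil.optionsForLogIterTest();
+ * final RocksDB db = RocksDB.open(opt, path)) {
+ * db.put(TestUtil.dummyString(8), TestUtil.dummyString(16));
+ * }
+ */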
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java
new file mode 100644
index 000000000..646e8b8f8
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java
@@ -0,0 +1,134 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb.util;
+
+import org.rocksdb.RocksDBException;
+import org.rocksdb.WriteBatch;
+
+import java.util.Arrays;
+
+public class WriteBatchGetter extends WriteBatch.Handler {
+
+ private int columnFamilyId = -1;
+ private final byte[] key;
+ private byte[] value;
+
+ public WriteBatchGetter(final byte[] key) {
+ this.key = key;
+ }
+
+ public byte[] getValue() {
+ return value;
+ }
+
+ @Override
+ public void put(final int columnFamilyId, final byte[] key,
+ final byte[] value) {
+ if(Arrays.equals(this.key, key)) {
+ this.columnFamilyId = columnFamilyId;
+ this.value = value;
+ }
+ }
+
+ @Override
+ public void put(final byte[] key, final byte[] value) {
+ if(Arrays.equals(this.key, key)) {
+ this.value = value;
+ }
+ }
+
+ @Override
+ public void merge(final int columnFamilyId, final byte[] key,
+ final byte[] value) {
+ if(Arrays.equals(this.key, key)) {
+ this.columnFamilyId = columnFamilyId;
+ this.value = value;
+ }
+ }
+
+ @Override
+ public void merge(final byte[] key, final byte[] value) {
+ if(Arrays.equals(this.key, key)) {
+ this.value = value;
+ }
+ }
+
+ @Override
+ public void delete(final int columnFamilyId, final byte[] key) {
+ if(Arrays.equals(this.key, key)) {
+ this.columnFamilyId = columnFamilyId;
+ this.value = null;
+ }
+ }
+
+ @Override
+ public void delete(final byte[] key) {
+ if(Arrays.equals(this.key, key)) {
+ this.value = null;
+ }
+ }
+
+ @Override
+ public void singleDelete(final int columnFamilyId, final byte[] key) {
+ if(Arrays.equals(this.key, key)) {
+ this.columnFamilyId = columnFamilyId;
+ this.value = null;
+ }
+ }
+
+ @Override
+ public void singleDelete(final byte[] key) {
+ if(Arrays.equals(this.key, key)) {
+ this.value = null;
+ }
+ }
+
+ @Override
+ public void deleteRange(final int columnFamilyId, final byte[] beginKey,
+ final byte[] endKey) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void deleteRange(final byte[] beginKey, final byte[] endKey) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void logData(final byte[] blob) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void putBlobIndex(final int columnFamilyId, final byte[] key,
+ final byte[] value) {
+ if(Arrays.equals(this.key, key)) {
+ this.columnFamilyId = columnFamilyId;
+ this.value = value;
+ }
+ }
+
+ @Override
+ public void markBeginPrepare() throws RocksDBException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void markEndPrepare(final byte[] xid) throws RocksDBException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void markNoop(final boolean emptyBatch) throws RocksDBException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void markRollback(final byte[] xid) throws RocksDBException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void markCommit(final byte[] xid) throws RocksDBException {
+ throw new UnsupportedOperationException();
+ }
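+
+ /*
+ * Usage sketch: replay a batch through this handler via
+ * WriteBatch#iterate(Handler) to recover the value last written for a
+ * key; the delete-style callbacks above reset it to null. Hypothetical
+ * convenience method, not part of the original class.
+ */
+ public static byte[] valueFor(final WriteBatch batch, final byte[] key)
+ throws RocksDBException {
+ final WriteBatchGetter getter = new WriteBatchGetter(key);
+ batch.iterate(getter);
+ return getter.getValue();
+ }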
+}